Diffstat (limited to 'drivers')
176 files changed, 2344 insertions, 2703 deletions
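The most pervasive change in the amdgpu portion of this diff is the register-access rework: reads and writes that previously went through RREG32()/WREG32_RLC() now take an explicit XCC (accelerated compute core) instance, via the amdgpu_device_xcc_rreg()/amdgpu_device_xcc_wreg() helpers and the RREG32_XCC()/WREG32_XCC() macros added to amdgpu.h below. The sketch that follows only illustrates the call shape; the prototypes and macros are the ones added in this diff, while the wrapper function and its name are made up for illustration and assume the usual amdgpu.h context (a local struct amdgpu_device *adev, as in the kfd/gfx callers further down).

	/* Prototypes as added to amdgpu.h in this series */
	uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
					uint32_t reg, uint32_t acc_flags,
					uint32_t xcc_id);
	void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
				    uint32_t reg, uint32_t v,
				    uint32_t acc_flags, uint32_t xcc_id);

	/* Hypothetical helper showing the per-XCC read/modify/write pattern.
	 * RREG32_XCC()/WREG32_XCC() expand to the functions above with
	 * acc_flags == 0 and pick up 'adev' from the enclosing scope.
	 */
	static void example_toggle_bit(struct amdgpu_device *adev, uint32_t reg,
				       uint32_t bit, uint32_t xcc_inst)
	{
		uint32_t v = RREG32_XCC(reg, xcc_inst);	/* read from one XCC instance */

		WREG32_XCC(reg, v | bit, xcc_inst);	/* write back to the same instance */
	}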
diff --git a/drivers/Kconfig b/drivers/Kconfig index 9826907eb06e..7bdad836fc62 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -135,8 +135,6 @@ source "drivers/uio/Kconfig" source "drivers/vfio/Kconfig" -source "drivers/vlynq/Kconfig" - source "drivers/virt/Kconfig" source "drivers/virtio/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 722d15be0eb7..d828329c268d 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -151,7 +151,6 @@ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ obj-$(CONFIG_VHOST_IOTLB) += vhost/ obj-$(CONFIG_VHOST) += vhost/ -obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_COMEDI) += comedi/ obj-$(CONFIG_STAGING) += staging/ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 6fb4e8dc8c3c..09ed67772fae 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6180,24 +6180,10 @@ EXPORT_SYMBOL_GPL(ata_pci_remove_one); void ata_pci_shutdown_one(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); - struct ata_port *ap; - unsigned long flags; int i; - /* Tell EH to disable all devices */ - for (i = 0; i < host->n_ports; i++) { - ap = host->ports[i]; - spin_lock_irqsave(ap->lock, flags); - ap->pflags |= ATA_PFLAG_UNLOADING; - ata_port_schedule_eh(ap); - spin_unlock_irqrestore(ap->lock, flags); - } - for (i = 0; i < host->n_ports; i++) { - ap = host->ports[i]; - - /* Wait for EH to complete before freezing the port */ - ata_port_wait_eh(ap); + struct ata_port *ap = host->ports[i]; ap->pflags |= ATA_PFLAG_FROZEN; diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c index 0c2ae430f5aa..18ceefd176df 100644 --- a/drivers/ata/pata_falcon.c +++ b/drivers/ata/pata_falcon.c @@ -121,7 +121,7 @@ static struct ata_port_operations pata_falcon_ops = { .set_mode = pata_falcon_set_mode, }; -static int __init pata_falcon_init_one(struct platform_device *pdev) +static int pata_falcon_init_one(struct platform_device *pdev) { struct resource *base_mem_res, *ctl_mem_res; struct resource *base_res, *ctl_res, *irq_res; @@ -216,23 +216,22 @@ static int __init pata_falcon_init_one(struct platform_device *pdev) IRQF_SHARED, &pata_falcon_sht); } -static int __exit pata_falcon_remove_one(struct platform_device *pdev) +static void pata_falcon_remove_one(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); ata_host_detach(host); - - return 0; } static struct platform_driver pata_falcon_driver = { - .remove = __exit_p(pata_falcon_remove_one), + .probe = pata_falcon_init_one, + .remove_new = pata_falcon_remove_one, .driver = { .name = "atari-falcon-ide", }, }; -module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one); +module_platform_driver(pata_falcon_driver); MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA"); diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c index 3bdbe2b65a2b..94df60ac2307 100644 --- a/drivers/ata/pata_gayle.c +++ b/drivers/ata/pata_gayle.c @@ -124,7 +124,7 @@ static struct ata_port_operations pata_gayle_a4000_ops = { .set_mode = pata_gayle_set_mode, }; -static int __init pata_gayle_init_one(struct platform_device *pdev) +static int pata_gayle_init_one(struct platform_device *pdev) { struct resource *res; struct gayle_ide_platform_data *pdata; @@ -193,23 +193,22 @@ static int __init pata_gayle_init_one(struct platform_device *pdev) return 0; } -static int __exit pata_gayle_remove_one(struct platform_device *pdev) +static void 
pata_gayle_remove_one(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); ata_host_detach(host); - - return 0; } static struct platform_driver pata_gayle_driver = { - .remove = __exit_p(pata_gayle_remove_one), + .probe = pata_gayle_init_one, + .remove_new = pata_gayle_remove_one, .driver = { .name = "amiga-gayle-ide", }, }; -module_platform_driver_probe(pata_gayle_driver, pata_gayle_init_one); +module_platform_driver(pata_gayle_driver); MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("low-level driver for Amiga Gayle PATA"); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 800f131222fc..855fdf5c3b4e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd) struct gendisk *disk = nbd->disk; del_gendisk(disk); - put_disk(disk); blk_mq_free_tag_set(&nbd->tag_set); /* @@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd) idr_remove(&nbd_index_idr, nbd->index); mutex_unlock(&nbd_index_mutex); destroy_workqueue(nbd->recv_workq); - kfree(nbd); + put_disk(disk); } static void nbd_dev_remove_work(struct work_struct *work) @@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk) nbd_put(nbd); } +static void nbd_free_disk(struct gendisk *disk) +{ + struct nbd_device *nbd = disk->private_data; + + kfree(nbd); +} + static const struct block_device_operations nbd_fops = { .owner = THIS_MODULE, @@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops = .release = nbd_release, .ioctl = nbd_ioctl, .compat_ioctl = nbd_ioctl, + .free_disk = nbd_free_disk, }; #if IS_ENABLED(CONFIG_DEBUG_FS) diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 06f5bad3c3e0..57857c0dfba9 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -25,7 +25,7 @@ #include <linux/limits.h> #include <clocksource/timer-riscv.h> #include <asm/smp.h> -#include <asm/hwcap.h> +#include <asm/cpufeature.h> #include <asm/sbi.h> #include <asm/timex.h> diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 74d00b0c83fe..4a98a859d44d 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -131,7 +131,7 @@ config RASPBERRYPI_FIRMWARE config FW_CFG_SYSFS tristate "QEMU fw_cfg device support in sysfs" - depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86) + depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || X86) depends on HAS_IOPORT_MAP default n help diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index ef4c12f0877b..06964a3c130f 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index a69399a6b7c0..1448f61173b3 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) -# 
if (defined(CONFIG_ARM) || defined(CONFIG_ARM64)) +# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8b4ca2576a3d..afec09930efa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1159,11 +1159,18 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, u64 reg_addr); +uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags, + uint32_t xcc_id); void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, u64 reg_addr, u32 reg_data); +void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags, + uint32_t xcc_id); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); @@ -1204,8 +1211,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) -#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg)) -#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v)) +#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0) +#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0) #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) @@ -1215,6 +1222,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) +#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst) +#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4da82fc64fef..2deebece810e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1494,6 +1494,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) if (adev->asic_type < CHIP_RAVEN) return false; + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) + return false; + /* * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally * risky to do any special firmware-related preparations for entering diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 490c8f5ddb60..f6598b9e4faa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -300,14 +300,13 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI); for (reg = hqd_base; reg <= hqd_end; reg++) - WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); + WREG32_XCC(reg, 
mqd_hqd[reg - hqd_base], inst); /* Activate doorbell logic before triggering WPTR poll. */ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL), - data); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL, data); if (wptr) { /* Don't read wptr with get_user because the user @@ -336,27 +335,24 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO), - lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI), - upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR), - lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), - regCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO, + lower_32_bits(guessed_wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI, + upper_32_bits(guessed_wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR, + lower_32_bits((uintptr_t)wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, upper_32_bits((uintptr_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1), - (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, - queue_id)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1, + (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR), - REG_SET_FIELD(m->cp_hqd_eop_rptr, - CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR, + REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE, data); kgd_gfx_v9_release_queue(adev, inst); @@ -494,15 +490,15 @@ static uint32_t kgd_gfx_v9_4_3_set_address_watch( VALID, 1); - WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regTCP_WATCH0_ADDR_H) + (watch_id * TCP_WATCH_STRIDE)), - watch_address_high); + watch_address_high, inst); - WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regTCP_WATCH0_ADDR_L) + (watch_id * TCP_WATCH_STRIDE)), - watch_address_low); + watch_address_low, inst); return watch_address_cntl; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 51011e8ee90d..00fbc0f44c92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -91,8 +91,8 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi { kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15_RLC(GC, 
GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ kgd_gfx_v9_unlock_srbm(adev, inst); @@ -239,14 +239,13 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, for (reg = hqd_base; reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++) - WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); + WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst); /* Activate doorbell logic before triggering WPTR poll. */ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL), - data); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data); if (wptr) { /* Don't read wptr with get_user because the user @@ -275,25 +274,24 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO), - lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI), - upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR), - lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), - upper_32_bits((uintptr_t)wptr)); - WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1, - (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO, + lower_32_bits(guessed_wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI, + upper_32_bits(guessed_wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR, + lower_32_bits((uintptr_t)wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, + upper_32_bits((uintptr_t)wptr)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1, + (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR), - REG_SET_FIELD(m->cp_hqd_eop_rptr, - CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR, + REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data); kgd_gfx_v9_release_queue(adev, inst); @@ -556,7 +554,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, break; } - WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { @@ -908,8 +906,8 @@ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t inst) { - *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), - mmCP_IQ_WAIT_TIME2)); + *wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst), + mmCP_IQ_WAIT_TIME2); } void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 781e5c5ce04d..702f6610d024 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -172,6 +172,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, } rcu_read_unlock(); + *result = NULL; return -ENOENT; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 86f88512eb7e..df3ecfa9e13f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1415,7 +1415,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r != -ERESTARTSYS && r != -EAGAIN) - DRM_ERROR("Failed to process the buffer list %d!\n", r); + DRM_DEBUG("Failed to process the buffer list %d!\n", r); goto error_fini; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fd8cd8e2d3f2..7eeaf0aa7f81 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -73,6 +73,7 @@ #include "amdgpu_pmu.h" #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" +#include "amdgpu_virt.h" #include <linux/suspend.h> #include <drm/task_barrier.h> @@ -472,7 +473,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && down_read_trylock(&adev->reset_domain->sem)) { - ret = amdgpu_kiq_rreg(adev, reg); + ret = amdgpu_kiq_rreg(adev, reg, 0); up_read(&adev->reset_domain->sem); } else { ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -509,6 +510,49 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) BUG(); } + +/** + * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @acc_flags: access flags which require special behavior + * @xcc_id: xcc accelerated compute core id + * + * Returns the 32 bit value from the offset specified. 
+ */ +uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags, + uint32_t xcc_id) +{ + uint32_t ret, rlcg_flag; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if ((reg * 4) < adev->rmmio_size) { + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + adev->gfx.rlc.rlcg_reg_access_supported && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, + GC_HWIP, false, + &rlcg_flag)) { + ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id); + } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_domain->sem)) { + ret = amdgpu_kiq_rreg(adev, reg, xcc_id); + up_read(&adev->reset_domain->sem); + } else { + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + ret = adev->pcie_rreg(adev, reg * 4); + } + + return ret; +} + /* * MMIO register write with bytes helper functions * @offset:bytes offset from MMIO start @@ -556,7 +600,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && down_read_trylock(&adev->reset_domain->sem)) { - amdgpu_kiq_wreg(adev, reg, v); + amdgpu_kiq_wreg(adev, reg, v, 0); up_read(&adev->reset_domain->sem); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); @@ -598,6 +642,47 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, } /** + * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @v: 32 bit value to write to the register + * @acc_flags: access flags which require special behavior + * @xcc_id: xcc accelerated compute core id + * + * Writes the value specified to the offset specified. 
+ */ +void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags, uint32_t xcc_id) +{ + uint32_t rlcg_flag; + + if (amdgpu_device_skip_hw_access(adev)) + return; + + if ((reg * 4) < adev->rmmio_size) { + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + adev->gfx.rlc.rlcg_reg_access_supported && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, + GC_HWIP, true, + &rlcg_flag)) { + amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id); + } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_kiq_wreg(adev, reg, v, xcc_id); + up_read(&adev->reset_domain->sem); + } else { + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + adev->pcie_wreg(adev, reg * 4, v); + } +} + +/** * amdgpu_device_indirect_rreg - read an indirect register * * @adev: amdgpu_device pointer @@ -2499,6 +2584,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) ring->name); return r; } + r = amdgpu_uvd_entity_init(adev, ring); + if (r) { + DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n", + ring->name); + return r; + } + r = amdgpu_vce_entity_init(adev, ring); + if (r) { + DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n", + ring->name); + return r; + } } amdgpu_xcp_update_partition_sched_list(adev); @@ -4486,19 +4583,18 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) } amdgpu_fence_driver_hw_init(adev); - r = amdgpu_device_ip_late_init(adev); - if (r) - goto exit; - - queue_delayed_work(system_wq, &adev->delayed_init_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); - if (!adev->in_s0ix) { r = amdgpu_amdkfd_resume(adev, adev->in_runpm); if (r) goto exit; } + r = amdgpu_device_ip_late_init(adev); + if (r) + goto exit; + + queue_delayed_work(system_wq, &adev->delayed_init_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); exit: if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_data_exchange(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a2a29dcb2422..b9674c57c436 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -931,12 +931,12 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, func(adev, ras_error_status, i); } -uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id) { signed long r, cnt = 0; unsigned long flags; uint32_t seq, reg_val_offs = 0, value = 0; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *ring = &kiq->ring; if (amdgpu_device_skip_hw_access(adev)) @@ -999,12 +999,12 @@ failed_kiq_read: return ~0; } -void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id) { signed long r, cnt = 0; unsigned long flags; uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_wreg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 7088c5015675..f23bafec71c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -521,8 +521,8 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, int 
amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 2dce338b0f1e..5f71414190e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -826,7 +826,10 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) gc_ver == IP_VERSION(9, 4, 3) || gc_ver >= IP_VERSION(10, 3, 0)); - gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry; + if (!amdgpu_sriov_xnack_support(adev)) + gmc->noretry = 1; + else + gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry; } void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 5a828c175e3a..cf33eb219e25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -143,6 +143,46 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev) return 0; } +void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set) +{ + if (!mca_set) + return; + + memset(mca_set, 0, sizeof(*mca_set)); + INIT_LIST_HEAD(&mca_set->list); +} + +int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry) +{ + struct mca_bank_node *node; + + if (!entry) + return -EINVAL; + + node = kvzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + memcpy(&node->entry, entry, sizeof(*entry)); + + INIT_LIST_HEAD(&node->node); + list_add_tail(&node->node, &mca_set->list); + + mca_set->nr_entries++; + + return 0; +} + +void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set) +{ + struct mca_bank_node *node, *tmp; + + list_for_each_entry_safe(node, tmp, &mca_set->list, node) { + list_del(&node->node); + kvfree(node); + } +} + void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs) { struct amdgpu_mca *mca = &adev->mca; @@ -160,6 +200,65 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return -EOPNOTSUPP; } +static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) +{ + dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n"); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_STATUS]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_ADDR]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_MISC0]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_IPID]); + dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_SYND]); +} + +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct 
ras_err_data *err_data) +{ + struct amdgpu_smuio_mcm_config_info mcm_info; + struct mca_bank_set mca_set; + struct mca_bank_node *node; + struct mca_bank_entry *entry; + uint32_t count; + int ret, i = 0; + + amdgpu_mca_bank_set_init(&mca_set); + + ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set); + if (ret) + goto out_mca_release; + + list_for_each_entry(node, &mca_set.list, node) { + entry = &node->entry; + + amdgpu_mca_smu_mca_bank_dump(adev, i++, entry); + + count = 0; + ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); + if (ret) + goto out_mca_release; + + if (!count) + continue; + + mcm_info.socket_id = entry->info.socket_id; + mcm_info.die_id = entry->info.aid; + + if (type == AMDGPU_MCA_ERROR_TYPE_UE) + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count); + else + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count); + } + +out_mca_release: + amdgpu_mca_bank_set_release(&mca_set); + + return ret; +} + + int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) { const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; @@ -173,17 +272,77 @@ int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_m return -EOPNOTSUPP; } -int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, uint32_t *count) +int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, uint32_t *total) { const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; - if (!count) + struct mca_bank_set mca_set; + struct mca_bank_node *node; + struct mca_bank_entry *entry; + uint32_t count; + int ret; + + if (!total) return -EINVAL; - if (mca_funcs && mca_funcs->mca_get_error_count) - return mca_funcs->mca_get_error_count(adev, blk, type, count); + if (!mca_funcs) + return -EOPNOTSUPP; - return -EOPNOTSUPP; + if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count) + return -EOPNOTSUPP; + + amdgpu_mca_bank_set_init(&mca_set); + + ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set); + if (ret) + goto err_mca_set_release; + + *total = 0; + list_for_each_entry(node, &mca_set.list, node) { + entry = &node->entry; + + count = 0; + ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count); + if (ret) + goto err_mca_set_release; + + *total += count; + } + +err_mca_set_release: + amdgpu_mca_bank_set_release(&mca_set); + + return ret; +} + +int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + if (!count || !entry) + return -EINVAL; + + if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count) + return -EOPNOTSUPP; + + + return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count); +} + +int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) +{ + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!mca_set) + return -EINVAL; + + if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set) + return -EOPNOTSUPP; + + WARN_ON(!list_empty(&mca_set->list)); + + return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set); } int 
amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, @@ -230,14 +389,21 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val) static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry) { int i, idx = entry->idx; + int reg_idx_array[] = { + MCA_REG_IDX_STATUS, + MCA_REG_IDX_ADDR, + MCA_REG_IDX_MISC0, + MCA_REG_IDX_IPID, + MCA_REG_IDX_SYND, + }; seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE"); seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip); seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype); - for (i = 0; i < ARRAY_SIZE(entry->regs); i++) - seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, i, entry->regs[i]); + for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++) + seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]); } static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index 28ad463cf5c9..2b488fcf2f95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -25,6 +25,27 @@ #define MCA_MAX_REGS_COUNT (16) +#define MCA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define MCA_REG__STATUS__VAL(x) MCA_REG_FIELD(x, 63, 63) +#define MCA_REG__STATUS__OVERFLOW(x) MCA_REG_FIELD(x, 62, 62) +#define MCA_REG__STATUS__UC(x) MCA_REG_FIELD(x, 61, 61) +#define MCA_REG__STATUS__EN(x) MCA_REG_FIELD(x, 60, 60) +#define MCA_REG__STATUS__MISCV(x) MCA_REG_FIELD(x, 59, 59) +#define MCA_REG__STATUS__ADDRV(x) MCA_REG_FIELD(x, 58, 58) +#define MCA_REG__STATUS__PCC(x) MCA_REG_FIELD(x, 57, 57) +#define MCA_REG__STATUS__ERRCOREIDVAL(x) MCA_REG_FIELD(x, 56, 56) +#define MCA_REG__STATUS__TCC(x) MCA_REG_FIELD(x, 55, 55) +#define MCA_REG__STATUS__SYNDV(x) MCA_REG_FIELD(x, 53, 53) +#define MCA_REG__STATUS__CECC(x) MCA_REG_FIELD(x, 46, 46) +#define MCA_REG__STATUS__UECC(x) MCA_REG_FIELD(x, 45, 45) +#define MCA_REG__STATUS__DEFERRED(x) MCA_REG_FIELD(x, 44, 44) +#define MCA_REG__STATUS__POISON(x) MCA_REG_FIELD(x, 43, 43) +#define MCA_REG__STATUS__SCRUB(x) MCA_REG_FIELD(x, 40, 40) +#define MCA_REG__STATUS__ERRCOREID(x) MCA_REG_FIELD(x, 37, 32) +#define MCA_REG__STATUS__ADDRLSB(x) MCA_REG_FIELD(x, 29, 24) +#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16) +#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0) + enum amdgpu_mca_ip { AMDGPU_MCA_IP_UNKNOW = -1, AMDGPU_MCA_IP_PSP = 0, @@ -33,6 +54,7 @@ enum amdgpu_mca_ip { AMDGPU_MCA_IP_SMU, AMDGPU_MCA_IP_MP5, AMDGPU_MCA_IP_UMC, + AMDGPU_MCA_IP_PCS_XGMI, AMDGPU_MCA_IP_COUNT, }; @@ -57,6 +79,15 @@ struct amdgpu_mca { const struct amdgpu_mca_smu_funcs *mca_funcs; }; +enum mca_reg_idx { + MCA_REG_IDX_STATUS = 1, + MCA_REG_IDX_ADDR = 2, + MCA_REG_IDX_MISC0 = 3, + MCA_REG_IDX_IPID = 5, + MCA_REG_IDX_SYND = 6, + MCA_REG_IDX_COUNT = 16, +}; + struct mca_bank_info { int socket_id; int aid; @@ -72,18 +103,28 @@ struct mca_bank_entry { uint64_t regs[MCA_MAX_REGS_COUNT]; }; +struct mca_bank_node { + struct mca_bank_entry entry; + struct list_head node; +}; + +struct mca_bank_set { + int nr_entries; + struct list_head list; +}; + struct amdgpu_mca_smu_funcs { int max_ue_count; int max_ce_count; int (*mca_set_debug_mode)(struct amdgpu_device *adev, bool enable); - int (*mca_get_error_count)(struct 
amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, uint32_t *count); + int (*mca_get_ras_mca_set)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_set *mca_set); + int (*mca_parse_mca_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry, uint32_t *count); int (*mca_get_valid_mca_count)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count); int (*mca_get_mca_entry)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry); - int (*mca_get_ras_mca_idx_array)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size); }; void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, @@ -107,11 +148,22 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev); void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs); int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable); int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count); +int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, uint32_t *total); int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, uint32_t *count); +int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count); +int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set); int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry); void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); +void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set); +int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry); +void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set); +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0dcb6c36b02c..cef920a93924 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1062,9 +1062,6 @@ static const char * const amdgpu_vram_names[] = { */ int amdgpu_bo_init(struct amdgpu_device *adev) { - /* set the default AGP aperture state */ - amdgpu_gmc_set_agp_default(adev, &adev->gmc); - /* On A+A platform, VRAM can be mapped as WB */ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { /* reserve PAT memory space to WC for VRAM */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index b7fe5951b166..84e5987b14e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1165,13 +1165,53 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s } } -/* query/inject/cure begin */ -int amdgpu_ras_query_error_status(struct amdgpu_device 
*adev, - struct ras_query_if *info) +static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, + struct ras_query_if *info, + struct ras_err_data *err_data, + unsigned int error_query_mode) { + enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT; struct amdgpu_ras_block_object *block_obj = NULL; + + if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) + return -EINVAL; + + if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { + amdgpu_ras_get_ecc_info(adev, err_data); + } else { + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); + if (!block_obj || !block_obj->hw_ops) { + dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", + get_ras_block_str(&info->head)); + return -EINVAL; + } + + if (block_obj->hw_ops->query_ras_error_count) + block_obj->hw_ops->query_ras_error_count(adev, &err_data); + + if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || + (info->head.block == AMDGPU_RAS_BLOCK__GFX) || + (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); + } + } + } else { + /* FIXME: add code to check return value later */ + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); + } + + return 0; +} + +/* query/inject/cure begin */ +int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) +{ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data; + unsigned int error_query_mode; int ret; if (!obj) @@ -1181,27 +1221,14 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (ret) return ret; - if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { - amdgpu_ras_get_ecc_info(adev, &err_data); - } else { - block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); - if (!block_obj || !block_obj->hw_ops) { - dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", - get_ras_block_str(&info->head)); - ret = -EINVAL; - goto out_fini_err_data; - } - - if (block_obj->hw_ops->query_ras_error_count) - block_obj->hw_ops->query_ras_error_count(adev, &err_data); + if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) + return -EINVAL; - if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || - (info->head.block == AMDGPU_RAS_BLOCK__GFX) || - (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { - if (block_obj->hw_ops->query_ras_error_status) - block_obj->hw_ops->query_ras_error_status(adev); - } - } + ret = amdgpu_ras_query_error_status_helper(adev, info, + &err_data, + error_query_mode); + if (ret) + goto out_fini_err_data; amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); @@ -1537,7 +1564,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - sysfs_remove_file_from_group(&adev->dev->kobj, + if (adev->dev->kobj.sd) + sysfs_remove_file_from_group(&adev->dev->kobj, &con->badpages_attr.attr, RAS_FS_NAME); } @@ -1556,7 +1584,8 @@ static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) .attrs = attrs, }; - sysfs_remove_group(&adev->dev->kobj, &group); + if (adev->dev->kobj.sd) + sysfs_remove_group(&adev->dev->kobj, &group); return 0; } @@ -1603,7 +1632,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, if (!obj || !obj->attr_inuse) return -EINVAL; - 
sysfs_remove_file_from_group(&adev->dev->kobj, + if (adev->dev->kobj.sd) + sysfs_remove_file_from_group(&adev->dev->kobj, &obj->sysfs_attr.attr, RAS_FS_NAME); obj->attr_inuse = 0; @@ -3397,6 +3427,26 @@ bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev) return true; } +bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, + unsigned int *error_query_mode) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + + if (!con) { + *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; + return false; + } + + if (mca_funcs && mca_funcs->mca_set_debug_mode) + *error_query_mode = + (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; + else + *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; + + return true; +} + /* Register each ip ras block into amdgpu ras */ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object *ras_block_obj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 665414c22ca9..19161916ac46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -320,6 +320,12 @@ enum amdgpu_ras_ret { AMDGPU_RAS_PT, }; +enum amdgpu_ras_error_query_mode { + AMDGPU_RAS_INVALID_ERROR_QUERY = 0, + AMDGPU_RAS_DIRECT_ERROR_QUERY = 1, + AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2, +}; + /* ras error status reisger fields */ #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0 #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L @@ -769,6 +775,8 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev); +bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, + unsigned int *mode); int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object *ras_block_obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 815b7c34ed33..65949cc7abb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -399,20 +399,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * + * Initialize the entity used for handle management in the kernel driver. 
*/ -int amdgpu_uvd_entity_init(struct amdgpu_device *adev) +int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - struct amdgpu_ring *ring; - struct drm_gpu_scheduler *sched; - int r; + if (ring == &adev->uvd.inst[0].ring) { + struct drm_gpu_scheduler *sched = &ring->sched; + int r; - ring = &adev->uvd.inst[0].ring; - sched = &ring->sched; - r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); - if (r) { - DRM_ERROR("Failed setting up UVD kernel entity.\n"); - return r; + r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + if (r) { + DRM_ERROR("Failed setting up UVD kernel entity.\n"); + return r; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index a9f342537c68..9dfad2f48ef4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -73,7 +73,7 @@ struct amdgpu_uvd { int amdgpu_uvd_sw_init(struct amdgpu_device *adev); int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); -int amdgpu_uvd_entity_init(struct amdgpu_device *adev); +int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev); int amdgpu_uvd_suspend(struct amdgpu_device *adev); int amdgpu_uvd_resume(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 1904edf68407..0954447f689d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -231,20 +231,20 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * + * Initialize the entity used for handle management in the kernel driver. 
*/ -int amdgpu_vce_entity_init(struct amdgpu_device *adev) +int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - struct amdgpu_ring *ring; - struct drm_gpu_scheduler *sched; - int r; - - ring = &adev->vce.ring[0]; - sched = &ring->sched; - r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCE run queue.\n"); - return r; + if (ring == &adev->vce.ring[0]) { + struct drm_gpu_scheduler *sched = &ring->sched; + int r; + + r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + if (r != 0) { + DRM_ERROR("Failed setting up VCE run queue.\n"); + return r; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index ea680fc9a6c3..6e53f872d084 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -55,7 +55,7 @@ struct amdgpu_vce { int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size); int amdgpu_vce_sw_fini(struct amdgpu_device *adev); -int amdgpu_vce_entity_init(struct amdgpu_device *adev); +int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index a0aa624f5a92..3a632c3b1a2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -73,9 +73,10 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, - uint32_t ref, uint32_t mask) + uint32_t ref, uint32_t mask, + uint32_t xcc_inst) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; struct amdgpu_ring *ring = &kiq->ring; signed long r, cnt = 0; unsigned long flags; @@ -942,7 +943,7 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, } } -static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag) { @@ -975,7 +976,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, return ret; } -static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) +u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; uint32_t timeout = 50000; @@ -1093,3 +1094,13 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, else return RREG32(offset); } + +bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) +{ + bool xnack_mode = true; + + if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + xnack_mode = false; + + return xnack_mode; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 858ef21ae515..d4207e44141f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -334,7 +334,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); void amdgpu_virt_kiq_reg_write_reg_wait(struct 
amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, - uint32_t ref, uint32_t mask); + uint32_t ref, uint32_t mask, + uint32_t xcc_inst); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); @@ -365,4 +366,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id); void amdgpu_virt_post_reset(struct amdgpu_device *adev); +bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev); +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, + u32 acc_flags, u32 hwip, + bool write, u32 *rlcg_flag); +u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 904252456d25..d1b8afd105c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1098,8 +1098,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bo = gem_to_amdgpu_bo(gobj); } mem = bo->tbo.resource; - if (mem->mem_type == TTM_PL_TT || - mem->mem_type == AMDGPU_PL_PREEMPT) + if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT)) pages_addr = bo->tbo.ttm->dma_address; } @@ -2139,7 +2139,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, + int32_t xcp_id) { struct amdgpu_bo *root_bo; struct amdgpu_bo_vm *root; @@ -2158,6 +2159,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp INIT_LIST_HEAD(&vm->done); INIT_LIST_HEAD(&vm->pt_freed); INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); + INIT_KFIFO(vm->faults); r = amdgpu_vm_init_entities(adev, vm); if (r) @@ -2192,34 +2194,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp false, &root, xcp_id); if (r) goto error_free_delayed; - root_bo = &root->bo; + + root_bo = amdgpu_bo_ref(&root->bo); r = amdgpu_bo_reserve(root_bo, true); - if (r) - goto error_free_root; + if (r) { + amdgpu_bo_unref(&root->shadow); + amdgpu_bo_unref(&root_bo); + goto error_free_delayed; + } + amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); if (r) - goto error_unreserve; - - amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); + goto error_free_root; r = amdgpu_vm_pt_clear(adev, vm, root, false); if (r) - goto error_unreserve; + goto error_free_root; amdgpu_bo_unreserve(vm->root.bo); - - INIT_KFIFO(vm->faults); + amdgpu_bo_unref(&root_bo); return 0; -error_unreserve: - amdgpu_bo_unreserve(vm->root.bo); - error_free_root: - amdgpu_bo_unref(&root->shadow); + amdgpu_vm_pt_free_root(adev, vm); + amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); - vm->root.bo = NULL; error_free_delayed: dma_fence_put(vm->last_tlb_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 9d5d742ee9d3..bd20cb3b9819 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -103,6 +103,53 @@ static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = { smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 }; 
+static const int xgmi3x16_pcs_err_status_reg_v6_4[] = { + smnPCS_XGMI3X16_PCS_ERROR_STATUS, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000 +}; + +static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = { + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK, + smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000 +}; + +static const u64 xgmi_v6_4_0_mca_base_array[] = { + 0x11a09200, + 0x11b09200, +}; + +static const char *xgmi_v6_4_0_ras_error_code_ext[32] = { + [0x00] = "XGMI PCS DataLossErr", + [0x01] = "XGMI PCS TrainingErr", + [0x02] = "XGMI PCS FlowCtrlAckErr", + [0x03] = "XGMI PCS RxFifoUnderflowErr", + [0x04] = "XGMI PCS RxFifoOverflowErr", + [0x05] = "XGMI PCS CRCErr", + [0x06] = "XGMI PCS BERExceededErr", + [0x07] = "XGMI PCS TxMetaDataErr", + [0x08] = "XGMI PCS ReplayBufParityErr", + [0x09] = "XGMI PCS DataParityErr", + [0x0a] = "XGMI PCS ReplayFifoOverflowErr", + [0x0b] = "XGMI PCS ReplayFifoUnderflowErr", + [0x0c] = "XGMI PCS ElasticFifoOverflowErr", + [0x0d] = "XGMI PCS DeskewErr", + [0x0e] = "XGMI PCS FlowCtrlCRCErr", + [0x0f] = "XGMI PCS DataStartupLimitErr", + [0x10] = "XGMI PCS FCInitTimeoutErr", + [0x11] = "XGMI PCS RecoveryTimeoutErr", + [0x12] = "XGMI PCS ReadySerialTimeoutErr", + [0x13] = "XGMI PCS ReadySerialAttemptErr", + [0x14] = "XGMI PCS RecoveryAttemptErr", + [0x15] = "XGMI PCS RecoveryRelockAttemptErr", + [0x16] = "XGMI PCS ReplayAttemptErr", + [0x17] = "XGMI PCS SyncHdrErr", + [0x18] = "XGMI PCS TxReplayTimeoutErr", + [0x19] = "XGMI PCS RxReplayTimeoutErr", + [0x1a] = "XGMI PCS LinkSubTxTimeoutErr", + [0x1b] = "XGMI PCS LinkSubRxTimeoutErr", + [0x1c] = "XGMI PCS RxCMDPktErr", +}; + static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { {"XGMI PCS DataLossErr", SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, @@ -926,7 +973,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg WREG32_PCIE(pcs_status_reg, 0); } -static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) +static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev) { uint32_t i; @@ -952,6 +999,49 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) default: break; } + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) + pcs_clear_status(adev, + xgmi3x16_pcs_err_status_reg_v6_4[i]); + break; + default: + break; + } +} + +static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base) +{ + WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); +} + +static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++) + __xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]); +} + +static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + for_each_inst(i, adev->aid_mask) + xgmi_v6_4_0_reset_error_count(adev, i); +} + +static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + xgmi_v6_4_0_reset_ras_error_count(adev); + break; + default: + amdgpu_xgmi_legacy_reset_ras_error_count(adev); + break; + } } static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, @@ -969,7 +1059,9 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, if (is_xgmi_pcs) { if 
(amdgpu_ip_version(adev, XGMI_HWIP, 0) == - IP_VERSION(6, 1, 0)) { + IP_VERSION(6, 1, 0) || + amdgpu_ip_version(adev, XGMI_HWIP, 0) == + IP_VERSION(6, 4, 0)) { pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0]; field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields); } else { @@ -1003,11 +1095,11 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, return 0; } -static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, - void *ras_error_status) +static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - int i; + int i, supported = 1; uint32_t data, mask_data = 0; uint32_t ue_cnt = 0, ce_cnt = 0; @@ -1071,7 +1163,25 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, } break; default: - dev_warn(adev->dev, "XGMI RAS error query not supported"); + supported = 0; + break; + } + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + /* check xgmi3x16 pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) { + data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]); + mask_data = + RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, data, + mask_data, &ue_cnt, &ce_cnt, true, true); + } + break; + default: + if (!supported) + dev_warn(adev->dev, "XGMI RAS error query not supported"); break; } @@ -1081,32 +1191,116 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += ce_cnt; } +static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status) +{ + const char *error_str; + int ext_error_code; + + ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status); + + error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ? 
+ xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL; + if (error_str) + dev_info(adev->dev, "%s detected\n", error_str); + + switch (ext_error_code) { + case 0: + return AMDGPU_MCA_ERROR_TYPE_UE; + case 6: + return AMDGPU_MCA_ERROR_TYPE_CE; + default: + return -EINVAL; + } + + return -EINVAL; +} + +static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 mca_base, struct ras_err_data *err_data) +{ + int xgmi_inst = mcm_info->die_id; + u64 status = 0; + + status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS); + if (!MCA_REG__STATUS__VAL(status)) + return; + + switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { + case AMDGPU_MCA_ERROR_TYPE_UE: + amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL); + break; + case AMDGPU_MCA_ERROR_TYPE_CE: + amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL); + break; + default: + break; + } + + WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); +} + +static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data) +{ + struct amdgpu_smuio_mcm_config_info mcm_info = { + .socket_id = adev->smuio.funcs->get_socket_id(adev), + .die_id = xgmi_inst, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++) + __xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data); +} + +static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + + for_each_inst(i, adev->aid_mask) + xgmi_v6_4_0_query_error_count(adev, i, err_data); +} + +static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status); + break; + default: + amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status); + break; + } +} + /* Trigger XGMI/WAFL error */ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if, uint32_t instance_mask) { - int ret = 0; + int ret1, ret2; struct ta_ras_trigger_error_input *block_info = (struct ta_ras_trigger_error_input *)inject_if; if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) dev_warn(adev->dev, "Failed to disallow df cstate"); - if (amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW)) + ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW); + if (ret1 && ret1 != -EOPNOTSUPP) dev_warn(adev->dev, "Failed to disallow XGMI power down"); - ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask); + ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask); if (amdgpu_ras_intr_triggered()) - return ret; + return ret2; - if (amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT)) + ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT); + if (ret1 && ret1 != -EOPNOTSUPP) dev_warn(adev->dev, "Failed to allow XGMI power down"); if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) dev_warn(adev->dev, "Failed to allow df cstate"); - return ret; + return ret2; } struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 41bbabd9ad4d..40d06d32bb74 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1102,6 +1102,7 @@ static void 
gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX); reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT); } + adev->gfx.rlc.rlcg_reg_access_supported = true; } static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) @@ -2738,16 +2739,16 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); break; case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); break; default: break; @@ -3799,6 +3800,27 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, } } + /* handle extra register entries of UE */ + for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || + gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); + + amdgpu_ras_inst_query_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, + gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, + GET_INST(GC, xcc_id), + AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + &ue_count); + } + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); @@ -3838,6 +3860,23 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, } } + /* handle extra register entries of UE */ + for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { + for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { + /* no need to select if instance number is 1 */ + if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || + gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) + gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); + + amdgpu_ras_inst_reset_ras_error_count(adev, + &(gfx_v9_4_3_ue_reg_list[i].reg_entry), + 1, + GET_INST(GC, xcc_id)); + } + } + } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); @@ -4300,7 +4339,7 @@ const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, .major = 9, .minor = 4, - .rev = 0, + .rev = 3, .funcs = &gfx_v9_4_3_ip_funcs, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d8a4fddab9c1..0ec7b061d7c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { 
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); + 1 << vmid, GET_INST(GC, 0)); return; } @@ -672,6 +672,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); if (!amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 4713a62ad586..6dce9b29f675 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); + 1 << vmid, GET_INST(GC, 0)); return; } @@ -637,6 +637,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, base = adev->mmhub.funcs->get_fb_location(adev); + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH); if (!amdgpu_sriov_vf(adev) || diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 7f66954fd302..42e103d7077d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -211,6 +211,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 61ca1a82b651..efc16e580f1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -239,6 +239,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index fa59749c2aef..ff4ae73d27ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -413,6 +413,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; + amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b66c5f7e1c56..bde25eb4ed8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -817,7 +817,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); - u32 j, inv_req, tmp, sem, req, ack; + u32 j, inv_req, tmp, sem, req, ack, inst; const unsigned int eng = 17; struct amdgpu_vmhub *hub; @@ -832,13 +832,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* This is necessary for a HW 
workaround under SRIOV as well * as GFXOFF under bare metal */ - if (adev->gfx.kiq[0].ring.sched.ready && + if (vmhub >= AMDGPU_MMHUB0(0)) + inst = GET_INST(GC, 0); + else + inst = vmhub; + if (adev->gfx.kiq[inst].ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid); + 1 << vmid, inst); return; } @@ -856,9 +860,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ if (vmhub >= AMDGPU_MMHUB0(0)) - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem); + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst); else - tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem); + tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst); if (tmp & 0x1) break; udelay(1); @@ -869,9 +873,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } if (vmhub >= AMDGPU_MMHUB0(0)) - WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req); + WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst); else - WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req); + WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst); /* * Issue a dummy read to wait for the ACK register to @@ -884,9 +888,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, for (j = 0; j < adev->usec_timeout; j++) { if (vmhub >= AMDGPU_MMHUB0(0)) - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack); + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst); else - tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack); + tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst); if (tmp & (1 << vmid)) break; udelay(1); @@ -899,9 +903,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * write with 0 means semaphore release */ if (vmhub >= AMDGPU_MMHUB0(0)) - WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0); + WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst); else - WREG32_SOC15_IP_NO_KIQ(GC, sem, 0); + WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst); } spin_unlock(&adev->gmc.invalidate_lock); @@ -1176,7 +1180,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, if (uncached) { mtype = MTYPE_UC; } else if (ext_coherent) { - mtype = is_local ? MTYPE_CC : MTYPE_UC; + if (adev->rev_id) + mtype = is_local ? MTYPE_CC : MTYPE_UC; + else + mtype = MTYPE_UC; } else if (adev->flags & AMD_IS_APU) { mtype = is_local ? 
mtype_local : MTYPE_NC; } else { @@ -1297,7 +1304,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(mtype_local); - } else { + } else if (adev->rev_id) { /* MTYPE_UC case */ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); @@ -1614,6 +1621,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, { u64 base = adev->mmhub.funcs->get_fb_location(adev); + amdgpu_gmc_set_agp_default(adev, mc); + /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; if (adev->gmc.xgmi.connected_to_cpu) { diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index 3f3a6445c006..49e934975719 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -145,6 +145,10 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev) break; } + /* Do not program registers if VF */ + if (amdgpu_sriov_vf(adev)) + return; + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 9a8ec4d7e333..82b6b62c170b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -654,9 +654,11 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) */ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + if (!amdgpu_sriov_vf(ring->adev)) { + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + } amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, 0, PACKETJ_TYPE0)); @@ -672,9 +674,11 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) */ static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x62a04); + if (!amdgpu_sriov_vf(ring->adev)) { + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x62a04); + } amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, 0, PACKETJ_TYPE0)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index c46bc6aa4f48..0f24af6f2810 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -427,6 +427,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask) { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; + u32 doorbell_offset, doorbell; u32 rb_cntl, ib_cntl; int i, unset = 0; @@ -444,6 +445,18 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + + if (sdma[i]->use_doorbell) { + doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); + doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET); + + doorbell = 
REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA_GFX_DOORBELL_OFFSET, + OFFSET, 0); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset); + } } } @@ -631,12 +644,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i) rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl); WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); - /* Initialize the ring buffer's read and write pointers */ - WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0); - WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0); - WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0); - WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0); - /* set the wb address whether it's enabled or not */ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI, upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); @@ -654,6 +661,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i) /* before programing wptr to a less value, need set minor_ptr_update first */ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1); + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0); + doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET); @@ -2048,7 +2061,7 @@ const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = { .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 4, .minor = 4, - .rev = 0, + .rev = 2, .funcs = &sdma_v4_4_2_ip_funcs, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index da683afa0222..242b24f73c17 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -69,7 +69,7 @@ #define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0) -#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) +#define RREG32_SOC15_IP_NO_KIQ(ip, reg, inst) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ @@ -86,8 +86,8 @@ #define WREG32_SOC15_IP(ip, reg, value) \ __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0) -#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) +#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value, inst) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ @@ -140,7 +140,7 @@ /* for GC only */ #define RREG32_RLC(reg) \ - __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP, 0) #define WREG32_RLC_NO_KIQ(reg, value, hwip) \ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) @@ -204,4 +204,10 @@ + adev->asic_funcs->encode_ext_smn_addressing(ext), \ value) \ +#define RREG64_MCA(ext, mca_base, idx) \ + RREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8)) + +#define WREG64_MCA(ext, mca_base, idx, val) \ + WREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8), val) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 
d5083c549330..48c6efcdeac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -381,6 +381,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE1; case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): return AMD_RESET_METHOD_MODE2; default: if (amdgpu_dpm_is_baco_supported(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 770b4b4e3138..e9c2ff74f0bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -88,7 +88,7 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } -static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) { return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || @@ -96,7 +96,7 @@ static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); } -static bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) { return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index 4885b9fff272..b34b1e358f8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -117,6 +117,9 @@ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ } while (0) +bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status); +bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status); + extern const uint32_t umc_v12_0_channel_idx_tbl[] [UMC_V12_0_UMC_INSTANCE_NUM] diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 58a8f78c003c..a6006f231c65 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -577,8 +577,6 @@ static int uvd_v3_1_sw_init(void *handle) ptr += ucode_len; memcpy(&adev->uvd.keyselect, ptr, 4); - r = amdgpu_uvd_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index d3b1e31f5450..1aa09ad7bbe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -127,8 +127,6 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 5a8116437abf..f8b229b75435 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -125,8 +125,6 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 74c09230aeb3..a9a6880f44e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -432,8 +432,6 @@ static int uvd_v6_0_sw_init(void *handle) } } - r = amdgpu_uvd_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 
1c42cf10cc29..6068b784dc69 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -480,10 +480,6 @@ static int uvd_v7_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_entity_init(adev); - if (r) - return r; - r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 67eb01fef789..a08e7abca423 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -441,8 +441,6 @@ static int vce_v2_0_sw_init(void *handle) return r; } - r = amdgpu_vce_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 18f6e62af339..f4760748d349 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -450,8 +450,6 @@ static int vce_v3_0_sw_init(void *handle) return r; } - r = amdgpu_vce_entity_init(adev); - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index e0b70cd3b697..06d787385ad4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -486,11 +486,6 @@ static int vce_v4_0_sw_init(void *handle) return r; } - - r = amdgpu_vce_entity_init(adev); - if (r) - return r; - r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index fbf053001af9..7a33e06f5c90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1416,8 +1416,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. */ - if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) + if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) { + if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) { + pr_debug("SRIOV platform xnack not supported\n"); + return false; + } continue; + } /* GFXv10 and later GPUs do not support shader preemption * during page faults. This can lead to poor QoS for queue diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index e67d06a42809..f2f3c338fd94 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1255,9 +1255,11 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : - (amdgpu_mtype_local == 2 || ext_coherent ? - AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW); + if (ext_coherent) + mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC; + else + mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : + amdgpu_mtype_local == 2 ? 
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 9cd83b780102..ed784cf27d39 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1216,6 +1216,9 @@ bool dm_helpers_dp_handle_test_pattern_request( } + pipe_ctx->stream->test_pattern.type = test_pattern; + pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 0f580ea37576..133af994a08c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -37,7 +37,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_encoder.h> #include <drm/drm_atomic.h> -#include "dcn10/dcn10_optc.h" +#include "dc/inc/hw/optc.h" #include "dc/inc/core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index f80917f6153b..0fa4fcd00de2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -111,17 +111,21 @@ static int dcn35_get_active_display_cnt_wa( return display_count; } -static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? 
&context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; if (disable) { @@ -301,11 +305,11 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn35_disable_otg_wa(clk_mgr_base, context, true); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn35_disable_otg_wa(clk_mgr_base, context, false); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } @@ -814,7 +818,8 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) struct dc *dc = clk_mgr_base->ctx->dc; uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - if (dc->config.disable_ips == 0) { + if (dc->config.disable_ips == DMUB_IPS_ENABLE || + dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { val |= DMUB_IPS1_ALLOW_MASK; val |= DMUB_IPS2_ALLOW_MASK; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { @@ -1114,7 +1119,7 @@ void dcn35_clk_mgr_construct( dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); - if (ctx->dc->config.disable_ips == 0) { + if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { bool ips_support = false; /*avoid call pmfw at init*/ @@ -1127,7 +1132,7 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_hpo_power_gate = false; } else { /*let's reset the config control flag*/ - ctx->dc->config.disable_ips = 1; /*pmfw not support it, disable it all*/ + ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ } } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4360a696f10a..7b9bf5cb4529 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2582,6 +2582,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->gamut_remap_matrix) update_flags->bits.gamut_remap_change = 1; + if (u->blend_tf) + update_flags->bits.gamma_change = 1; + if (u->gamma) { enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; @@ -4113,8 +4116,17 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, bool success = false; struct dc_state *minimal_transition_context; struct pipe_split_policy_backup policy; + struct mall_temp_config mall_temp_config; /* commit based on new context */ + /* Since all phantom pipes are removed in full validation, + * we have to save and restore the subvp/mall config when + * we do a minimal transition since the flags marking the + * pipe as subvp/phantom will be cleared (dc copy constructor + * creates a shallow copy). 
+ */ + if (dc->res_pool->funcs->save_mall_state) + dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); minimal_transition_context = create_minimal_transition_state(dc, context, &policy); if (minimal_transition_context) { @@ -4123,9 +4135,20 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, dc->hwss.is_pipe_topology_transition_seamless( dc, minimal_transition_context, context)) { DC_LOG_DC("%s base = new state\n", __func__); + success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; } release_minimal_transition_state(dc, minimal_transition_context, &policy); + if (dc->res_pool->funcs->restore_mall_state) + dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); + /* If we do a minimal transition with plane removal and the context + * has subvp we also have to retain back the phantom stream / planes + * since the refcount is decremented as part of the min transition + * (we commit a state with no subvp, so the phantom streams / planes + * had to be removed). + */ + if (dc->res_pool->funcs->retain_phantom_pipes) + dc->res_pool->funcs->retain_phantom_pipes(dc, context); } if (!success) { @@ -4884,7 +4907,7 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) if (dc->debug.disable_idle_power_optimizations) return; - if (dc->caps.ips_support && dc->config.disable_ips) + if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return; if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) @@ -4905,7 +4928,7 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc) if (dc->debug.disable_idle_power_optimizations) return false; - if (!dc->caps.ips_support || dc->config.disable_ips) + if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return false; if (dc->hwss.get_idle_state) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6e54ca055fcb..9316b737a8ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -49,7 +49,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.256" +#define DC_VER "3.2.259" #define MAX_SURFACES 3 #define MAX_PLANES 6 diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index ba142bef626b..e4c007203318 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -120,6 +120,80 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, } } +bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dmub_srv *dmub; + enum dmub_status status; + int i; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dmub = dc_dmub_srv->dmub; + + for (i = 0 ; i < count; i++) { + // Queue command + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + + if (status == DMUB_STATUS_QUEUE_FULL) { + /* Execute and wait for queue to become empty again. */ + dmub_srv_cmd_execute(dmub); + dmub_srv_wait_for_idle(dmub, 100000); + + /* Requeue the command. 
*/ + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + } + + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + } + + status = dmub_srv_cmd_execute(dmub); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + + return true; +} + +bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, + enum dm_dmub_wait_type wait_type, + union dmub_rb_cmd *cmd_list) +{ + struct dmub_srv *dmub; + enum dmub_status status; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dmub = dc_dmub_srv->dmub; + + // Wait for DMUB to process command + if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { + status = dmub_srv_wait_for_idle(dmub, 100000); + + if (status != DMUB_STATUS_OK) { + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + + // Copy data back from ring buffer into command + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + } + + return true; +} + bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 31150b214394..d4a60f53faab 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -56,6 +56,14 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list); + +bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, + enum dm_dmub_wait_type wait_type, + union dmub_rb_cmd *cmd_list); + bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 35ae245ef722..eeeeeef4d717 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -142,7 +142,8 @@ enum dp_test_link_rate { DP_TEST_LINK_RATE_HBR3 = 0x1E, DP_TEST_LINK_RATE_UHBR10 = 0x01, DP_TEST_LINK_RATE_UHBR20 = 0x02, - DP_TEST_LINK_RATE_UHBR13_5 = 0x03, + DP_TEST_LINK_RATE_UHBR13_5_LEGACY = 0x03, /* For backward compatibility*/ + DP_TEST_LINK_RATE_UHBR13_5 = 0x04, }; struct dc_link_settings { diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 40dc51853d62..cea666ea66c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1037,7 +1037,9 @@ struct replay_config { bool replay_smu_opt_supported; // SMU optimization is supported unsigned int replay_enable_option; // Replay enablement option uint32_t debug_flags; // Replay debug flags - bool replay_timing_sync_supported; // Replay desync is supported + bool replay_timing_sync_supported; // Replay desync is supported + bool force_disable_desync_error_check; // Replay desync 
is supported + bool received_desync_error_hpd; //Replay Received Desync Error HPD. union replay_error_status replay_error_status; // Replay error status }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index c50aa30614be..051e4c2b4cf2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -128,21 +128,6 @@ SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) -#define ABM_DCN32_REG_LIST(id)\ - SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ - SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ - SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \ - SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \ - SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \ - SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \ - SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \ - SRI(BL1_PWM_USER_LEVEL, ABM, id), \ - SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ - SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ - SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ - SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ - NBIO_SR(BIOS_SCRATCH_2) - #define ABM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index aaf6c981fd9e..ab81594a7fad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -26,7 +26,7 @@ #ifndef __DC_TIMING_GENERATOR_DCN10_H__ #define __DC_TIMING_GENERATOR_DCN10_H__ -#include "timing_generator.h" +#include "optc.h" #define DCN10TG_FROM_TG(tg)\ container_of(tg, struct optc, base) @@ -594,190 +594,6 @@ struct dcn_optc_mask { TG_REG_FIELD_LIST_DCN3_5(uint32_t) }; -struct optc { - struct timing_generator base; - - const struct dcn_optc_registers *tg_regs; - const struct dcn_optc_shift *tg_shift; - const struct dcn_optc_mask *tg_mask; - - int opp_count; - - uint32_t max_h_total; - uint32_t max_v_total; - - uint32_t min_h_blank; - - uint32_t min_h_sync_width; - uint32_t min_v_sync_width; - uint32_t min_v_blank; - uint32_t min_v_blank_interlace; - - int vstartup_start; - int vupdate_offset; - int vupdate_width; - int vready_offset; - struct dc_crtc_timing orginal_patched_timing; - enum signal_type signal; -}; - void dcn10_timing_generator_init(struct optc *optc); -struct dcn_otg_state { - uint32_t v_blank_start; - uint32_t v_blank_end; - uint32_t v_sync_a_pol; - uint32_t v_total; - uint32_t v_total_max; - uint32_t v_total_min; - uint32_t v_total_min_sel; - uint32_t v_total_max_sel; - uint32_t v_sync_a_start; - uint32_t v_sync_a_end; - uint32_t h_blank_start; - uint32_t h_blank_end; - uint32_t h_sync_a_start; - uint32_t h_sync_a_end; - uint32_t h_sync_a_pol; - uint32_t h_total; - uint32_t underflow_occurred_status; - uint32_t otg_enabled; - uint32_t blank_enabled; - uint32_t vertical_interrupt1_en; - uint32_t vertical_interrupt1_line; - uint32_t vertical_interrupt2_en; - uint32_t vertical_interrupt2_line; -}; - -void optc1_read_otg_state(struct optc *optc1, - struct dcn_otg_state *s); - -bool optc1_get_hw_timing(struct timing_generator *tg, - struct dc_crtc_timing *hw_crtc_timing); - -bool optc1_validate_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *timing); - -void optc1_program_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width, - const enum signal_type signal, - bool use_vbios); - -void 
optc1_setup_vertical_interrupt0( - struct timing_generator *optc, - uint32_t start_line, - uint32_t end_line); -void optc1_setup_vertical_interrupt1( - struct timing_generator *optc, - uint32_t start_line); -void optc1_setup_vertical_interrupt2( - struct timing_generator *optc, - uint32_t start_line); - -void optc1_program_global_sync( - struct timing_generator *optc, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width); - -bool optc1_disable_crtc(struct timing_generator *optc); - -bool optc1_is_counter_moving(struct timing_generator *optc); - -void optc1_get_position(struct timing_generator *optc, - struct crtc_position *position); - -uint32_t optc1_get_vblank_counter(struct timing_generator *optc); - -void optc1_get_crtc_scanoutpos( - struct timing_generator *optc, - uint32_t *v_blank_start, - uint32_t *v_blank_end, - uint32_t *h_position, - uint32_t *v_position); - -void optc1_set_early_control( - struct timing_generator *optc, - uint32_t early_cntl); - -void optc1_wait_for_state(struct timing_generator *optc, - enum crtc_state state); - -void optc1_set_blank(struct timing_generator *optc, - bool enable_blanking); - -bool optc1_is_blanked(struct timing_generator *optc); - -void optc1_program_blank_color( - struct timing_generator *optc, - const struct tg_color *black_color); - -bool optc1_did_triggered_reset_occur( - struct timing_generator *optc); - -void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst); - -void optc1_disable_reset_trigger(struct timing_generator *optc); - -void optc1_lock(struct timing_generator *optc); - -void optc1_unlock(struct timing_generator *optc); - -void optc1_enable_optc_clock(struct timing_generator *optc, bool enable); - -void optc1_set_drr( - struct timing_generator *optc, - const struct drr_params *params); - -void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); - -void optc1_set_static_screen_control( - struct timing_generator *optc, - uint32_t event_triggers, - uint32_t num_frames); - -void optc1_program_stereo(struct timing_generator *optc, - const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags); - -bool optc1_is_stereo_left_eye(struct timing_generator *optc); - -void optc1_clear_optc_underflow(struct timing_generator *optc); - -void optc1_tg_init(struct timing_generator *optc); - -bool optc1_is_tg_enabled(struct timing_generator *optc); - -bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); - -void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); - -void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); - -bool optc1_get_otg_active_size(struct timing_generator *optc, - uint32_t *otg_active_width, - uint32_t *otg_active_height); - -void optc1_enable_crtc_reset( - struct timing_generator *optc, - int source_tg_inst, - struct crtc_trigger_info *crtc_tp); - -bool optc1_configure_crc(struct timing_generator *optc, - const struct crc_params *params); - -bool optc1_get_crc(struct timing_generator *optc, - uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb); - -bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); - -void optc1_set_vtg_params(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); - #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 5eebe7f03ddc..c9ae2d8f0096 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -137,7 +137,15 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2; } - // TODO DSC: This is actually image width limitation, not a slice width. This should be added to the criteria to use ODM. + /* For pixel clock bigger than a single-pipe limit needing four engines ODM 4:1, which then quardruples our + * throughput and number of slices + */ + if (pixel_clock_100Hz > DCN20_MAX_PIXEL_CLOCK_Mhz*10000*2) { + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; + dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 4; + } + dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c index addedcfd1238..479f3683c0b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c @@ -325,6 +325,43 @@ static void dccg35_set_dpstreamclk( } } +static void dccg35_set_physymclk_root_clock_gating( + struct dccg *dccg, + int phy_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + return; + + switch (phy_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 4: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 
1 : 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + static void dccg35_set_physymclk( struct dccg *dccg, int phy_inst, @@ -340,16 +377,10 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 1, PHYASYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYASYMCLK_ROOT_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 0, PHYASYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYASYMCLK_ROOT_GATE_DISABLE, 0); } break; case 1: @@ -357,16 +388,10 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 1, PHYBSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYBSYMCLK_ROOT_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 0, PHYBSYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYBSYMCLK_ROOT_GATE_DISABLE, 0); } break; case 2: @@ -374,16 +399,10 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 1, PHYCSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYCSYMCLK_ROOT_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 0, PHYCSYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYCSYMCLK_ROOT_GATE_DISABLE, 0); } break; case 3: @@ -391,16 +410,10 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 1, PHYDSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYDSYMCLK_ROOT_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 0, PHYDSYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYDSYMCLK_ROOT_GATE_DISABLE, 0); } break; case 4: @@ -408,16 +421,10 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 1, PHYESYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYESYMCLK_ROOT_GATE_DISABLE, 1); } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 0, PHYESYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYESYMCLK_ROOT_GATE_DISABLE, 0); } break; default: @@ -490,8 +497,8 @@ void dccg35_init(struct dccg *dccg) if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) for (otg_inst = 0; otg_inst < 5; otg_inst++) - dccg35_set_physymclk(dccg, otg_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg35_set_physymclk_root_clock_gating(dccg, otg_inst, + false); /* dccg35_enable_global_fgcg_rep( dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits @@ -754,7 +761,9 @@ static const struct dccg_funcs dccg35_funcs = { .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg35_set_physymclk, + .set_physymclk_root_clock_gating = 
dccg35_set_physymclk_root_clock_gating, .set_dtbclk_dto = dccg35_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index 0f60c40e1fc5..46f71ff08fd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -332,6 +332,13 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on) pg_cntl->pg_res_enable[PG_DCIO] = power_on; } +void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on) +{ + struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); + + REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0); +} + static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl) { struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); @@ -501,7 +508,8 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = { .mpcc_pg_control = pg_cntl35_mpcc_pg_control, .opp_pg_control = pg_cntl35_opp_pg_control, .optc_pg_control = pg_cntl35_optc_pg_control, - .dwb_pg_control = pg_cntl35_dwb_pg_control + .dwb_pg_control = pg_cntl35_dwb_pg_control, + .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22 }; struct pg_cntl *pg_cntl35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h index 3de240884d22..069dae08e222 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h @@ -183,6 +183,7 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on); void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl); +void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on); struct pg_cntl *pg_cntl35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index 3c7c810bab1f..c7e011d26d41 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -610,7 +610,23 @@ static struct dce_hwseq_registers hwseq_reg; HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh) + HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + 
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) @@ -708,7 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = { .i2c = true, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled .dscl = true, - .cm = true, + .cm = false, .mpc = true, .optc = true, .vpg = true, @@ -719,14 +735,14 @@ static const struct dc_debug_options debug_defaults_drv = { .bits = { .dpp = true, .dsc = true,/*dscclk and dsc pg*/ - .hdmistream = false, - .hdmichar = false, - .dpstream = false, - .symclk32_se = false, - .symclk32_le = false, - .symclk_fe = false, - .physymclk = false, - .dpiasymclk = false, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, } }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, @@ -741,7 +757,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_boot_optimizations = false, .disable_unbounded_requesting = false, .disable_mem_low_power = false, - .enable_hpo_pg_support = false, //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index ad741a723c0e..3686f1e7de3a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -5128,7 +5128,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l ViewportExceedsSurface = true; if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 - && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { + && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) { ViewportExceedsSurface = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index d2046e770c50..1a2b24cc6b61 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -55,10 +55,11 @@ struct dc_pipe_mapping_scratch { struct dc_plane_pipe_pool pipe_pool; }; -static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -66,7 +67,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta for (i = 0; i < state->stream_count; i++) { if (state->streams[i]->stream_id == 
stream_id) { for (j = 0; j < state->stream_status[i].plane_count; j++) { - if (state->stream_status[i].plane_states[j] == plane) { + if (state->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { *plane_id = (i << 16) | j; return true; } @@ -123,8 +125,9 @@ static struct pipe_ctx *find_master_pipe_of_plane(struct dml2_context *ctx, unsigned int plane_id_assigned_to_pipe; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, - state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) { + if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state, + state->res_ctx.pipe_ctx[i].stream->stream_id, + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) { if (plane_id_assigned_to_pipe == plane_id) return &state->res_ctx.pipe_ctx[i]; } @@ -141,8 +144,9 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx, unsigned int plane_id_assigned_to_pipe; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, - state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) { + if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state, + state->res_ctx.pipe_ctx[i].stream->stream_id, + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) { if (plane_id_assigned_to_pipe == plane_id) pipes[num_found++] = i; } @@ -609,6 +613,7 @@ static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d const struct dc_plane_state *plane, int odm_factor, int mpc_factor, + int plane_index, struct dc_plane_pipe_pool *pipe_pool, const struct dc_state *existing_state) { @@ -620,7 +625,7 @@ static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d unsigned int next_pipe_to_assign; int odm_slice, mpc_slice; - if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) { + if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) { ASSERT(false); return master_pipe; } @@ -667,12 +672,16 @@ static void free_pipe(struct pipe_ctx *pipe) } static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, - const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id) + const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id, int plane_index) { int i; + bool is_plane_duplicate = ctx->v20.scratch.plane_duplicate_exists; + for (i = 0; i < ctx->config.dcn_pipe_count; i++) { if (state->res_ctx.pipe_ctx[i].plane_state == plane && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id && + (!is_plane_duplicate || (is_plane_duplicate && + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx] == plane_index)) && !is_pipe_used(pool, state->res_ctx.pipe_ctx[i].pipe_idx)) { free_pipe(&state->res_ctx.pipe_ctx[i]); } @@ -717,19 +726,20 @@ static void map_pipes_for_stream(struct dml2_context *ctx, struct dc_state *stat } static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, const struct dc_stream_state *stream, const 
struct dc_plane_state *plane, - struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state) + int plane_index, struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state) { int odm_slice_index; unsigned int plane_id; struct pipe_ctx *master_pipe = NULL; int i; - if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) { + if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) { ASSERT(false); return; } - master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, scratch->mpc_info.mpc_factor, &scratch->pipe_pool, existing_state); + master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, + scratch->mpc_info.mpc_factor, plane_index, &scratch->pipe_pool, existing_state); sort_pipes_for_splitting(&scratch->pipe_pool); for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) { @@ -755,7 +765,7 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state } } - free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id); + free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index); } static unsigned int get_mpc_factor(struct dml2_context *ctx, @@ -768,7 +778,7 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, unsigned int plane_id; unsigned int cfg_idx; - get_plane_id(state, status->plane_states[plane_idx], stream_id, &plane_id); + get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id); cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); if (ctx->architecture == dml2_architecture_20) return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; @@ -911,26 +921,14 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s unsigned int stream_id; const unsigned int *ODMMode, *DPPPerSurface; - unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; struct dc_pipe_mapping_scratch scratch; if (ctx->config.map_dc_pipes_with_callbacks) return map_dc_pipes_with_callbacks( ctx, state, disp_cfg, mapping, existing_state); - if (ctx->architecture == dml2_architecture_21) { - /* - * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes. - * As data cannot be directly extracted in const pointers, assign these arrays to const pointers before proceeding to - * maximize the reuse of existing code. Const pointers are required because dml2.0 dml_display_cfg_st is const. - * - */ - ODMMode = (const unsigned int *)odm_mode_array; - DPPPerSurface = (const unsigned int *)dpp_per_surface_array; - } else { - ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; - DPPPerSurface = disp_cfg->hw.DPPPerSurface; - } + ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; + DPPPerSurface = disp_cfg->hw.DPPPerSurface; for (stream_index = 0; stream_index < state->stream_count; stream_index++) { memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch)); @@ -958,8 +956,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s for (plane_index = 0; plane_index < state->stream_status[stream_index].plane_count; plane_index++) { // Planes are ordered top to bottom. 
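Editor's sketch (not part of the patch): the get_plane_id() hunks above pack a composite id from the stream index (upper 16 bits) and the plane slot (lower 16 bits), and the new plane_index argument disambiguates the case where the same dc_plane_state pointer is attached twice to one stream. The standalone C program below mirrors that pattern under simplified, invented types (fake_plane, fake_stream_status); it is an illustration of the idea only, not the DC data structures or the real function.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_plane { int dummy; };

    struct fake_stream_status {
        int plane_count;
        const struct fake_plane *plane_states[4];
    };

    /* Mimics the duplicate-aware lookup: without dup_exists the first pointer
     * match wins; with it, only the slot equal to plane_index is accepted. */
    static bool fake_get_plane_id(const struct fake_stream_status *ss, int stream_idx,
                                  const struct fake_plane *plane, bool dup_exists,
                                  unsigned int plane_index, unsigned int *plane_id)
    {
        for (int j = 0; j < ss->plane_count; j++) {
            if (ss->plane_states[j] == plane &&
                (!dup_exists || j == (int)plane_index)) {
                /* stream index in bits 31:16, plane slot in bits 15:0 */
                *plane_id = ((unsigned int)stream_idx << 16) | (unsigned int)j;
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        struct fake_plane p;
        /* the same plane pointer appears in two slots of one stream */
        struct fake_stream_status ss = { 2, { &p, &p } };
        unsigned int id;

        if (fake_get_plane_id(&ss, 0, &p, true, 1, &id))
            printf("plane_id = 0x%08x (slot %u)\n", id, id & 0xffff);
        return 0;
    }

With the duplicate flag set and plane_index = 1, the sketch resolves to slot 1 rather than the first pointer hit, which is the behaviour the added "(!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))" condition provides in the patch.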
- if (get_plane_id(state, state->stream_status[stream_index].plane_states[plane_index], - stream_id, &plane_id)) { + if (get_plane_id(ctx, state, state->stream_status[stream_index].plane_states[plane_index], + stream_id, plane_index, &plane_id)) { plane_disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id); // Setup mpc_info for this plane @@ -983,7 +981,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s // Clear the pool assignment scratch (which is per plane) memset(&scratch.pipe_pool, 0, sizeof(struct dc_plane_pipe_pool)); - map_pipes_for_plane(ctx, state, state->streams[stream_index], state->stream_status[stream_index].plane_states[plane_index], &scratch, existing_state); + map_pipes_for_plane(ctx, state, state->streams[stream_index], + state->stream_status[stream_index].plane_states[plane_index], plane_index, &scratch, existing_state); } else { // Plane ID cannot be generated, therefore no DML mapping can be performed. ASSERT(false); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index ed5b767d46e0..1cf8a884c0fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -75,6 +75,8 @@ struct dml2_dml_to_dc_pipe_mapping { bool dml_pipe_idx_to_stream_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; unsigned int dml_pipe_idx_to_plane_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; bool dml_pipe_idx_to_plane_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; + unsigned int dml_pipe_idx_to_plane_index[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; + bool dml_pipe_idx_to_plane_index_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; }; struct dml2_wrapper_scratch { @@ -96,6 +98,7 @@ struct dml2_wrapper_scratch { struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping; bool enable_flexible_pipe_mapping; + bool plane_duplicate_exists; }; struct dml2_helper_det_policy_scratch { @@ -104,7 +107,6 @@ struct dml2_helper_det_policy_scratch { enum dml2_architecture { dml2_architecture_20, - dml2_architecture_21 }; struct dml2_context { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 89836f175a13..75171bee6f71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -231,6 +231,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->num_chans = 4; out->round_trip_ping_latency_dcfclk_cycles = 106; out->smn_latency_us = 2; + out->dispclk_dppclk_vco_speed_mhz = 3600; break; case dml_project_dcn351: @@ -930,10 +931,11 @@ static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml return location; } -static bool get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *context, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -941,7 +943,8 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->stream_id == stream_id) { for (j = 0; j < 
context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { + if (context->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { *plane_id = (i << 16) | j; return true; } @@ -953,13 +956,13 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s } static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2, const struct dc_plane_state *plane, - const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id) + const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index) { unsigned int plane_id; int i = 0; int location = -1; - if (!get_plane_id(context, plane, stream_id, &plane_id)) { + if (!get_plane_id(context->bw_ctx.dml2, context, plane, stream_id, plane_index, &plane_id)) { ASSERT(false); return -1; } @@ -990,7 +993,41 @@ static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struc } } -void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) +static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) +{ + unsigned int i; + unsigned int pipe_index = 0; + unsigned int plane_index = 0; + struct dml2_dml_to_dc_pipe_mapping *dml_to_dc_pipe_mapping = &dml2->v20.scratch.dml_to_dc_pipe_mapping; + + for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[i] = false; + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[i] = 0; + } + + for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (!pipe || !pipe->stream || !pipe->plane_state) + continue; + + while (pipe) { + pipe_index = pipe->pipe_idx; + + if (pipe->stream && dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] == false) { + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[pipe_index] = plane_index; + plane_index++; + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] = true; + } + + pipe = pipe->bottom_pipe; + } + + plane_index = 0; + } +} + +void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { int i = 0, j = 0; int disp_cfg_stream_location, disp_cfg_plane_location; @@ -1007,6 +1044,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4; dml_dispcfg->plane.HostVMEnable = false; + dml2_populate_pipe_to_plane_index_mapping(dml2, context); + for (i = 0; i < context->stream_count; i++) { disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg); @@ -1043,7 +1082,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d } else { for (j = 0; j < context->stream_status[i].plane_count; j++) { disp_cfg_plane_location = map_plane_to_dml_display_cfg(dml2, - context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id); + context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id, j); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_surfaces++; @@ -1067,7 +1106,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d 
dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location; - if (get_plane_id(context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id, + if (get_plane_id(dml2, context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id, j, &dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h index dac6d27b14cd..d764773938f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h @@ -34,7 +34,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, void dml2_translate_ip_params(const struct dc *in_dc, struct ip_params_st *out); void dml2_translate_socbb_params(const struct dc *in_dc, struct soc_bounding_box_st *out); void dml2_translate_soc_states(const struct dc *in_dc, struct soc_states_st *out, int num_states); -void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg); +void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg); void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, struct _vcs_dpi_dml_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_dml_display_ttu_regs_st *disp_ttu_regs, struct pipe_ctx *out); bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 69fd96f4f3b0..2498b8341199 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -209,10 +209,11 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int return -1; } -static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -220,7 +221,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta for (i = 0; i < state->stream_count; i++) { if (state->streams[i]->stream_id == stream_id) { for (j = 0; j < state->stream_status[i].plane_count; j++) { - if (state->stream_status[i].plane_states[j] == plane) { + if (state->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { *plane_id = (i << 16) | j; return true; } @@ -304,8 +306,9 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont * there is a need to know which DML pipe index maps to which DC pipe. The code below * finds a dml_pipe_index from the plane id if a plane is valid. If a plane is not valid then * it finds a dml_pipe_index from the stream id. 
*/ - if (get_plane_id(context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state, - context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id, &plane_id)) { + if (get_plane_id(in_ctx, context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state, + context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id, + in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[context->res_ctx.pipe_ctx[dc_pipe_ctx_index].pipe_idx], &plane_id)) { dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id); } else { dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); @@ -445,8 +448,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc for (i = 0; i < MAX_PIPES; i++) { if (!display_state->res_ctx.pipe_ctx[i].stream) continue; - if (get_plane_id(display_state, display_state->res_ctx.pipe_ctx[i].plane_state, - display_state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id)) + if (get_plane_id(in_ctx, display_state, display_state->res_ctx.pipe_ctx[i].plane_state, + display_state->res_ctx.pipe_ctx[i].stream->stream_id, + in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[display_state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id)) dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id); else dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 0a06bf3b135a..8f231418870f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -639,7 +639,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s return result; } -static bool dml2_validate_only(const struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context) { struct dml2_context *dml2 = context->bw_ctx.dml2; unsigned int result = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 3966845c7694..e8b5f17beb96 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -512,6 +512,11 @@ static bool intersect_dsc_caps( dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; dsc_common_caps->slice_caps.bits.NUM_SLICES_8 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + dsc_common_caps->slice_caps.bits.NUM_SLICES_12 = + dsc_sink_caps->slice_caps1.bits.NUM_SLICES_12 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_12; + dsc_common_caps->slice_caps.bits.NUM_SLICES_16 = + dsc_sink_caps->slice_caps2.bits.NUM_SLICES_16 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_16; + if (!dsc_common_caps->slice_caps.raw) return false; @@ -703,6 +708,12 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av if (slice_caps.bits.NUM_SLICES_8) available_slices[idx++] = 8; + if (slice_caps.bits.NUM_SLICES_12) + available_slices[idx++] = 12; + + if (slice_caps.bits.NUM_SLICES_16) + available_slices[idx++] = 16; + return idx; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h index 2fefdf40612d..44b4df6469d1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h @@ 
-1183,7 +1183,23 @@ struct dce_hwseq_registers { type LONO_FGCG_REP_DIS;\ type LONO_DISPCLK_GATE_DISABLE;\ type LONO_SOCCLK_GATE_DISABLE;\ - type LONO_DMCUBCLK_GATE_DISABLE; + type LONO_DMCUBCLK_GATE_DISABLE;\ + type SYMCLKA_FE_GATE_DISABLE;\ + type SYMCLKB_FE_GATE_DISABLE;\ + type SYMCLKC_FE_GATE_DISABLE;\ + type SYMCLKD_FE_GATE_DISABLE;\ + type SYMCLKE_FE_GATE_DISABLE;\ + type HDMICHARCLK0_GATE_DISABLE;\ + type SYMCLKA_GATE_DISABLE;\ + type SYMCLKB_GATE_DISABLE;\ + type SYMCLKC_GATE_DISABLE;\ + type SYMCLKD_GATE_DISABLE;\ + type SYMCLKE_GATE_DISABLE;\ + type PHYASYMCLK_ROOT_GATE_DISABLE;\ + type PHYBSYMCLK_ROOT_GATE_DISABLE;\ + type PHYCSYMCLK_ROOT_GATE_DISABLE;\ + type PHYDSYMCLK_ROOT_GATE_DISABLE;\ + type PHYESYMCLK_ROOT_GATE_DISABLE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 1b9f21fd4f17..6a65af8c36b9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -615,12 +615,6 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); - } - - /* Today only FPO uses cursor P-State force. Only clear cursor P-State force - * if it's not FPO. - */ - if (!pipe->stream || !pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); } @@ -632,17 +626,10 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || + pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); - } - - if (pipe->stream && pipe->stream->fpo_in_use) { - if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) - hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); - /* For now only force cursor p-state disallow for FPO - * Needs to be added for subvp once FW side gets updated - */ if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 34737d60b965..5a8258287438 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -138,16 +138,25 @@ void dcn35_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0x3F000000); - REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf); - //dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu); if (!dcb->funcs->is_accelerated_mode(dcb)) { /*this calls into dmubfw to do the init*/ hws->funcs.bios_golden_init(dc); } + + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + /* Disable gating for PHYASYMCLK. 
This will be enabled in dccg if needed */ + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, + PHYBSYMCLK_ROOT_GATE_DISABLE, 1, + PHYCSYMCLK_ROOT_GATE_DISABLE, 1, + PHYDSYMCLK_ROOT_GATE_DISABLE, 1, + PHYESYMCLK_ROOT_GATE_DISABLE, 1); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf); + // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); @@ -274,7 +283,19 @@ void dcn35_init_hw(struct dc *dc) if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0, + SYMCLKB_FE_GATE_DISABLE, 0, + SYMCLKC_FE_GATE_DISABLE, 0, + SYMCLKD_FE_GATE_DISABLE, 0, + SYMCLKE_FE_GATE_DISABLE, 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0); + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0, + SYMCLKB_GATE_DISABLE, 0, + SYMCLKC_GATE_DISABLE, 0, + SYMCLKD_GATE_DISABLE, 0, + SYMCLKE_GATE_DISABLE, 0); + REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } @@ -311,6 +332,9 @@ void dcn35_init_hw(struct dc *dc) if (dc->res_pool->pg_cntl) { if (dc->res_pool->pg_cntl->funcs->init_pg_status) dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl); + + if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22) + dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false); } } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 13f12f2a3f81..ce2f0c0e82bd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -141,6 +141,11 @@ struct dccg_funcs { enum physymclk_clock_source clk_src, bool force_enable); + void (*set_physymclk_root_clock_gating)( + struct dccg *dccg, + int phy_inst, + bool enable); + void (*set_dtbclk_dto)( struct dccg *dccg, const struct dtbclk_dto_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index d7b8d586b523..4b27f29d0d80 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -76,6 +76,8 @@ union dsc_enc_slice_caps { uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */ uint8_t NUM_SLICES_4 : 1; uint8_t NUM_SLICES_8 : 1; + uint8_t NUM_SLICES_12 : 1; + uint8_t NUM_SLICES_16 : 1; } bits; uint8_t raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h new file mode 100644 index 000000000000..9a8bf6ec70ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/** + * DOC: overview + * + * Output Pipe Timing Combiner (OPTC) includes two major functional blocks: + * Output Data Mapper (ODM) and Output Timing Generator (OTG). + * + * - ODM: It is Output Data Mapping block. It can combine input data from + * multiple OPP data pipes into one single data stream or split data from one + * OPP data pipe into multiple data streams or just bypass OPP data to DIO. + * - OTG: It is Output Timing Generator. It generates display timing signals to + * drive the display output. + */ + +#ifndef __DC_OPTC_H__ +#define __DC_OPTC_H__ + +#include "timing_generator.h" + +struct optc { + struct timing_generator base; + + const struct dcn_optc_registers *tg_regs; + const struct dcn_optc_shift *tg_shift; + const struct dcn_optc_mask *tg_mask; + + int opp_count; + + uint32_t max_h_total; + uint32_t max_v_total; + + uint32_t min_h_blank; + + uint32_t min_h_sync_width; + uint32_t min_v_sync_width; + uint32_t min_v_blank; + uint32_t min_v_blank_interlace; + + int vstartup_start; + int vupdate_offset; + int vupdate_width; + int vready_offset; + struct dc_crtc_timing orginal_patched_timing; + enum signal_type signal; +}; + +struct dcn_otg_state { + uint32_t v_blank_start; + uint32_t v_blank_end; + uint32_t v_sync_a_pol; + uint32_t v_total; + uint32_t v_total_max; + uint32_t v_total_min; + uint32_t v_total_min_sel; + uint32_t v_total_max_sel; + uint32_t v_sync_a_start; + uint32_t v_sync_a_end; + uint32_t h_blank_start; + uint32_t h_blank_end; + uint32_t h_sync_a_start; + uint32_t h_sync_a_end; + uint32_t h_sync_a_pol; + uint32_t h_total; + uint32_t underflow_occurred_status; + uint32_t otg_enabled; + uint32_t blank_enabled; + uint32_t vertical_interrupt1_en; + uint32_t vertical_interrupt1_line; + uint32_t vertical_interrupt2_en; + uint32_t vertical_interrupt2_line; +}; + +void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); + +bool optc1_get_hw_timing(struct timing_generator *tg, struct dc_crtc_timing *hw_crtc_timing); + +bool optc1_validate_timing(struct timing_generator *optc, + const struct dc_crtc_timing *timing); + +void optc1_program_timing(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width, + const enum signal_type signal, + bool use_vbios); + +void optc1_setup_vertical_interrupt0(struct timing_generator *optc, + uint32_t start_line, + uint32_t end_line); + +void optc1_setup_vertical_interrupt1(struct timing_generator *optc, + uint32_t start_line); + +void optc1_setup_vertical_interrupt2(struct timing_generator *optc, + uint32_t start_line); + +void optc1_program_global_sync(struct timing_generator *optc, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width); + +bool optc1_disable_crtc(struct timing_generator *optc); + +bool optc1_is_counter_moving(struct timing_generator *optc); + +void optc1_get_position(struct timing_generator *optc, + struct crtc_position *position); + +uint32_t 
optc1_get_vblank_counter(struct timing_generator *optc); + +void optc1_get_crtc_scanoutpos(struct timing_generator *optc, + uint32_t *v_blank_start, + uint32_t *v_blank_end, + uint32_t *h_position, + uint32_t *v_position); + +void optc1_set_early_control(struct timing_generator *optc, + uint32_t early_cntl); + +void optc1_wait_for_state(struct timing_generator *optc, + enum crtc_state state); + +void optc1_set_blank(struct timing_generator *optc, + bool enable_blanking); + +bool optc1_is_blanked(struct timing_generator *optc); + +void optc1_program_blank_color(struct timing_generator *optc, + const struct tg_color *black_color); + +bool optc1_did_triggered_reset_occur(struct timing_generator *optc); + +void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst); + +void optc1_disable_reset_trigger(struct timing_generator *optc); + +void optc1_lock(struct timing_generator *optc); + +void optc1_unlock(struct timing_generator *optc); + +void optc1_enable_optc_clock(struct timing_generator *optc, bool enable); + +void optc1_set_drr(struct timing_generator *optc, + const struct drr_params *params); + +void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); + +void optc1_set_static_screen_control(struct timing_generator *optc, + uint32_t event_triggers, + uint32_t num_frames); + +void optc1_program_stereo(struct timing_generator *optc, + const struct dc_crtc_timing *timing, + struct crtc_stereo_flags *flags); + +bool optc1_is_stereo_left_eye(struct timing_generator *optc); + +void optc1_clear_optc_underflow(struct timing_generator *optc); + +void optc1_tg_init(struct timing_generator *optc); + +bool optc1_is_tg_enabled(struct timing_generator *optc); + +bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); + +void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); + +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); + +bool optc1_get_otg_active_size(struct timing_generator *optc, + uint32_t *otg_active_width, + uint32_t *otg_active_height); + +void optc1_enable_crtc_reset(struct timing_generator *optc, + int source_tg_inst, + struct crtc_trigger_info *crtc_tp); + +bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params); + +bool optc1_get_crc(struct timing_generator *optc, + uint32_t *r_cr, + uint32_t *g_y, + uint32_t *b_cb); + +bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); + +void optc1_set_vtg_params(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + bool program_fp2); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index 00ea3864dd4d..b9812afb886b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -47,6 +47,8 @@ struct pg_cntl_funcs { void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); + + void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on); }; #endif //__DC_PG_CNTL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 21a39afd274b..2d152b68a501 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ 
b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -53,6 +53,7 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) return LINK_RATE_UHBR10; case DP_TEST_LINK_RATE_UHBR20: return LINK_RATE_UHBR20; + case DP_TEST_LINK_RATE_UHBR13_5_LEGACY: case DP_TEST_LINK_RATE_UHBR13_5: return LINK_RATE_UHBR13_5; default: @@ -119,6 +120,11 @@ static void dp_test_send_link_training(struct dc_link *link) 1); link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); + if (link_settings.link_rate == LINK_RATE_UNKNOWN) { + DC_LOG_ERROR("%s: Invalid test link rate.", __func__); + ASSERT(0); + } + /* Set preferred link settings */ link->verified_link_cap.lane_count = link_settings.lane_count; link->verified_link_cap.link_rate = link_settings.link_rate; @@ -457,7 +463,7 @@ static void set_crtc_test_pattern(struct dc_link *link, controller_color_space = pipe_ctx->stream_res.test_pattern_params.color_space; if (controller_color_space == CONTROLLER_DP_COLOR_SPACE_UDEFINED) { - DC_LOG_WARNING("%s: Color space must be defined for test pattern", __func__); + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); ASSERT(0); } @@ -592,6 +598,7 @@ bool dp_set_test_pattern( const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) { + const struct link_hwss *link_hwss; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct pipe_ctx *pipe_ctx = NULL; unsigned int lane; @@ -828,11 +835,9 @@ bool dp_set_test_pattern( pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); /* update MSA to requested color space */ - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, - &pipe_ctx->stream->timing, - color_space, - pipe_ctx->stream->use_vsc_sdp_for_colorimetry, - link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + pipe_ctx->stream->output_color_space = color_space; + link_hwss->setup_stream_attribute(pipe_ctx); if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 34bf8a9ef738..0c00e94e90b1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -184,14 +184,17 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) return false; } -static bool handle_hpd_irq_replay_sink(struct dc_link *link) +static void handle_hpd_irq_replay_sink(struct dc_link *link) { union dpcd_replay_configuration replay_configuration; /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/ union psr_error_status replay_error_status; + if (link->replay_settings.config.force_disable_desync_error_check) + return; + if (!link->replay_settings.replay_feature_enabled) - return false; + return; dm_helpers_dp_read_dpcd( link->ctx, @@ -207,6 +210,9 @@ static bool handle_hpd_irq_replay_sink(struct dc_link *link) &replay_error_status.raw, sizeof(replay_error_status.raw)); + if (replay_configuration.bits.DESYNC_ERROR_STATUS) + link->replay_settings.config.received_desync_error_hpd = 1; + link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = replay_error_status.bits.LINK_CRC_ERROR; link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = @@ -243,7 +249,6 @@ static bool 
handle_hpd_irq_replay_sink(struct dc_link *link) edp_set_replay_allow_active(link, &allow_active, true, false, NULL); } } - return true; } void dp_handle_link_loss(struct dc_link *link) @@ -424,9 +429,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, /* PSR-related error was detected and handled */ return true; - if (handle_hpd_irq_replay_sink(link)) - /* Replay-related error was detected and handled */ - return true; + handle_hpd_irq_replay_sink(link); /* If PSR-related error handled, Main link may be off, * so do not handle as a normal sink status change interrupt. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index bc907ae2052d..ed4379c04715 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -583,6 +583,7 @@ union dmub_fw_boot_status { uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */ uint32_t detection_required: 1; /**< if detection need to be triggered by driver */ uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */ + uint32_t ono_regions_enabled: 1; /**< 1 if ONO regions are enabled */ } bits; /**< status bits */ uint32_t all; /**< 32-bit access to status bits */ }; @@ -599,6 +600,7 @@ enum dmub_fw_boot_status_bit { DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/ DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/ DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */ + DMUB_FW_BOOT_STATUS_BIT_ONO_REGIONS_ENABLED = (1 << 8), /**< 1 if ONO regions are enabled */ }; /* Register bit definition for SCRATCH5 */ @@ -617,9 +619,12 @@ enum dmub_lvtma_status_bit { }; enum dmub_ips_disable_type { - DMUB_IPS_DISABLE_IPS1 = 1, - DMUB_IPS_DISABLE_IPS2 = 2, - DMUB_IPS_DISABLE_IPS2_Z10 = 3, + DMUB_IPS_ENABLE = 0, + DMUB_IPS_DISABLE_ALL = 1, + DMUB_IPS_DISABLE_IPS1 = 2, + DMUB_IPS_DISABLE_IPS2 = 3, + DMUB_IPS_DISABLE_IPS2_Z10 = 4, + DMUB_IPS_DISABLE_DYNAMIC = 5, }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 @@ -653,8 +658,8 @@ union dmub_fw_boot_options { uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ - uint32_t ips_disable: 2; /* options to disable ips support*/ - uint32_t reserved : 10; /**< reserved */ + uint32_t ips_disable: 3; /* options to disable ips support*/ + uint32_t reserved : 9; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -2098,7 +2103,7 @@ enum psr_version { /** * PSR not supported. */ - PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, + PSR_VERSION_UNSUPPORTED = 0xFF, // psr_version field is only 8 bits wide }; /** @@ -3620,7 +3625,6 @@ struct dmub_cmd_abm_pause_data { uint8_t pad[1]; }; - /** * Definition of a DMUB_CMD__ABM_PAUSE command. */ @@ -4046,6 +4050,7 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__MALL command. */ struct dmub_rb_cmd_mall mall; + /** * Definition of a DMUB_CMD__CAB command. */ @@ -4067,6 +4072,7 @@ union dmub_rb_cmd { * Definition of DMUB_CMD__PANEL_CNTL commands. */ struct dmub_rb_cmd_panel_cntl panel_cntl; + /** * Definition of a DMUB_CMD__ABM_SET_PIPE command. 
*/ @@ -4470,10 +4476,6 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr); uint8_t i; - /* Don't remove this. - * The contents need to actually be read from the ring buffer - * for this function to be effective. - */ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) (void)READ_ONCE(*data++); @@ -4522,5 +4524,4 @@ static inline void dmub_rb_get_return_data(struct dmub_rb *rb, //============================================================================== //</DMUB_RB>==================================================================== //============================================================================== - #endif /* _DMUB_CMD_H_ */ diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index acf3527fff2d..08cb79401410 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -491,7 +491,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; if (pp_funcs && pp_funcs->get_apu_thermal_limit) { mutex_lock(&adev->pm.mutex); @@ -505,7 +505,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; if (pp_funcs && pp_funcs->set_apu_thermal_limit) { mutex_lock(&adev->pm.mutex); @@ -1182,7 +1182,7 @@ int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) int ret = 0; if (!pp_funcs->get_sclk_od) - return 0; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); @@ -1196,7 +1196,7 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; if (is_support_sw_smu(adev)) - return 0; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); if (pp_funcs->set_sclk_od) @@ -1219,7 +1219,7 @@ int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) int ret = 0; if (!pp_funcs->get_mclk_od) - return 0; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); @@ -1233,7 +1233,7 @@ int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; if (is_support_sw_smu(adev)) - return 0; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); if (pp_funcs->set_mclk_od) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 4ba9195c83c5..ca2ece24e1e0 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -989,12 +989,13 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, * Reading back the files will show you the available power levels within * the power state and the clock information for those levels. 
If deep sleep is * applied to a clock, the level will be denoted by a special level 'S:' - * E.g., - * S: 19Mhz * - * 0: 615Mhz - * 1: 800Mhz - * 2: 888Mhz - * 3: 1000Mhz + * E.g., :: + * + * S: 19Mhz * + * 0: 615Mhz + * 1: 800Mhz + * 2: 888Mhz + * 3: 1000Mhz * * * To manually adjust these states, first select manual using @@ -2197,6 +2198,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) { if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) { + if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) { + if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(apu_thermal_cap)) { + u32 limit; + + if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == + -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { + if (gc_ver == IP_VERSION(9, 4, 2) || + gc_ver == IP_VERSION(9, 4, 3)) + *states = ATTR_STATE_UNSUPPORTED; } switch (gc_ver) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 23b00eddc1af..1ead323f1c78 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1711,6 +1711,7 @@ static int smu_disable_dpms(struct smu_context *smu) } if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) && + !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) && !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2747,7 +2748,7 @@ unlock: static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) { - int ret = -EINVAL; + int ret = -EOPNOTSUPP; struct smu_context *smu = handle; if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) @@ -2758,7 +2759,7 @@ static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) { - int ret = -EINVAL; + int ret = -EOPNOTSUPP; struct smu_context *smu = handle; if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 20f66e696f87..891605d4975f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -48,6 +48,7 @@ #include "smu_cmn.h" #include "mp/mp_13_0_6_offset.h" #include "mp/mp_13_0_6_sh_mask.h" +#include "umc_v12_0.h" #undef MP1_Public #undef smnMP1_FIRMWARE_FLAGS @@ -94,22 +95,11 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define LINK_SPEED_MAX 4 -#define SMU_13_0_6_DSCLK_THRESHOLD 100 +#define SMU_13_0_6_DSCLK_THRESHOLD 140 #define MCA_BANK_IPID(_ip, _hwid, _type) \ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } -enum mca_reg_idx { - MCA_REG_IDX_CONTROL = 0, - MCA_REG_IDX_STATUS = 1, - MCA_REG_IDX_ADDR = 2, - MCA_REG_IDX_MISC0 = 3, - MCA_REG_IDX_CONFIG = 4, - MCA_REG_IDX_IPID = 5, - MCA_REG_IDX_SYND = 6, - MCA_REG_IDX_COUNT = 16, -}; - struct mca_bank_ipid { enum amdgpu_mca_ip ip; uint16_t hwid; @@ -122,7 +112,9 @@ struct mca_ras_info { int *err_code_array; int err_code_count; int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, - enum amdgpu_mca_error_type type, int idx, uint32_t *count); 
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count); + bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry); }; #define P2S_TABLE_ID_A 0x50325341 @@ -2305,7 +2297,7 @@ static int smu_v13_0_6_post_init(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; if (!amdgpu_sriov_vf(adev) && adev->ras_enabled) - return smu_v13_0_6_mca_set_debug_mode(smu, true); + return smu_v13_0_6_mca_set_debug_mode(smu, false); return 0; } @@ -2387,6 +2379,7 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT MCA_BANK_IPID(UMC, 0x96, 0x0), MCA_BANK_IPID(SMU, 0x01, 0x1), MCA_BANK_IPID(MP5, 0x01, 0x2), + MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0), }; static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info) @@ -2448,53 +2441,60 @@ static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_t return 0; } -static int mca_decode_mca_ipid(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, int *ip) +static int mca_decode_ipid_to_hwip(uint64_t val) { const struct mca_bank_ipid *ipid; - uint64_t val; uint16_t hwid, mcatype; - int i, ret; - - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_IPID, &val); - if (ret) - return ret; + int i; hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID); mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType); - if (hwid) { - for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) { - ipid = &smu_v13_0_6_mca_ipid_table[i]; + for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) { + ipid = &smu_v13_0_6_mca_ipid_table[i]; - if (!ipid->hwid) - continue; + if (!ipid->hwid) + continue; - if (ipid->hwid == hwid && ipid->mcatype == mcatype) { - *ip = i; - return 0; - } - } + if (ipid->hwid == hwid && ipid->mcatype == mcatype) + return i; } - *ip = AMDGPU_MCA_IP_UNKNOW; + return AMDGPU_MCA_IP_UNKNOW; +} + +static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) +{ + uint64_t status0; + + status0 = entry->regs[MCA_REG_IDX_STATUS]; + + if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) { + *count = 0; + return 0; + } + + if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0)) + *count = 1; + else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0)) + *count = 1; return 0; } -static int mca_normal_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, - enum amdgpu_mca_error_type type, int idx, uint32_t *count) +static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, + uint32_t *count) { - uint64_t status0; - int ret; + u32 ext_error_code; - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); - if (ret) - return ret; + ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); - if (REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) + *count = 1; + else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) *count = 1; - else - *count = 0; return 0; } @@ -2515,70 +2515,41 @@ static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mc return false; } -static int mca_mp5_mca_get_err_count(const struct 
mca_ras_info *mca_ras, struct amdgpu_device *adev, - enum amdgpu_mca_error_type type, int idx, uint32_t *count) +static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { - uint64_t status0 = 0, misc0 = 0; - uint32_t errcode; - int ret; - - if (mca_ras->ip != AMDGPU_MCA_IP_MP5) - return -EINVAL; - - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); - if (ret) - return ret; + uint64_t status0, misc0; + status0 = entry->regs[MCA_REG_IDX_STATUS]; if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) { *count = 0; return 0; } - errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode); - if (!mca_smu_check_error_code(adev, mca_ras, errcode)) - return 0; - if (type == AMDGPU_MCA_ERROR_TYPE_UE && REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 && REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) { - if (count) - *count = 1; + *count = 1; return 0; - } - - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0); - if (ret) - return ret; - - if (count) + } else { + misc0 = entry->regs[MCA_REG_IDX_MISC0]; *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt); + } return 0; } static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, - enum amdgpu_mca_error_type type, int idx, uint32_t *count) + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { - uint64_t status0 = 0, misc0 = 0; - uint32_t errcode; - int ret; - - if (mca_ras->ip != AMDGPU_MCA_IP_SMU) - return -EINVAL; - - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); - if (ret) - return ret; + uint64_t status0, misc0; + status0 = entry->regs[MCA_REG_IDX_STATUS]; if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) { *count = 0; return 0; } - errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode); - if (!mca_smu_check_error_code(adev, mca_ras, errcode)) - return 0; - if (type == AMDGPU_MCA_ERROR_TYPE_UE && REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 && REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) { @@ -2587,16 +2558,43 @@ static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct return 0; } - ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0); - if (ret) - return ret; - - if (count) - *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt); + misc0 = entry->regs[MCA_REG_IDX_MISC0]; + *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt); return 0; } +static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) +{ + uint32_t instlo; + + instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + switch (instlo) { + case 0x36430400: /* SMNAID XCD 0 */ + case 0x38430400: /* SMNAID XCD 1 */ + case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */ + return true; + default: + return false; + } + + return false; +}; + +static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) +{ + uint32_t errcode, instlo; + + instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + if (instlo != 0x03b30400) + return false; + + errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode); + return mca_smu_check_error_code(adev, mca_ras, errcode); +} + static int sdma_err_codes[] 
= { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 }; static int mmhub_err_codes[] = { CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */ @@ -2608,23 +2606,30 @@ static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, .ip = AMDGPU_MCA_IP_UMC, - .get_err_count = mca_normal_mca_get_err_count, + .get_err_count = mca_umc_mca_get_err_count, }, { .blkid = AMDGPU_RAS_BLOCK__GFX, - .ip = AMDGPU_MCA_IP_MP5, - .get_err_count = mca_mp5_mca_get_err_count, + .ip = AMDGPU_MCA_IP_SMU, + .get_err_count = mca_gfx_mca_get_err_count, + .bank_is_valid = mca_gfx_smu_bank_is_valid, }, { .blkid = AMDGPU_RAS_BLOCK__SDMA, .ip = AMDGPU_MCA_IP_SMU, .err_code_array = sdma_err_codes, .err_code_count = ARRAY_SIZE(sdma_err_codes), .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, { .blkid = AMDGPU_RAS_BLOCK__MMHUB, .ip = AMDGPU_MCA_IP_SMU, .err_code_array = mmhub_err_codes, .err_code_count = ARRAY_SIZE(mmhub_err_codes), .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, + .ip = AMDGPU_MCA_IP_PCS_XGMI, + .get_err_count = mca_pcs_xgmi_mca_get_err_count, }, }; @@ -2659,130 +2664,84 @@ static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_e } static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, int idx) + enum amdgpu_mca_error_type type, struct mca_bank_entry *entry) { - int ret, ip = AMDGPU_MCA_IP_UNKNOW; - - ret = mca_decode_mca_ipid(adev, type, idx, &ip); - if (ret) - return false; - - if (ip == AMDGPU_MCA_IP_UNKNOW) + if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip) return false; - return ip == mca_ras->ip; -} + if (mca_ras->bank_is_valid) + return mca_ras->bank_is_valid(mca_ras, adev, type, entry); -static int mca_get_valid_mca_idx(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, - uint32_t mca_cnt, int *idx_array, int idx_array_size) -{ - int i, idx_cnt = 0; - - for (i = 0; i < mca_cnt; i++) { - if (!mca_bank_is_valid(adev, mca_ras, type, i)) - continue; - - if (idx_array) { - if (idx_cnt < idx_array_size) - idx_array[idx_cnt] = i; - else - return -EINVAL; - } - - idx_cnt++; - } - - return idx_cnt; + return true; } -static int __mca_smu_get_error_count(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, enum amdgpu_mca_error_type type, uint32_t *count) +static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) { - uint32_t result, mca_cnt, total = 0; - int idx_array[16]; - int i, ret, idx_cnt = 0; + struct mca_bank_entry entry; + uint32_t mca_cnt; + int i, ret; ret = mca_get_valid_mca_count(adev, type, &mca_cnt); if (ret) return ret; /* if valid mca bank count is 0, the driver can return 0 directly */ - if (!mca_cnt) { - *count = 0; + if (!mca_cnt) return 0; - } - if (!mca_ras->get_err_count) - return -EINVAL; + for (i = 0; i < mca_cnt; i++) { + memset(&entry, 0, sizeof(entry)); + ret = mca_get_mca_entry(adev, type, i, &entry); + if (ret) + return ret; - idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, ARRAY_SIZE(idx_array)); - if (idx_cnt < 0) - return -EINVAL; + if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry)) + continue; - for (i = 0; i < idx_cnt; i++) { - result = 0; - ret = 
mca_ras->get_err_count(mca_ras, adev, type, idx_array[i], &result); + ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry); if (ret) return ret; - - total += result; } - *count = total; - return 0; } -static int mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, uint32_t *count) +static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) { - const struct mca_ras_info *mca_ras; + const struct mca_ras_info *mca_ras = NULL; - if (!count) + if (!mca_set) return -EINVAL; - mca_ras = mca_get_mca_ras_info(adev, blk); - if (!mca_ras) - return -EOPNOTSUPP; - - return __mca_smu_get_error_count(adev, mca_ras, type, count); -} - -static int __mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size) -{ - uint32_t mca_cnt = 0; - int ret, idx_cnt = 0; - - ret = mca_get_valid_mca_count(adev, type, &mca_cnt); - if (ret) - return ret; - - /* if valid mca bank count is 0, the driver can return 0 directly */ - if (!mca_cnt) { - *idx_array_size = 0; - return 0; + if (blk != AMDGPU_RAS_BLOCK_COUNT) { + mca_ras = mca_get_mca_ras_info(adev, blk); + if (!mca_ras) + return -EOPNOTSUPP; } - idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, *idx_array_size); - if (idx_cnt < 0) - return -EINVAL; - - *idx_array_size = idx_cnt; - - return 0; + return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set); } -static int mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size) +static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry, uint32_t *count) { const struct mca_ras_info *mca_ras; + if (!entry || !count) + return -EINVAL; + mca_ras = mca_get_mca_ras_info(adev, blk); if (!mca_ras) return -EOPNOTSUPP; - return __mca_smu_get_ras_mca_idx_array(adev, mca_ras, type, idx_array, idx_array_size); + if (!mca_bank_is_valid(adev, mca_ras, type, entry)) { + *count = 0; + return 0; + } + + return mca_ras->get_err_count(mca_ras, adev, type, entry, count); } static int mca_smu_get_mca_entry(struct amdgpu_device *adev, @@ -2801,10 +2760,10 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { .max_ue_count = 12, .max_ce_count = 12, .mca_set_debug_mode = mca_smu_set_debug_mode, - .mca_get_error_count = mca_smu_get_error_count, + .mca_get_ras_mca_set = mca_smu_get_ras_mca_set, + .mca_parse_mca_error_count = mca_smu_parse_mca_error_count, .mca_get_mca_entry = mca_smu_get_mca_entry, .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, - .mca_get_ras_mca_idx_array = mca_smu_get_ras_mca_idx_array, }; static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt index d8856d1581fd..e9994c9db799 100644 --- a/drivers/gpu/drm/ci/xfails/requirements.txt +++ b/drivers/gpu/drm/ci/xfails/requirements.txt @@ -5,7 +5,7 @@ termcolor==2.3.0 certifi==2023.7.22 charset-normalizer==3.2.0 idna==3.4 -pip==23.2.1 +pip==23.3 python-gitlab==3.15.0 requests==2.31.0 requests-toolbelt==1.0.0 @@ -13,5 +13,5 @@ ruamel.yaml==0.17.32 ruamel.yaml.clib==0.2.7 setuptools==68.0.0 tenacity==8.2.3 -urllib3==2.0.4 -wheel==0.41.1
\ No newline at end of file +urllib3==2.0.7 +wheel==0.41.1 diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index f7003d1ec5ef..01da6789d044 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1069,7 +1069,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, fence = drm_syncobj_fence_get(syncobjs[i]); if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) { dma_fence_put(fence); - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { continue; } else { timeout = -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 6d7ba4d0f130..c4839c67cb0f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2750,6 +2750,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) for_each_pipe(dev_priv, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); + /* + * Avoid glk_force_audio_cdclk() causing excessive screen + * blinking when multiple pipes are active by making sure + * CDCLK frequency is always high enough for audio. With a + * single active pipe we can always change CDCLK frequency + * by changing the cd2x divider (see glk_cdclk_table[]) and + * thus a full modeset won't be needed then. + */ + if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes && + !is_power_of_2(cdclk_state->active_pipes)) + min_cdclk = max(2 * 96000, min_cdclk); + if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { drm_dbg_kms(&dev_priv->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 1891c0cc187d..2c1034578984 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp) enum phy phy = intel_port_to_phy(i915, dig_port->base.port); if (intel_is_c10phy(i915, phy)) - return intel_dp_is_edp(intel_dp) ? 
675000 : 810000; + return 810000; return 2000000; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 37b0f8529b4f..f64d348a969e 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -58,7 +58,7 @@ struct intel_tc_port { struct delayed_work link_reset_work; int link_refcount; bool legacy_port:1; - char port_name[8]; + const char *port_name; enum tc_port_mode mode; enum tc_port_mode init_mode; enum phy_fia phy_fia; @@ -1875,8 +1875,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) else tc->phy_ops = &icl_tc_phy_ops; - snprintf(tc->port_name, sizeof(tc->port_name), - "%c/TC#%d", port_name(port), tc_port + 1); + tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port), + tc_port + 1); + if (!tc->port_name) { + kfree(tc); + return -ENOMEM; + } mutex_init(&tc->lock); /* TODO: Combine the two works */ @@ -1897,6 +1901,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port) { intel_tc_port_suspend(dig_port); + kfree(dig_port->tc->port_name); kfree(dig_port->tc); dig_port->tc = NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 9a9ff84c90d7..e38f06a6e56e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, if (idx >= pc->num_user_engines) return -EINVAL; + idx = array_index_nospec(idx, pc->num_user_engines); pe = &pc->user_engines[idx]; /* Only render engine supports RPCS configuration. */ diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 1c93e84278a0..15fc8e4703f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -195,6 +195,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) spin_unlock_irq(&uncore->lock); } +static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915) +{ + /* + * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. + */ + if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11) + return true; + + return false; +} + static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; @@ -202,8 +217,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) /* * Note that as an uncached mmio write, this will flush the * WCB of the writes into the GGTT before it triggers the invalidate. + * + * Only perform this when GGTT is mapped as WC, see ggtt_probe_common(). */ - intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + if (needs_wc_ggtt_mapping(ggtt->vm.i915)) + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, + GFX_FLSH_CNTL_EN); } static void guc_ggtt_ct_invalidate(struct intel_gt *gt) @@ -1140,17 +1159,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915)); phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915); - /* - * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range - * will be dropped. 
For WC mappings in general we have 64 byte burst - * writes when the WC buffer is flushed, so we can't use it, but have to - * resort to an uncached mapping. The WC issue is easily caught by the - * readback check when writing GTT PTE entries. - */ - if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11) - ggtt->gsm = ioremap(phys_addr, size); - else + if (needs_wc_ggtt_mapping(i915)) ggtt->gsm = ioremap_wc(phys_addr, size); + else + ggtt->gsm = ioremap(phys_addr, size); + if (!ggtt->gsm) { drm_err(&i915->drm, "Failed to map the ggtt page table\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 8b67abd720be..7090e4be29cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -581,19 +581,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6) static void rc6_res_reg_init(struct intel_rc6 *rc6) { - memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg)); + i915_reg_t res_reg[INTEL_RC6_RES_MAX] = { + [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG, + }; switch (rc6_to_gt(rc6)->type) { case GT_MEDIA: - rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6; + res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6; break; default: - rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED; - rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6; - rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p; - rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp; + res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED; + res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6; + res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p; + res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp; break; } + + memcpy(rc6->res_reg, res_reg, sizeof(res_reg)); } void intel_rc6_init(struct intel_rc6 *rc6) diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c index 614bde321589..8bca02025e09 100644 --- a/drivers/gpu/drm/i915/i915_debugfs_params.c +++ b/drivers/gpu/drm/i915/i915_debugfs_params.c @@ -38,10 +38,13 @@ static int i915_param_int_open(struct inode *inode, struct file *file) static int notify_guc(struct drm_i915_private *i915) { - int ret = 0; + struct intel_gt *gt; + int i, ret = 0; - if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) - ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc); + for_each_gt(gt, i915, i) { + if (intel_uc_uses_guc_submission(>->uc)) + ret = intel_guc_global_policies_update(>->uc.guc); + } return ret; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2f3ecd7d4804..7b1c8de2f9cb 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -4227,11 +4227,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, u32 known_open_flags; int ret; - if (!perf->i915) { - drm_dbg(&perf->i915->drm, - "i915 perf interface not available for this system\n"); + if (!perf->i915) return -ENOTSUPP; - } known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK | @@ -4607,11 +4604,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, struct i915_oa_reg *regs; int err, id; - if (!perf->i915) { - drm_dbg(&perf->i915->drm, - "i915 perf interface not available for this system\n"); + if (!perf->i915) return -ENOTSUPP; - } if (!perf->metrics_kobj) { drm_dbg(&perf->i915->drm, @@ -4773,11 +4767,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct i915_oa_config *oa_config; int ret; - if (!perf->i915) { - drm_dbg(&perf->i915->drm, - "i915 
perf interface not available for this system\n"); + if (!perf->i915) return -ENOTSUPP; - } if (i915_perf_stream_paranoid && !perfmon_capable()) { drm_dbg(&perf->i915->drm, diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 6492a70e3c39..404b0483bb7c 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev) if (!qdev->monitors_config_bo) return 0; + kfree(qdev->dumb_heads); + qdev->dumb_heads = NULL; + qdev->monitors_config = NULL; qdev->ram_header->monitors_config = 0; diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c index 5d12d7beef0e..ade3309ae042 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c @@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test, struct vc4_crtc *vc4_crtc; int ret; - dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL); + dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, dummy_crtc); vc4_crtc = &dummy_crtc->crtc; diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c index 6e11fcc9ef45..e70d7c3076ac 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c @@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test, struct drm_encoder *enc; int ret; - dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL); + dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output); dummy_output->encoder.type = vc4_encoder_type; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 68d11ccee441..98b0329b7154 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -39,8 +39,7 @@ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) -#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2) -#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3) +#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) @@ -106,7 +105,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 * interrupt. 
*/ -DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); +static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); @@ -1779,15 +1778,6 @@ static bool gic_enable_quirk_msm8996(void *data) return true; } -static bool gic_enable_quirk_mtk_gicr(void *data) -{ - struct gic_chip_data *d = data; - - d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; - - return true; -} - static bool gic_enable_quirk_cavium_38539(void *data) { struct gic_chip_data *d = data; @@ -1889,11 +1879,6 @@ static const struct gic_quirk gic_quirks[] = { .init = gic_enable_quirk_asr8601, }, { - .desc = "GICv3: Mediatek Chromebook GICR save problem", - .property = "mediatek,broken-save-restore-fw", - .init = gic_enable_quirk_mtk_gicr, - }, - { .desc = "GICv3: HIP06 erratum 161010803", .iidr = 0x0204043b, .mask = 0xffffffff, @@ -1959,11 +1944,6 @@ static void gic_enable_nmi_support(void) if (!gic_prio_masking_enabled()) return; - if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { - pr_warn("Skipping NMI enable due to firmware issues\n"); - return; - } - rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR, sizeof(*rdist_nmi_refs), GFP_KERNEL); if (!rdist_nmi_refs) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 3a8f27c3e310..152dfe593c43 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2381,8 +2381,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) } ret = mmc_blk_cqe_issue_flush(mq, req); break; - case REQ_OP_READ: case REQ_OP_WRITE: + card->written_flag = true; + fallthrough; + case REQ_OP_READ: if (host->cqe_enabled) ret = mmc_blk_cqe_issue_rw_rq(mq, req); else diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 4edf9057fa79..b7754a1b8d97 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE; } +static inline int mmc_card_broken_cache_flush(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH; +} #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 8180983bd402..705942edacc6 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card) case 3: /* MMC v3.1 - v3.3 */ case 4: /* MMC v4 */ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); @@ -2086,13 +2086,17 @@ static int _mmc_flush_cache(struct mmc_host *host) { int err = 0; + if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag) + return 0; + if (_mmc_cache_enabled(host)) { err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE, 1, CACHE_FLUSH_TIMEOUT_MS); if (err) - pr_err("%s: cache flush error %d\n", - mmc_hostname(host), err); + pr_err("%s: cache flush error %d\n", mmc_hostname(host), err); + else + host->card->written_flag = false; } return err; diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 32b64b564fb1..cca71867bc4a 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_QUIRK_TRIM_BROKEN), /* - * Micron MTFC4GACAJCN-1M 
advertises TRIM but it does not seems to - * support being used to offload WRITE_ZEROES. + * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support + * WRITE_ZEROES offloading. It also supports caching, but the cache can + * only be flushed after a write has occurred. */ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc, - MMC_QUIRK_TRIM_BROKEN), + MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH), /* * Kingston EMMC04G-M627 advertises TRIM but it does not seems to diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index d83261e857a5..d8a991b349a8 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -28,6 +28,9 @@ #define PCI_GLI_9750_PM_CTRL 0xFC #define PCI_GLI_9750_PM_STATE GENMASK(1, 0) +#define PCI_GLI_9750_CORRERR_MASK 0x214 +#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) + #define SDHCI_GLI_9750_CFG2 0x848 #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24) #define GLI_9750_CFG2_L1DLY_VALUE 0x1F @@ -152,6 +155,9 @@ #define PCI_GLI_9755_PM_CTRL 0xFC #define PCI_GLI_9755_PM_STATE GENMASK(1, 0) +#define PCI_GLI_9755_CORRERR_MASK 0x214 +#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12) + #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510 #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8) @@ -561,6 +567,11 @@ static void gl9750_hw_setting(struct sdhci_host *host) value &= ~PCI_GLI_9750_PM_STATE; pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value); + /* mask the replay timer timeout of AER */ + pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value); + value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; + pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value); + gl9750_wt_off(host); } @@ -770,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot) value &= ~PCI_GLI_9755_PM_STATE; pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value); + /* mask the replay timer timeout of AER */ + pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value); + value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT; + pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value); + gl9755_wt_off(pdev); } diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index c125485ba80e..967bd2dfcda1 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host, return 0; } - for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) { + for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) { ret = device_property_read_u32(dev, td[i].otap_binding, &sdhci_am654->otap_del_sel[i]); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index de3f443f5fdc..fd67c0682b38 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface, vub300->read_only = (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; } else { + retval = -EINVAL; goto error5; } usb_set_intfdata(interface, vub300); diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig index 60738edcd5d5..da03ab6efe04 100644 --- a/drivers/mtd/parsers/Kconfig +++ b/drivers/mtd/parsers/Kconfig @@ -1,9 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config MTD_AR7_PARTS - tristate "TI AR7 partitioning parser" - help - TI AR7 partitioning parser support - config MTD_BCM47XX_PARTS tristate "BCM47XX partitioning parser" depends on BCM47XX || ARCH_BCM_5301X diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile index 0e70b621a1d8..9b00c62b837a 100644 --- a/drivers/mtd/parsers/Makefile +++ b/drivers/mtd/parsers/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o diff --git a/drivers/mtd/parsers/ar7part.c b/drivers/mtd/parsers/ar7part.c deleted file mode 100644 index 8cd683711ac6..000000000000 --- a/drivers/mtd/parsers/ar7part.c +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright © 2007 Eugene Konev <ejka@openwrt.org> - * - * TI AR7 flash partition table. - * Based on ar7 map by Felix Fietkau <nbd@openwrt.org> - */ - -#include <linux/kernel.h> -#include <linux/slab.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/partitions.h> -#include <linux/memblock.h> -#include <linux/module.h> - -#include <uapi/linux/magic.h> - -#define AR7_PARTS 4 -#define ROOT_OFFSET 0xe0000 - -#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42) -#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281) - -struct ar7_bin_rec { - unsigned int checksum; - unsigned int length; - unsigned int address; -}; - -static int create_mtd_partitions(struct mtd_info *master, - const struct mtd_partition **pparts, - struct mtd_part_parser_data *data) -{ - struct ar7_bin_rec header; - unsigned int offset; - size_t len; - unsigned int pre_size = master->erasesize, post_size = 0; - unsigned int root_offset = ROOT_OFFSET; - - int retries = 10; - struct mtd_partition *ar7_parts; - - ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL); - if (!ar7_parts) - return -ENOMEM; - ar7_parts[0].name = "loader"; - ar7_parts[0].offset = 0; - ar7_parts[0].size = master->erasesize; - ar7_parts[0].mask_flags = MTD_WRITEABLE; - - ar7_parts[1].name = "config"; - ar7_parts[1].offset = 0; - ar7_parts[1].size = master->erasesize; - ar7_parts[1].mask_flags = 0; - - do { /* Try 10 blocks starting from master->erasesize */ - offset = pre_size; - mtd_read(master, offset, sizeof(header), &len, - (uint8_t *)&header); - if (!strncmp((char *)&header, "TIENV0.8", 8)) - ar7_parts[1].offset = pre_size; - if (header.checksum == LOADER_MAGIC1) - break; - if (header.checksum == LOADER_MAGIC2) - break; - pre_size += master->erasesize; - } while (retries--); - - pre_size = offset; - - if (!ar7_parts[1].offset) { - ar7_parts[1].offset = master->size - master->erasesize; - post_size = master->erasesize; - } - - switch (header.checksum) { - case LOADER_MAGIC1: - while (header.length) { - offset += sizeof(header) + header.length; - mtd_read(master, offset, sizeof(header), &len, - (uint8_t *)&header); - } - root_offset = offset + sizeof(header) + 4; - break; - case LOADER_MAGIC2: - while (header.length) { - offset += sizeof(header) + header.length; - mtd_read(master, offset, sizeof(header), &len, - (uint8_t *)&header); - } - root_offset = offset + 
sizeof(header) + 4 + 0xff; - root_offset &= ~(uint32_t)0xff; - break; - default: - printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum); - break; - } - - mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header); - if (header.checksum != SQUASHFS_MAGIC) { - root_offset += master->erasesize - 1; - root_offset &= ~(master->erasesize - 1); - } - - ar7_parts[2].name = "linux"; - ar7_parts[2].offset = pre_size; - ar7_parts[2].size = master->size - pre_size - post_size; - ar7_parts[2].mask_flags = 0; - - ar7_parts[3].name = "rootfs"; - ar7_parts[3].offset = root_offset; - ar7_parts[3].size = master->size - root_offset - post_size; - ar7_parts[3].mask_flags = 0; - - *pparts = ar7_parts; - return AR7_PARTS; -} - -static struct mtd_part_parser ar7_parser = { - .parse_fn = create_mtd_partitions, - .name = "ar7part", -}; -module_mtd_part_parser(ar7_parser); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, " - "Eugene Konev <ejka@openwrt.org>"); -MODULE_DESCRIPTION("MTD partitioning for TI AR7"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index fef86a8b4163..1627b2f819db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -550,9 +550,9 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), - HCMD_NAME(RFI_DEACTIVATE_NOTIF), HCMD_NAME(SYSTEM_STATISTICS_CMD), HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), + HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; /* Please keep this array *SORTED* by hex value. diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile index eedca8c72098..74f59ceed3d5 100644 --- a/drivers/nvme/Makefile +++ b/drivers/nvme/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_NVME_COMMON) += common/ +obj-y += common/ obj-y += host/ obj-y += target/ diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig index 06c8df00d1e2..244432e0b73d 100644 --- a/drivers/nvme/common/Kconfig +++ b/drivers/nvme/common/Kconfig @@ -1,14 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only -config NVME_COMMON - tristate - config NVME_KEYRING - bool + tristate select KEYS config NVME_AUTH - bool + tristate select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA256 diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile index 0cbd0b0b8d49..681514cf2e2f 100644 --- a/drivers/nvme/common/Makefile +++ b/drivers/nvme/common/Makefile @@ -2,7 +2,8 @@ ccflags-y += -I$(src) -obj-$(CONFIG_NVME_COMMON) += nvme-common.o +obj-$(CONFIG_NVME_AUTH) += nvme-auth.o +obj-$(CONFIG_NVME_KEYRING) += nvme-keyring.o -nvme-common-$(CONFIG_NVME_AUTH) += auth.o -nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o +nvme-auth-y += auth.o +nvme-keyring-y += keyring.o diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index a8e87dfbeab2..a23ab5c968b9 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -341,7 +341,6 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, u8 *challenge, u8 *aug, size_t hlen) { struct crypto_shash *tfm; - struct shash_desc *desc; u8 *hashed_key; const char *hmac_name; int ret; @@ -369,29 +368,11 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, goto out_free_key; } - desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), - GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto out_free_hash; - } - 
desc->tfm = tfm; - ret = crypto_shash_setkey(tfm, hashed_key, hlen); if (ret) - goto out_free_desc; - - ret = crypto_shash_init(desc); - if (ret) - goto out_free_desc; - - ret = crypto_shash_update(desc, challenge, hlen); - if (ret) - goto out_free_desc; + goto out_free_hash; - ret = crypto_shash_final(desc, aug); -out_free_desc: - kfree_sensitive(desc); + ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug); out_free_hash: crypto_free_shash(tfm); out_free_key: diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c index f8d9a208397b..ee341b83eeba 100644 --- a/drivers/nvme/common/keyring.c +++ b/drivers/nvme/common/keyring.c @@ -151,7 +151,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring, } EXPORT_SYMBOL_GPL(nvme_tls_psk_default); -int nvme_keyring_init(void) +static int __init nvme_keyring_init(void) { int err; @@ -171,12 +171,15 @@ int nvme_keyring_init(void) } return 0; } -EXPORT_SYMBOL_GPL(nvme_keyring_init); -void nvme_keyring_exit(void) +static void __exit nvme_keyring_exit(void) { unregister_key_type(&nvme_tls_psk_key_type); key_revoke(nvme_keyring); key_put(nvme_keyring); } -EXPORT_SYMBOL_GPL(nvme_keyring_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); +module_init(nvme_keyring_init); +module_exit(nvme_keyring_exit); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 48f7d72de5e9..8fe2dd619e80 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -95,7 +95,6 @@ config NVME_TCP config NVME_TCP_TLS bool "NVMe over Fabrics TCP TLS encryption support" depends on NVME_TCP - select NVME_COMMON select NVME_KEYRING select NET_HANDSHAKE select KEYS @@ -110,7 +109,6 @@ config NVME_TCP_TLS config NVME_HOST_AUTH bool "NVM Express over Fabrics In-Band Authentication" depends on NVME_CORE - select NVME_COMMON select NVME_AUTH help This provides support for NVMe over Fabrics In-Band Authentication. 
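A minimal sketch of the pattern used in the drivers/nvme/common/auth.c hunk above: the keyed digest is computed in one call with crypto_shash_tfm_digest() instead of allocating a shash_desc and driving init/update/final by hand. This assumes a kernel context; demo_hmac_digest() and its parameters are illustrative names, not part of the patch.

#include <crypto/hash.h>
#include <linux/err.h>

static int demo_hmac_digest(const char *hmac_name, const u8 *key,
			    unsigned int keylen, const u8 *data,
			    unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, keylen);
	if (!ret)
		/* One-shot digest replaces the manual desc alloc + init/update/final chain. */
		ret = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return ret;
}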
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index eaefebb2a799..48328e36e93b 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -29,6 +29,7 @@ struct nvme_dhchap_queue_context { int error; u32 s1; u32 s2; + bool bi_directional; u16 transaction; u8 status; u8 dhgroup_id; @@ -312,17 +313,17 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, data->dhvlen = cpu_to_le16(chap->host_key_len); memcpy(data->rval, chap->response, chap->hash_len); if (ctrl->ctrl_key) { + chap->bi_directional = true; get_random_bytes(chap->c2, chap->hash_len); data->cvalid = 1; - chap->s2 = nvme_auth_get_seqnum(); memcpy(data->rval + chap->hash_len, chap->c2, chap->hash_len); dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", __func__, chap->qid, (int)chap->hash_len, chap->c2); } else { memset(chap->c2, 0, chap->hash_len); - chap->s2 = 0; } + chap->s2 = nvme_auth_get_seqnum(); data->seqnum = cpu_to_le32(chap->s2); if (chap->host_key_len) { dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", @@ -339,10 +340,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, struct nvme_dhchap_queue_context *chap) { struct nvmf_auth_dhchap_success1_data *data = chap->buf; - size_t size = sizeof(*data); - - if (chap->s2) - size += chap->hash_len; + size_t size = sizeof(*data) + chap->hash_len; if (size > CHAP_BUF_SIZE) { chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; @@ -663,6 +661,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) chap->error = 0; chap->s1 = 0; chap->s2 = 0; + chap->bi_directional = false; chap->transaction = 0; memset(chap->c1, 0, sizeof(chap->c1)); memset(chap->c2, 0, sizeof(chap->c2)); @@ -825,7 +824,7 @@ static void nvme_queue_auth_work(struct work_struct *work) goto fail2; } - if (chap->s2) { + if (chap->bi_directional) { /* DH-HMAC-CHAP Step 5: send success2 */ dev_dbg(ctrl->device, "%s: qid %d send success2\n", __func__, chap->qid); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 62612f87aafa..88b54cdcbd68 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -25,7 +25,6 @@ #include "nvme.h" #include "fabrics.h" #include <linux/nvme-auth.h> -#include <linux/nvme-keyring.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -483,6 +482,7 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset); void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) { + nvme_stop_keep_alive(ctrl); if (ctrl->admin_tagset) { blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl); @@ -3200,6 +3200,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); ctrl->identified = true; + nvme_start_keep_alive(ctrl); + return 0; } EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); @@ -4074,8 +4076,21 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) return; if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, - log, sizeof(*log), 0)) + log, sizeof(*log), 0)) { dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); + goto out_free_log; + } + + if (log->afi & 0x70 || !(log->afi & 0x7)) { + dev_info(ctrl->device, + "Firmware is activated after next Controller Level Reset\n"); + goto out_free_log; + } + + memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1], + sizeof(ctrl->subsys->firmware_rev)); + +out_free_log: kfree(log); } @@ -4333,7 +4348,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { nvme_mpath_stop(ctrl); nvme_auth_stop(ctrl); - nvme_stop_keep_alive(ctrl); 
nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); @@ -4344,8 +4358,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl); void nvme_start_ctrl(struct nvme_ctrl *ctrl) { - nvme_start_keep_alive(ctrl); - nvme_enable_aen(ctrl); /* @@ -4724,16 +4736,11 @@ static int __init nvme_core_init(void) result = PTR_ERR(nvme_ns_chr_class); goto unregister_generic_ns; } - result = nvme_keyring_init(); - if (result) - goto destroy_ns_chr; result = nvme_init_auth(); if (result) - goto keyring_exit; + goto destroy_ns_chr; return 0; -keyring_exit: - nvme_keyring_exit(); destroy_ns_chr: class_destroy(nvme_ns_chr_class); unregister_generic_ns: @@ -4757,7 +4764,6 @@ out: static void __exit nvme_core_exit(void) { nvme_exit_auth(); - nvme_keyring_exit(); class_destroy(nvme_ns_chr_class); class_destroy(nvme_subsys_class); class_destroy(nvme_class); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index a15b37750d6e..49c3e46eaa1e 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2530,6 +2530,12 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) * clean up the admin queue. Same thing as above. */ nvme_quiesce_admin_queue(&ctrl->ctrl); + + /* + * Open-coding nvme_cancel_admin_tagset() as fc + * is not using nvme_cancel_request(). + */ + nvme_stop_keep_alive(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 747c879e8982..529b9954d2b8 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); req->bio = pdu->bio; - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { pdu->nvme_status = -EINTR; - else + } else { pdu->nvme_status = nvme_req(req)->status; + if (!pdu->nvme_status) + pdu->nvme_status = blk_status_to_errno(err); + } pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); /* diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 4714a902f4ca..89661a9cf850 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1423,13 +1423,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) nvme_tcp_queue_id(queue), ret); goto free_icresp; } + ret = -ENOTCONN; if (queue->ctrl->ctrl.opts->tls) { ctype = tls_get_record_type(queue->sock->sk, (struct cmsghdr *)cbuf); if (ctype != TLS_RECORD_TYPE_DATA) { pr_err("queue %d: unhandled TLS record %d\n", nvme_tcp_queue_id(queue), ctype); - return -ENOTCONN; + goto free_icresp; } } ret = -EINVAL; @@ -2236,11 +2237,7 @@ destroy_io: nvme_tcp_destroy_io_queues(ctrl, new); } destroy_admin: - nvme_quiesce_admin_queue(ctrl); - blk_sync_queue(ctrl->admin_q); - nvme_tcp_stop_queue(ctrl, 0); - nvme_cancel_admin_tagset(ctrl); - nvme_tcp_destroy_admin_queue(ctrl, new); + nvme_tcp_teardown_admin_queue(ctrl, false); return ret; } diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index fa479c9f5c3d..31633da9427c 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -87,7 +87,6 @@ config NVME_TARGET_TCP config NVME_TARGET_TCP_TLS bool "NVMe over Fabrics TCP target TLS encryption support" depends on NVME_TARGET_TCP - select NVME_COMMON select NVME_KEYRING select NET_HANDSHAKE select KEYS @@ -102,7 +101,6 @@ config NVME_TARGET_TCP_TLS config 
NVME_TARGET_AUTH bool "NVMe over Fabrics In-band Authentication support" depends on NVME_TARGET - select NVME_COMMON select NVME_AUTH help This enables support for NVMe over Fabrics In-band Authentication diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index 1d9854484e2e..eb7785be0ca7 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -163,11 +163,11 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d) pr_debug("%s: ctrl %d qid %d challenge %*ph\n", __func__, ctrl->cntlid, req->sq->qid, data->hl, req->sq->dhchap_c2); - req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); } else { req->sq->authenticated = true; req->sq->dhchap_c2 = NULL; } + req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); return 0; } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 48d5df054cd0..9cb434c58075 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -466,6 +466,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) out_destroy_io: nvme_loop_destroy_io_queues(ctrl); out_destroy_admin: + nvme_quiesce_admin_queue(&ctrl->ctrl); + nvme_cancel_admin_tagset(&ctrl->ctrl); nvme_loop_destroy_admin_queue(ctrl); out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); @@ -600,6 +602,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, return &ctrl->ctrl; out_remove_admin_queue: + nvme_quiesce_admin_queue(&ctrl->ctrl); + nvme_cancel_admin_tagset(&ctrl->ctrl); nvme_loop_destroy_admin_queue(ctrl); out_free_queues: kfree(ctrl->queues); diff --git a/drivers/of/property.c b/drivers/of/property.c index cf8dacf3e3b8..afdaefbd03f6 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1267,6 +1267,7 @@ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) DEFINE_SIMPLE_PROP(panel, "panel", NULL) +DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells") DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") @@ -1356,6 +1357,7 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_leds, }, { .parse_prop = parse_backlight, }, { .parse_prop = parse_panel, }, + { .parse_prop = parse_msi_parent, }, { .parse_prop = parse_gpio_compat, }, { .parse_prop = parse_interrupts, }, { .parse_prop = parse_regulators, }, diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 5e4475254bd0..c7e18382dc01 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -392,7 +392,7 @@ static struct parisc_driver parport_driver __refdata = { .remove = __exit_p(parport_remove_chip), }; -int parport_gsc_init(void) +static int parport_gsc_init(void) { return register_parisc_driver(&parport_driver); } diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index 42b72042f6b3..2cc35dded007 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -676,6 +676,9 @@ static int arm_cspmu_event_init(struct perf_event *event) cspmu = to_arm_cspmu(event->pmu); + if (event->attr.type != event->pmu->type) + return -ENOENT; + /* * Following other "uncore" PMUs, we do not support sampling mode or * attach to a task (per-process mode). 
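A minimal sketch of the check added to arm_cspmu_event_init() above: an uncore-style PMU rejects events whose attr.type does not match its own registered type with -ENOENT, so the perf core moves on to the next PMU instead of binding the event here. This assumes a kernel context; demo_pmu_event_init() is an illustrative callback name, not part of the patch.

#include <linux/perf_event.h>
#include <linux/errno.h>

static int demo_pmu_event_init(struct perf_event *event)
{
	/* Not our event type: let the core try other PMUs. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Uncore PMUs typically refuse sampling and per-task attachment. */
	if (is_sampling_event(event) || (event->attach_state & PERF_ATTACH_TASK))
		return -EOPNOTSUPP;

	return 0;
}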
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 18b91b56af1d..6ca7be05229c 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) -static inline u32 armv8pmu_pmcr_read(void) +static inline u64 armv8pmu_pmcr_read(void) { return read_pmcr(); } -static inline void armv8pmu_pmcr_write(u32 val) +static inline void armv8pmu_pmcr_write(u64 val) { val &= ARMV8_PMU_PMCR_MASK; isb(); @@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, static void armv8pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 pmcr; + u64 pmcr; /* The counter and interrupt enable registers are unknown at reset. */ armv8pmu_disable_counter(U32_MAX); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 96c7f670c8f0..16acd4dcdb96 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -22,7 +22,7 @@ #include <asm/errata_list.h> #include <asm/sbi.h> -#include <asm/hwcap.h> +#include <asm/cpufeature.h> #define SYSCTL_NO_USER_ACCESS 0 #define SYSCTL_USER_ACCESS 1 @@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_set_scounteren, (void *)event, 1); + pmu_sbi_set_scounteren((void *)event); } static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) @@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_reset_scounteren, (void *)event, 1); + pmu_sbi_reset_scounteren((void *)event); ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && @@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) /* Firmware counter don't support overflow yet */ fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); + if (fidx == RISCV_MAX_COUNTERS) { + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); + return IRQ_NONE; + } + event = cpu_hw_evt->events[fidx]; if (!event) { csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index 568491ed6829..69d9f4577b34 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -631,7 +631,7 @@ static int pwm_samsung_resume(struct device *dev) struct pwm_device *pwm = &chip->pwms[i]; struct samsung_pwm_channel *chan = &our_chip->channel[i]; - if (!(pwm->flags & PWMF_REQUESTED)) + if (!test_bit(PWMF_REQUESTED, &pwm->flags)) continue; if (our_chip->variant.output_mask & BIT(i)) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 35dbfacecf1c..70c9dd6b6a31 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1169,6 +1169,7 @@ config SPI_XTENSA_XTFPGA config SPI_ZYNQ_QSPI tristate "Xilinx Zynq QSPI controller" depends on ARCH_ZYNQ || COMPILE_TEST + depends on SPI_MEM help This enables support for the Zynq Quad SPI controller in master mode. 
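A minimal sketch of the guard added to the riscv_pmu_sbi overflow handler above: find_first_bit() returns the bitmap size when no bit is set, so the result must be range-checked before being used as an array index. This assumes a kernel context; DEMO_MAX_COUNTERS and demo_first_used_counter() are illustrative names, not part of the patch.

#include <linux/bitops.h>
#include <linux/bitmap.h>

#define DEMO_MAX_COUNTERS 64

/* Returns the first in-use counter index, or -1 if the bitmap is empty. */
static int demo_first_used_counter(const unsigned long *used_ctrs)
{
	unsigned long idx = find_first_bit(used_ctrs, DEMO_MAX_COUNTERS);

	if (idx == DEMO_MAX_COUNTERS)	/* no bit set: bail out, don't index */
		return -1;

	return idx;
}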
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 791df0e69105..8ead7acb99f3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3317,33 +3317,52 @@ void spi_unregister_controller(struct spi_controller *ctlr) } EXPORT_SYMBOL_GPL(spi_unregister_controller); +static inline int __spi_check_suspended(const struct spi_controller *ctlr) +{ + return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0; +} + +static inline void __spi_mark_suspended(struct spi_controller *ctlr) +{ + mutex_lock(&ctlr->bus_lock_mutex); + ctlr->flags |= SPI_CONTROLLER_SUSPENDED; + mutex_unlock(&ctlr->bus_lock_mutex); +} + +static inline void __spi_mark_resumed(struct spi_controller *ctlr) +{ + mutex_lock(&ctlr->bus_lock_mutex); + ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED; + mutex_unlock(&ctlr->bus_lock_mutex); +} + int spi_controller_suspend(struct spi_controller *ctlr) { - int ret; + int ret = 0; /* Basically no-ops for non-queued controllers */ - if (!ctlr->queued) - return 0; - - ret = spi_stop_queue(ctlr); - if (ret) - dev_err(&ctlr->dev, "queue stop failed\n"); + if (ctlr->queued) { + ret = spi_stop_queue(ctlr); + if (ret) + dev_err(&ctlr->dev, "queue stop failed\n"); + } + __spi_mark_suspended(ctlr); return ret; } EXPORT_SYMBOL_GPL(spi_controller_suspend); int spi_controller_resume(struct spi_controller *ctlr) { - int ret; - - if (!ctlr->queued) - return 0; + int ret = 0; - ret = spi_start_queue(ctlr); - if (ret) - dev_err(&ctlr->dev, "queue restart failed\n"); + __spi_mark_resumed(ctlr); + if (ctlr->queued) { + ret = spi_start_queue(ctlr); + if (ret) + dev_err(&ctlr->dev, "queue restart failed\n"); + } return ret; } EXPORT_SYMBOL_GPL(spi_controller_resume); @@ -4147,8 +4166,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s ctlr->cur_msg = msg; ret = __spi_pump_transfer_message(ctlr, msg, was_busy); if (ret) - goto out; - + dev_err(&ctlr->dev, "noqueue transfer failed\n"); ctlr->cur_msg = NULL; ctlr->fallback = false; @@ -4164,7 +4182,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s spi_idle_runtime_pm(ctlr); } -out: mutex_unlock(&ctlr->io_mutex); } @@ -4187,6 +4204,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) int status; struct spi_controller *ctlr = spi->controller; + if (__spi_check_suspended(ctlr)) { + dev_warn_once(&spi->dev, "Attempted to sync while suspend\n"); + return -ESHUTDOWN; + } + status = __spi_validate(spi, message); if (status != 0) return status; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 925ee1d61afb..8ca061d3bbb9 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -170,13 +170,6 @@ static const struct serial8250_config uart_config[] = { .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO, }, - [PORT_AR7] = { - .name = "AR7", - .fifo_size = 16, - .tx_loadsz = 16, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, - .flags = UART_CAP_FIFO /* | UART_CAP_AFE */, - }, [PORT_U6_16550A] = { .name = "U6_16550A", .fifo_size = 64, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c index b3a3cb165795..b137f3679343 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c @@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, if (blk->shared_backend) { blk->buffer = shared_buffer; } else { - blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT, + 
blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT, GFP_KERNEL); if (!blk->buffer) { ret = -ENOMEM; @@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void) goto parent_err; if (shared_backend) { - shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT, + shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT, GFP_KERNEL); if (!shared_buffer) { ret = -ENOMEM; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 30df5c58db73..da7ec77cdaff 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1582,7 +1582,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) err: put_device(&v->dev); - ida_simple_remove(&vhost_vdpa_ida, v->minor); return r; } diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index b18c6b4f129a..305f396c764c 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -3752,7 +3752,7 @@ release: } -static int __exit amifb_remove(struct platform_device *pdev) +static void __exit amifb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); @@ -3765,11 +3765,16 @@ static int __exit amifb_remove(struct platform_device *pdev) chipfree(); framebuffer_release(info); amifb_video_off(); - return 0; } -static struct platform_driver amifb_driver = { - .remove = __exit_p(amifb_remove), +/* + * amifb_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this ok because they cannot get unboud at + * runtime. The driver needs to be marked with __refdata, otherwise modpost + * triggers a section mismatch warning. + */ +static struct platform_driver amifb_driver __refdata = { + .remove_new = __exit_p(amifb_remove), .driver = { .name = "amiga-video", }, diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index a908db233409..9e391e5eaf9d 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -220,7 +220,7 @@ static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int } } -static const struct fb_fix_screeninfo atmel_lcdfb_fix __initconst = { +static const struct fb_fix_screeninfo atmel_lcdfb_fix = { .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, @@ -841,7 +841,7 @@ static void atmel_lcdfb_task(struct work_struct *work) atmel_lcdfb_reset(sinfo); } -static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo) +static int atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo) { struct fb_info *info = sinfo->info; int ret = 0; @@ -1017,7 +1017,7 @@ put_display_node: return ret; } -static int __init atmel_lcdfb_probe(struct platform_device *pdev) +static int atmel_lcdfb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fb_info *info; @@ -1223,14 +1223,14 @@ out: return ret; } -static int __exit atmel_lcdfb_remove(struct platform_device *pdev) +static void atmel_lcdfb_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fb_info *info = dev_get_drvdata(dev); struct atmel_lcdfb_info *sinfo; if (!info || !info->par) - return 0; + return; sinfo = info->par; cancel_work_sync(&sinfo->task); @@ -1252,8 +1252,6 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev) } framebuffer_release(info); - - return 0; } #ifdef CONFIG_PM @@ -1301,7 +1299,8 @@ static int atmel_lcdfb_resume(struct platform_device *pdev) #endif static struct platform_driver atmel_lcdfb_driver = { - .remove = __exit_p(atmel_lcdfb_remove), + .probe = atmel_lcdfb_probe, + .remove_new = 
atmel_lcdfb_remove, .suspend = atmel_lcdfb_suspend, .resume = atmel_lcdfb_resume, .driver = { @@ -1309,8 +1308,7 @@ static struct platform_driver atmel_lcdfb_driver = { .of_match_table = atmel_lcdfb_dt_ids, }, }; - -module_platform_driver_probe(atmel_lcdfb_driver, atmel_lcdfb_probe); +module_platform_driver(atmel_lcdfb_driver); MODULE_DESCRIPTION("AT91 LCD Controller framebuffer driver"); MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fbd9f069ac2..0bced82fa494 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s) * Workaround for failed writing desc register of planes. * Needed with MPC5121 DIU rev 2.0 silicon. */ -void wr_reg_wa(u32 *reg, u32 val) +static void wr_reg_wa(u32 *reg, u32 val) { do { out_be32(reg, val); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index bf59daf862fc..a80939fe2ee6 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1013,6 +1013,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) } else if (IS_ENABLED(CONFIG_SYSFB)) { base = screen_info.lfb_base; size = screen_info.lfb_size; + } else { + goto err1; } /* diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index e7e03e920729..660499260f46 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1421,7 +1421,6 @@ static int init_imstt(struct fb_info *info) if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); - framebuffer_release(info); return -ENODEV; } @@ -1453,14 +1452,11 @@ static int init_imstt(struct fb_info *info) FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN; - if (fb_alloc_cmap(&info->cmap, 0, 0)) { - framebuffer_release(info); + if (fb_alloc_cmap(&info->cmap, 0, 0)) return -ENODEV; - } if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); return -ENODEV; } @@ -1500,8 +1496,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!request_mem_region(addr, size, "imsttfb")) { printk(KERN_ERR "imsttfb: Can't reserve memory region\n"); - framebuffer_release(info); - return -ENODEV; + ret = -ENODEV; + goto release_info; } switch (pdev->device) { @@ -1518,36 +1514,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) printk(KERN_INFO "imsttfb: Device 0x%x unknown, " "contact maintainer.\n", pdev->device); ret = -ENODEV; - goto error; + goto release_mem_region; } info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 
0x400000 : 0x800000); if (!info->screen_base) - goto error; + goto release_mem_region; info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); if (!par->dc_regs) - goto error; + goto unmap_screen_base; par->cmap_regs_phys = addr + 0x840000; par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000); if (!par->cmap_regs) - goto error; + goto unmap_dc_regs; info->pseudo_palette = par->palette; ret = init_imstt(info); if (ret) - goto error; + goto unmap_cmap_regs; pci_set_drvdata(pdev, info); - return ret; + return 0; -error: - if (par->dc_regs) - iounmap(par->dc_regs); - if (info->screen_base) - iounmap(info->screen_base); +unmap_cmap_regs: + iounmap(par->cmap_regs); +unmap_dc_regs: + iounmap(par->dc_regs); +unmap_screen_base: + iounmap(info->screen_base); +release_mem_region: release_mem_region(addr, size); +release_info: framebuffer_release(info); return ret; } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index dcb1b81d35db..b421b46d88ef 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -423,11 +423,9 @@ static void offb_init_fb(struct platform_device *parent, const char *name, fix = &info->fix; var = &info->var; - if (name) { - strcpy(fix->id, "OFfb "); - strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb ")); - fix->id[sizeof(fix->id) - 1] = '\0'; - } else + if (name) + snprintf(fix->id, sizeof(fix->id), "OFfb %s", name); + else snprintf(fix->id, sizeof(fix->id), "OFfb %pOFn", dp); diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 42c96f1cfc93..694cf6318782 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1643,17 +1643,16 @@ static int omapfb_do_probe(struct platform_device *pdev, r = -ENOMEM; goto cleanup; } - fbdev->int_irq = platform_get_irq(pdev, 0); - if (fbdev->int_irq < 0) { - r = -ENXIO; + + r = platform_get_irq(pdev, 0); + if (r < 0) goto cleanup; - } + fbdev->int_irq = r; - fbdev->ext_irq = platform_get_irq(pdev, 1); - if (fbdev->ext_irq < 0) { - r = -ENXIO; + r = platform_get_irq(pdev, 1); + if (r < 0) goto cleanup; - } + fbdev->ext_irq = r; init_state++; @@ -1857,20 +1856,13 @@ static int __init omapfb_setup(char *options) if (!strncmp(this_opt, "accel", 5)) def_accel = 1; else if (!strncmp(this_opt, "vram:", 5)) { + unsigned long long vram; char *suffix; - unsigned long vram; - vram = (simple_strtoul(this_opt + 5, &suffix, 0)); + + vram = memparse(this_opt + 5, &suffix); switch (suffix[0]) { case '\0': break; - case 'm': - case 'M': - vram *= 1024; - fallthrough; - case 'k': - case 'K': - vram *= 1024; - break; default: pr_debug("omapfb: invalid vram suffix %c\n", suffix[0]); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c index 0daaf9f89bab..c6786726a1af 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c @@ -221,7 +221,7 @@ err_reg: return r; } -static int __exit tvc_remove(struct platform_device *pdev) +static void tvc_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -233,8 +233,6 @@ static int __exit tvc_remove(struct platform_device *pdev) tvc_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id tvc_of_match[] = { @@ -247,11 +245,10 @@ MODULE_DEVICE_TABLE(of, 
tvc_of_match); static struct platform_driver tvc_connector_driver = { .probe = tvc_probe, - .remove = __exit_p(tvc_remove), + .remove_new = tvc_remove, .driver = { .name = "connector-analog-tv", .of_match_table = tvc_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index c8ad3ef42bd3..0cc9294f89b4 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -303,7 +303,7 @@ err_reg: return r; } -static int __exit dvic_remove(struct platform_device *pdev) +static void dvic_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -317,8 +317,6 @@ static int __exit dvic_remove(struct platform_device *pdev) omap_dss_put_device(in); i2c_put_adapter(ddata->i2c_adapter); - - return 0; } static const struct of_device_id dvic_of_match[] = { @@ -330,11 +328,10 @@ MODULE_DEVICE_TABLE(of, dvic_of_match); static struct platform_driver dvi_connector_driver = { .probe = dvic_probe, - .remove = __exit_p(dvic_remove), + .remove_new = dvic_remove, .driver = { .name = "connector-dvi", .of_match_table = dvic_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c index 8f9ff9fb4ca4..b862a32670ae 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c @@ -249,7 +249,7 @@ err_reg: return r; } -static int __exit hdmic_remove(struct platform_device *pdev) +static void hdmic_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -261,8 +261,6 @@ static int __exit hdmic_remove(struct platform_device *pdev) hdmic_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id hdmic_of_match[] = { @@ -274,11 +272,10 @@ MODULE_DEVICE_TABLE(of, hdmic_of_match); static struct platform_driver hdmi_connector_driver = { .probe = hdmic_probe, - .remove = __exit_p(hdmic_remove), + .remove_new = hdmic_remove, .driver = { .name = "connector-hdmi", .of_match_table = hdmic_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c index dd29dc5c77ec..f0d3eb581166 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c @@ -231,7 +231,7 @@ err_reg: return r; } -static int __exit opa362_remove(struct platform_device *pdev) +static void opa362_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -248,8 +248,6 @@ static int __exit opa362_remove(struct platform_device *pdev) opa362_disconnect(dssdev, dssdev->dst); omap_dss_put_device(in); - - return 0; } static const struct of_device_id opa362_of_match[] = { @@ -260,11 +258,10 @@ MODULE_DEVICE_TABLE(of, opa362_of_match); static struct platform_driver opa362_driver = { .probe = opa362_probe, - .remove = __exit_p(opa362_remove), + .remove_new = opa362_remove, .driver = { .name = "amplifier-opa362", .of_match_table = opa362_of_match, - .suppress_bind_attrs = true, }, }; diff 
--git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c index 7bac420169a6..c8aca4592949 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c @@ -217,7 +217,7 @@ err_reg: return r; } -static int __exit tfp410_remove(struct platform_device *pdev) +static void tfp410_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -234,8 +234,6 @@ static int __exit tfp410_remove(struct platform_device *pdev) tfp410_disconnect(dssdev, dssdev->dst); omap_dss_put_device(in); - - return 0; } static const struct of_device_id tfp410_of_match[] = { @@ -247,11 +245,10 @@ MODULE_DEVICE_TABLE(of, tfp410_of_match); static struct platform_driver tfp410_driver = { .probe = tfp410_probe, - .remove = __exit_p(tfp410_remove), + .remove_new = tfp410_remove, .driver = { .name = "tfp410", .of_match_table = tfp410_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c index 67f0c9250e9e..eb3926d0361b 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c @@ -283,7 +283,7 @@ err_gpio: return r; } -static int __exit tpd_remove(struct platform_device *pdev) +static void tpd_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -300,8 +300,6 @@ static int __exit tpd_remove(struct platform_device *pdev) tpd_disconnect(dssdev, dssdev->dst); omap_dss_put_device(in); - - return 0; } static const struct of_device_id tpd_of_match[] = { @@ -313,11 +311,10 @@ MODULE_DEVICE_TABLE(of, tpd_of_match); static struct platform_driver tpd_driver = { .probe = tpd_probe, - .remove = __exit_p(tpd_remove), + .remove_new = tpd_remove, .driver = { .name = "tpd12s015", .of_match_table = tpd_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c index 9790053c5877..937f9091274f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c @@ -211,7 +211,7 @@ err_reg: return r; } -static int __exit panel_dpi_remove(struct platform_device *pdev) +static void panel_dpi_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -223,8 +223,6 @@ static int __exit panel_dpi_remove(struct platform_device *pdev) panel_dpi_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id panel_dpi_of_match[] = { @@ -236,11 +234,10 @@ MODULE_DEVICE_TABLE(of, panel_dpi_of_match); static struct platform_driver panel_dpi_driver = { .probe = panel_dpi_probe, - .remove = __exit_p(panel_dpi_remove), + .remove_new = panel_dpi_remove, .driver = { .name = "panel-dpi", .of_match_table = panel_dpi_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 77fce1223a64..adb8881bac28 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ 
b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -1241,7 +1241,7 @@ err_reg: return r; } -static int __exit dsicm_remove(struct platform_device *pdev) +static void dsicm_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -1269,8 +1269,6 @@ static int __exit dsicm_remove(struct platform_device *pdev) /* reset, to be sure that the panel is in a valid state */ dsicm_hw_reset(ddata); - - return 0; } static const struct of_device_id dsicm_of_match[] = { @@ -1282,11 +1280,10 @@ MODULE_DEVICE_TABLE(of, dsicm_of_match); static struct platform_driver dsicm_driver = { .probe = dsicm_probe, - .remove = __exit_p(dsicm_remove), + .remove_new = dsicm_remove, .driver = { .name = "panel-dsi-cm", .of_match_table = dsicm_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c index cc30758300e2..e37268cf8dca 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c @@ -292,7 +292,7 @@ err_reg: return r; } -static int __exit sharp_ls_remove(struct platform_device *pdev) +static void sharp_ls_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -304,8 +304,6 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) sharp_ls_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id sharp_ls_of_match[] = { @@ -317,11 +315,10 @@ MODULE_DEVICE_TABLE(of, sharp_ls_of_match); static struct platform_driver sharp_ls_driver = { .probe = sharp_ls_probe, - .remove = __exit_p(sharp_ls_remove), + .remove_new = sharp_ls_remove, .driver = { .name = "panel-sharp-ls037v7dw01", .of_match_table = sharp_ls_of_match, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/vrfb.c b/drivers/video/fbdev/omap2/omapfb/vrfb.c index ee0dd4c6a646..568e6e1eca62 100644 --- a/drivers/video/fbdev/omap2/omapfb/vrfb.c +++ b/drivers/video/fbdev/omap2/omapfb/vrfb.c @@ -368,17 +368,10 @@ static int __init vrfb_probe(struct platform_device *pdev) return 0; } -static void __exit vrfb_remove(struct platform_device *pdev) -{ - vrfb_loaded = false; -} - static struct platform_driver vrfb_driver = { .driver.name = "omapvrfb", - .remove = __exit_p(vrfb_remove), }; - -module_platform_driver_probe(vrfb_driver, vrfb_probe); +builtin_platform_driver_probe(vrfb_driver, vrfb_probe); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); MODULE_DESCRIPTION("OMAP VRFB"); diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index 58868f8880d6..a52b1ba43a48 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c @@ -574,7 +574,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg) break; case VIAFB_SET_GAMMA_LUT: - viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32)); + viafb_gamma_table = memdup_array_user(argp, 256, sizeof(u32)); if (IS_ERR(viafb_gamma_table)) return PTR_ERR(viafb_gamma_table); viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index c2524a7207cf..7a5593997e0e 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -242,7 
+242,7 @@ void vp_del_vqs(struct virtio_device *vdev) if (v != VIRTIO_MSI_NO_VECTOR) { int irq = pci_irq_vector(vp_dev->pci_dev, v); - irq_set_affinity_hint(irq, NULL); + irq_update_affinity_hint(irq, NULL); free_irq(irq, vq); } } @@ -443,10 +443,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); if (!cpu_mask) - irq_set_affinity_hint(irq, NULL); + irq_update_affinity_hint(irq, NULL); else { cpumask_copy(mask, cpu_mask); - irq_set_affinity_hint(irq, mask); + irq_set_affinity_and_hint(irq, mask); } } return 0; diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index e2a1fe7bb66c..7de8b1ebabac 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -294,9 +294,10 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) err = -EINVAL; mdev->common = vp_modern_map_capability(mdev, common, - sizeof(struct virtio_pci_common_cfg), 4, - 0, sizeof(struct virtio_pci_modern_common_cfg), - &mdev->common_len, NULL); + sizeof(struct virtio_pci_common_cfg), 4, 0, + offsetofend(struct virtio_pci_modern_common_cfg, + queue_reset), + &mdev->common_len, NULL); if (!mdev->common) goto err_map_common; mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig deleted file mode 100644 index e7f9492a0b04..000000000000 --- a/drivers/vlynq/Kconfig +++ /dev/null @@ -1,21 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -menu "TI VLYNQ" - depends on AR7 - -config VLYNQ - bool "TI VLYNQ bus support" - help - Support for Texas Instruments(R) VLYNQ bus. - The VLYNQ bus is a high-speed, serial and packetized - data bus which allows external peripherals of a SoC - to appear into the system's main memory. - - If unsure, say N - -config VLYNQ_DEBUG - bool "VLYNQ bus debug" - depends on VLYNQ && DEBUG_KERNEL - help - Turn on VLYNQ bus debugging. 
- -endmenu diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile deleted file mode 100644 index d9ce5b2b5ce0..000000000000 --- a/drivers/vlynq/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for kernel vlynq drivers -# - -obj-$(CONFIG_VLYNQ) += vlynq.o diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c deleted file mode 100644 index 4af6615808cc..000000000000 --- a/drivers/vlynq/vlynq.c +++ /dev/null @@ -1,799 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> - * - * Parts of the VLYNQ specification can be found here: - * http://www.ti.com/litv/pdf/sprue36a - */ - -#include <linux/init.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/irq.h> - -#include <linux/vlynq.h> - -#define VLYNQ_CTRL_PM_ENABLE 0x80000000 -#define VLYNQ_CTRL_CLOCK_INT 0x00008000 -#define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16) -#define VLYNQ_CTRL_INT_LOCAL 0x00004000 -#define VLYNQ_CTRL_INT_ENABLE 0x00002000 -#define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8) -#define VLYNQ_CTRL_INT2CFG 0x00000080 -#define VLYNQ_CTRL_RESET 0x00000001 - -#define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16) - -#define VLYNQ_INT_OFFSET 0x00000014 -#define VLYNQ_REMOTE_OFFSET 0x00000080 - -#define VLYNQ_STATUS_LINK 0x00000001 -#define VLYNQ_STATUS_LERROR 0x00000080 -#define VLYNQ_STATUS_RERROR 0x00000100 - -#define VINT_ENABLE 0x00000100 -#define VINT_TYPE_EDGE 0x00000080 -#define VINT_LEVEL_LOW 0x00000040 -#define VINT_VECTOR(x) ((x) & 0x1f) -#define VINT_OFFSET(irq) (8 * ((irq) % 4)) - -#define VLYNQ_AUTONEGO_V2 0x00010000 - -struct vlynq_regs { - u32 revision; - u32 control; - u32 status; - u32 int_prio; - u32 int_status; - u32 int_pending; - u32 int_ptr; - u32 tx_offset; - struct vlynq_mapping rx_mapping[4]; - u32 chip; - u32 autonego; - u32 unused[6]; - u32 int_device[8]; -}; - -#ifdef CONFIG_VLYNQ_DEBUG -static void vlynq_dump_regs(struct vlynq_device *dev) -{ - int i; - - printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n", - dev->local, dev->remote); - for (i = 0; i < 32; i++) { - printk(KERN_DEBUG "VLYNQ: local %d: %08x\n", - i + 1, ((u32 *)dev->local)[i]); - printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n", - i + 1, ((u32 *)dev->remote)[i]); - } -} - -static void vlynq_dump_mem(u32 *base, int count) -{ - int i; - - for (i = 0; i < (count + 3) / 4; i++) { - if (i % 4 == 0) - printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4); - printk(KERN_DEBUG " 0x%08x", *(base + i)); - } - printk(KERN_DEBUG "\n"); -} -#endif - -/* Check the VLYNQ link status with a given device */ -static int vlynq_linked(struct vlynq_device *dev) -{ - int i; - - for (i = 0; i < 100; i++) - if (readl(&dev->local->status) & VLYNQ_STATUS_LINK) - return 1; - else - cpu_relax(); - - return 0; -} - -static void vlynq_reset(struct vlynq_device *dev) -{ - writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET, - &dev->local->control); - - /* Wait for the devices to finish resetting */ - msleep(5); - - /* Remove reset bit */ - writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET, - &dev->local->control); - - /* Give some time for the devices to settle */ - msleep(5); -} - -static void vlynq_irq_unmask(struct irq_data *d) -{ - struct vlynq_device *dev = irq_data_get_irq_chip_data(d); - int virq; - 
u32 val; - - BUG_ON(!dev); - virq = d->irq - dev->irq_start; - val = readl(&dev->remote->int_device[virq >> 2]); - val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); - writel(val, &dev->remote->int_device[virq >> 2]); -} - -static void vlynq_irq_mask(struct irq_data *d) -{ - struct vlynq_device *dev = irq_data_get_irq_chip_data(d); - int virq; - u32 val; - - BUG_ON(!dev); - virq = d->irq - dev->irq_start; - val = readl(&dev->remote->int_device[virq >> 2]); - val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); - writel(val, &dev->remote->int_device[virq >> 2]); -} - -static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type) -{ - struct vlynq_device *dev = irq_data_get_irq_chip_data(d); - int virq; - u32 val; - - BUG_ON(!dev); - virq = d->irq - dev->irq_start; - val = readl(&dev->remote->int_device[virq >> 2]); - switch (flow_type & IRQ_TYPE_SENSE_MASK) { - case IRQ_TYPE_EDGE_RISING: - case IRQ_TYPE_EDGE_FALLING: - case IRQ_TYPE_EDGE_BOTH: - val |= VINT_TYPE_EDGE << VINT_OFFSET(virq); - val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); - break; - case IRQ_TYPE_LEVEL_HIGH: - val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); - val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); - break; - case IRQ_TYPE_LEVEL_LOW: - val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); - val |= VINT_LEVEL_LOW << VINT_OFFSET(virq); - break; - default: - return -EINVAL; - } - writel(val, &dev->remote->int_device[virq >> 2]); - return 0; -} - -static void vlynq_local_ack(struct irq_data *d) -{ - struct vlynq_device *dev = irq_data_get_irq_chip_data(d); - u32 status = readl(&dev->local->status); - - pr_debug("%s: local status: 0x%08x\n", - dev_name(&dev->dev), status); - writel(status, &dev->local->status); -} - -static void vlynq_remote_ack(struct irq_data *d) -{ - struct vlynq_device *dev = irq_data_get_irq_chip_data(d); - u32 status = readl(&dev->remote->status); - - pr_debug("%s: remote status: 0x%08x\n", - dev_name(&dev->dev), status); - writel(status, &dev->remote->status); -} - -static irqreturn_t vlynq_irq(int irq, void *dev_id) -{ - struct vlynq_device *dev = dev_id; - u32 status; - int virq = 0; - - status = readl(&dev->local->int_status); - writel(status, &dev->local->int_status); - - if (unlikely(!status)) - spurious_interrupt(); - - while (status) { - if (status & 1) - do_IRQ(dev->irq_start + virq); - status >>= 1; - virq++; - } - - return IRQ_HANDLED; -} - -static struct irq_chip vlynq_irq_chip = { - .name = "vlynq", - .irq_unmask = vlynq_irq_unmask, - .irq_mask = vlynq_irq_mask, - .irq_set_type = vlynq_irq_type, -}; - -static struct irq_chip vlynq_local_chip = { - .name = "vlynq local error", - .irq_unmask = vlynq_irq_unmask, - .irq_mask = vlynq_irq_mask, - .irq_ack = vlynq_local_ack, -}; - -static struct irq_chip vlynq_remote_chip = { - .name = "vlynq local error", - .irq_unmask = vlynq_irq_unmask, - .irq_mask = vlynq_irq_mask, - .irq_ack = vlynq_remote_ack, -}; - -static int vlynq_setup_irq(struct vlynq_device *dev) -{ - u32 val; - int i, virq; - - if (dev->local_irq == dev->remote_irq) { - printk(KERN_ERR - "%s: local vlynq irq should be different from remote\n", - dev_name(&dev->dev)); - return -EINVAL; - } - - /* Clear local and remote error bits */ - writel(readl(&dev->local->status), &dev->local->status); - writel(readl(&dev->remote->status), &dev->remote->status); - - /* Now setup interrupts */ - val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq); - val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL | - VLYNQ_CTRL_INT2CFG; - val |= readl(&dev->local->control); - writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr); - 
writel(val, &dev->local->control); - - val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq); - val |= VLYNQ_CTRL_INT_ENABLE; - val |= readl(&dev->remote->control); - writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr); - writel(val, &dev->remote->int_ptr); - writel(val, &dev->remote->control); - - for (i = dev->irq_start; i <= dev->irq_end; i++) { - virq = i - dev->irq_start; - if (virq == dev->local_irq) { - irq_set_chip_and_handler(i, &vlynq_local_chip, - handle_level_irq); - irq_set_chip_data(i, dev); - } else if (virq == dev->remote_irq) { - irq_set_chip_and_handler(i, &vlynq_remote_chip, - handle_level_irq); - irq_set_chip_data(i, dev); - } else { - irq_set_chip_and_handler(i, &vlynq_irq_chip, - handle_simple_irq); - irq_set_chip_data(i, dev); - writel(0, &dev->remote->int_device[virq >> 2]); - } - } - - if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) { - printk(KERN_ERR "%s: request_irq failed\n", - dev_name(&dev->dev)); - return -EAGAIN; - } - - return 0; -} - -static void vlynq_device_release(struct device *dev) -{ - struct vlynq_device *vdev = to_vlynq_device(dev); - kfree(vdev); -} - -static int vlynq_device_match(struct device *dev, - struct device_driver *drv) -{ - struct vlynq_device *vdev = to_vlynq_device(dev); - struct vlynq_driver *vdrv = to_vlynq_driver(drv); - struct vlynq_device_id *ids = vdrv->id_table; - - while (ids->id) { - if (ids->id == vdev->dev_id) { - vdev->divisor = ids->divisor; - vlynq_set_drvdata(vdev, ids); - printk(KERN_INFO "Driver found for VLYNQ " - "device: %08x\n", vdev->dev_id); - return 1; - } - printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver" - " for VLYNQ device: %08x\n", ids->id, vdev->dev_id); - ids++; - } - return 0; -} - -static int vlynq_device_probe(struct device *dev) -{ - struct vlynq_device *vdev = to_vlynq_device(dev); - struct vlynq_driver *drv = to_vlynq_driver(dev->driver); - struct vlynq_device_id *id = vlynq_get_drvdata(vdev); - int result = -ENODEV; - - if (drv->probe) - result = drv->probe(vdev, id); - if (result) - put_device(dev); - return result; -} - -static void vlynq_device_remove(struct device *dev) -{ - struct vlynq_driver *drv = to_vlynq_driver(dev->driver); - - if (drv->remove) - drv->remove(to_vlynq_device(dev)); -} - -int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner) -{ - driver->driver.name = driver->name; - driver->driver.bus = &vlynq_bus_type; - return driver_register(&driver->driver); -} -EXPORT_SYMBOL(__vlynq_register_driver); - -void vlynq_unregister_driver(struct vlynq_driver *driver) -{ - driver_unregister(&driver->driver); -} -EXPORT_SYMBOL(vlynq_unregister_driver); - -/* - * A VLYNQ remote device can clock the VLYNQ bus master - * using a dedicated clock line. In that case, both the - * remove device and the bus master should have the same - * serial clock dividers configured. Iterate through the - * 8 possible dividers until we actually link with the - * device. - */ -static int __vlynq_try_remote(struct vlynq_device *dev) -{ - int i; - - vlynq_reset(dev); - for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ? - i <= vlynq_rdiv8 : i >= vlynq_rdiv2; - dev->dev_id ? 
i++ : i--) { - - if (!vlynq_linked(dev)) - break; - - writel((readl(&dev->remote->control) & - ~VLYNQ_CTRL_CLOCK_MASK) | - VLYNQ_CTRL_CLOCK_INT | - VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), - &dev->remote->control); - writel((readl(&dev->local->control) - & ~(VLYNQ_CTRL_CLOCK_INT | - VLYNQ_CTRL_CLOCK_MASK)) | - VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), - &dev->local->control); - - if (vlynq_linked(dev)) { - printk(KERN_DEBUG - "%s: using remote clock divisor %d\n", - dev_name(&dev->dev), i - vlynq_rdiv1 + 1); - dev->divisor = i; - return 0; - } else { - vlynq_reset(dev); - } - } - - return -ENODEV; -} - -/* - * A VLYNQ remote device can be clocked by the VLYNQ bus - * master using a dedicated clock line. In that case, only - * the bus master configures the serial clock divider. - * Iterate through the 8 possible dividers until we - * actually get a link with the device. - */ -static int __vlynq_try_local(struct vlynq_device *dev) -{ - int i; - - vlynq_reset(dev); - - for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ? - i <= vlynq_ldiv8 : i >= vlynq_ldiv2; - dev->dev_id ? i++ : i--) { - - writel((readl(&dev->local->control) & - ~VLYNQ_CTRL_CLOCK_MASK) | - VLYNQ_CTRL_CLOCK_INT | - VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1), - &dev->local->control); - - if (vlynq_linked(dev)) { - printk(KERN_DEBUG - "%s: using local clock divisor %d\n", - dev_name(&dev->dev), i - vlynq_ldiv1 + 1); - dev->divisor = i; - return 0; - } else { - vlynq_reset(dev); - } - } - - return -ENODEV; -} - -/* - * When using external clocking method, serial clock - * is supplied by an external oscillator, therefore we - * should mask the local clock bit in the clock control - * register for both the bus master and the remote device. - */ -static int __vlynq_try_external(struct vlynq_device *dev) -{ - vlynq_reset(dev); - if (!vlynq_linked(dev)) - return -ENODEV; - - writel((readl(&dev->remote->control) & - ~VLYNQ_CTRL_CLOCK_INT), - &dev->remote->control); - - writel((readl(&dev->local->control) & - ~VLYNQ_CTRL_CLOCK_INT), - &dev->local->control); - - if (vlynq_linked(dev)) { - printk(KERN_DEBUG "%s: using external clock\n", - dev_name(&dev->dev)); - dev->divisor = vlynq_div_external; - return 0; - } - - return -ENODEV; -} - -static int __vlynq_enable_device(struct vlynq_device *dev) -{ - int result; - struct plat_vlynq_ops *ops = dev->dev.platform_data; - - result = ops->on(dev); - if (result) - return result; - - switch (dev->divisor) { - case vlynq_div_external: - case vlynq_div_auto: - /* When the device is brought from reset it should have clock - * generation negotiated by hardware. 
- * Check which device is generating clocks and perform setup - * accordingly */ - if (vlynq_linked(dev) && readl(&dev->remote->control) & - VLYNQ_CTRL_CLOCK_INT) { - if (!__vlynq_try_remote(dev) || - !__vlynq_try_local(dev) || - !__vlynq_try_external(dev)) - return 0; - } else { - if (!__vlynq_try_external(dev) || - !__vlynq_try_local(dev) || - !__vlynq_try_remote(dev)) - return 0; - } - break; - case vlynq_ldiv1: - case vlynq_ldiv2: - case vlynq_ldiv3: - case vlynq_ldiv4: - case vlynq_ldiv5: - case vlynq_ldiv6: - case vlynq_ldiv7: - case vlynq_ldiv8: - writel(VLYNQ_CTRL_CLOCK_INT | - VLYNQ_CTRL_CLOCK_DIV(dev->divisor - - vlynq_ldiv1), &dev->local->control); - writel(0, &dev->remote->control); - if (vlynq_linked(dev)) { - printk(KERN_DEBUG - "%s: using local clock divisor %d\n", - dev_name(&dev->dev), - dev->divisor - vlynq_ldiv1 + 1); - return 0; - } - break; - case vlynq_rdiv1: - case vlynq_rdiv2: - case vlynq_rdiv3: - case vlynq_rdiv4: - case vlynq_rdiv5: - case vlynq_rdiv6: - case vlynq_rdiv7: - case vlynq_rdiv8: - writel(0, &dev->local->control); - writel(VLYNQ_CTRL_CLOCK_INT | - VLYNQ_CTRL_CLOCK_DIV(dev->divisor - - vlynq_rdiv1), &dev->remote->control); - if (vlynq_linked(dev)) { - printk(KERN_DEBUG - "%s: using remote clock divisor %d\n", - dev_name(&dev->dev), - dev->divisor - vlynq_rdiv1 + 1); - return 0; - } - break; - } - - ops->off(dev); - return -ENODEV; -} - -int vlynq_enable_device(struct vlynq_device *dev) -{ - struct plat_vlynq_ops *ops = dev->dev.platform_data; - int result = -ENODEV; - - result = __vlynq_enable_device(dev); - if (result) - return result; - - result = vlynq_setup_irq(dev); - if (result) - ops->off(dev); - - dev->enabled = !result; - return result; -} -EXPORT_SYMBOL(vlynq_enable_device); - - -void vlynq_disable_device(struct vlynq_device *dev) -{ - struct plat_vlynq_ops *ops = dev->dev.platform_data; - - dev->enabled = 0; - free_irq(dev->irq, dev); - ops->off(dev); -} -EXPORT_SYMBOL(vlynq_disable_device); - -int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping) -{ - int i; - - if (!dev->enabled) - return -ENXIO; - - writel(tx_offset, &dev->local->tx_offset); - for (i = 0; i < 4; i++) { - writel(mapping[i].offset, &dev->local->rx_mapping[i].offset); - writel(mapping[i].size, &dev->local->rx_mapping[i].size); - } - return 0; -} -EXPORT_SYMBOL(vlynq_set_local_mapping); - -int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping) -{ - int i; - - if (!dev->enabled) - return -ENXIO; - - writel(tx_offset, &dev->remote->tx_offset); - for (i = 0; i < 4; i++) { - writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset); - writel(mapping[i].size, &dev->remote->rx_mapping[i].size); - } - return 0; -} -EXPORT_SYMBOL(vlynq_set_remote_mapping); - -int vlynq_set_local_irq(struct vlynq_device *dev, int virq) -{ - int irq = dev->irq_start + virq; - if (dev->enabled) - return -EBUSY; - - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - if (virq == dev->remote_irq) - return -EINVAL; - - dev->local_irq = virq; - - return 0; -} -EXPORT_SYMBOL(vlynq_set_local_irq); - -int vlynq_set_remote_irq(struct vlynq_device *dev, int virq) -{ - int irq = dev->irq_start + virq; - if (dev->enabled) - return -EBUSY; - - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - if (virq == dev->local_irq) - return -EINVAL; - - dev->remote_irq = virq; - - return 0; -} -EXPORT_SYMBOL(vlynq_set_remote_irq); - -static int vlynq_probe(struct platform_device 
*pdev) -{ - struct vlynq_device *dev; - struct resource *regs_res, *mem_res, *irq_res; - int len, result; - - regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - if (!regs_res) - return -ENODEV; - - mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!mem_res) - return -ENODEV; - - irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq"); - if (!irq_res) - return -ENODEV; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - printk(KERN_ERR - "vlynq: failed to allocate device structure\n"); - return -ENOMEM; - } - - dev->id = pdev->id; - dev->dev.bus = &vlynq_bus_type; - dev->dev.parent = &pdev->dev; - dev_set_name(&dev->dev, "vlynq%d", dev->id); - dev->dev.platform_data = pdev->dev.platform_data; - dev->dev.release = vlynq_device_release; - - dev->regs_start = regs_res->start; - dev->regs_end = regs_res->end; - dev->mem_start = mem_res->start; - dev->mem_end = mem_res->end; - - len = resource_size(regs_res); - if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { - printk(KERN_ERR "%s: Can't request vlynq registers\n", - dev_name(&dev->dev)); - result = -ENXIO; - goto fail_request; - } - - dev->local = ioremap(regs_res->start, len); - if (!dev->local) { - printk(KERN_ERR "%s: Can't remap vlynq registers\n", - dev_name(&dev->dev)); - result = -ENXIO; - goto fail_remap; - } - - dev->remote = (struct vlynq_regs *)((void *)dev->local + - VLYNQ_REMOTE_OFFSET); - - dev->irq = platform_get_irq_byname(pdev, "irq"); - dev->irq_start = irq_res->start; - dev->irq_end = irq_res->end; - dev->local_irq = dev->irq_end - dev->irq_start; - dev->remote_irq = dev->local_irq - 1; - - if (device_register(&dev->dev)) - goto fail_register; - platform_set_drvdata(pdev, dev); - - printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n", - dev_name(&dev->dev), (void *)dev->regs_start, dev->irq, - (void *)dev->mem_start); - - dev->dev_id = 0; - dev->divisor = vlynq_div_auto; - result = __vlynq_enable_device(dev); - if (result == 0) { - dev->dev_id = readl(&dev->remote->chip); - ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev); - } - if (dev->dev_id) - printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id); - - return 0; - -fail_register: - iounmap(dev->local); -fail_remap: -fail_request: - release_mem_region(regs_res->start, len); - kfree(dev); - return result; -} - -static int vlynq_remove(struct platform_device *pdev) -{ - struct vlynq_device *dev = platform_get_drvdata(pdev); - - device_unregister(&dev->dev); - iounmap(dev->local); - release_mem_region(dev->regs_start, - dev->regs_end - dev->regs_start + 1); - - kfree(dev); - - return 0; -} - -static struct platform_driver vlynq_platform_driver = { - .driver.name = "vlynq", - .probe = vlynq_probe, - .remove = vlynq_remove, -}; - -struct bus_type vlynq_bus_type = { - .name = "vlynq", - .match = vlynq_device_match, - .probe = vlynq_device_probe, - .remove = vlynq_device_remove, -}; -EXPORT_SYMBOL(vlynq_bus_type); - -static int vlynq_init(void) -{ - int res = 0; - - res = bus_register(&vlynq_bus_type); - if (res) - goto fail_bus; - - res = platform_driver_register(&vlynq_platform_driver); - if (res) - goto fail_platform; - - return 0; - -fail_platform: - bus_unregister(&vlynq_bus_type); -fail_bus: - return res; -} - -static void vlynq_exit(void) -{ - platform_driver_unregister(&vlynq_platform_driver); - bus_unregister(&vlynq_bus_type); -} - -module_init(vlynq_init); -module_exit(vlynq_exit); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 
8cb6fa45d599..7d22051b15a2 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1768,12 +1768,6 @@ config SIBYTE_WDOG To compile this driver as a loadable module, choose M here. The module will be called sb_wdog. -config AR7_WDT - tristate "TI AR7 Watchdog Timer" - depends on AR7 || (MIPS && 32BIT && COMPILE_TEST) - help - Hardware driver for the TI AR7 Watchdog Timer. - config TXX9_WDT tristate "Toshiba TXx9 Watchdog Timer" depends on CPU_TX49XX || (MIPS && COMPILE_TEST) diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 7eab9de311cb..7cbc34514ec1 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -168,7 +168,6 @@ obj-$(CONFIG_INDYDOG) += indydog.o obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o -obj-$(CONFIG_AR7_WDT) += ar7_wdt.o obj-$(CONFIG_TXX9_WDT) += txx9wdt.o obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c deleted file mode 100644 index cdcaeb0961ac..000000000000 --- a/drivers/watchdog/ar7_wdt.c +++ /dev/null @@ -1,315 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * drivers/watchdog/ar7_wdt.c - * - * Copyright (C) 2007 Nicolas Thill <nico@openwrt.org> - * Copyright (c) 2005 Enrik Berkhan <Enrik.Berkhan@akk.org> - * - * Some code taken from: - * National Semiconductor SCx200 Watchdog support - * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/errno.h> -#include <linux/miscdevice.h> -#include <linux/platform_device.h> -#include <linux/watchdog.h> -#include <linux/fs.h> -#include <linux/ioport.h> -#include <linux/io.h> -#include <linux/uaccess.h> -#include <linux/clk.h> - -#include <asm/addrspace.h> -#include <asm/mach-ar7/ar7.h> - -#define LONGNAME "TI AR7 Watchdog Timer" - -MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>"); -MODULE_DESCRIPTION(LONGNAME); -MODULE_LICENSE("GPL"); - -static int margin = 60; -module_param(margin, int, 0); -MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); - -static bool nowayout = WATCHDOG_NOWAYOUT; -module_param(nowayout, bool, 0); -MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); - -#define READ_REG(x) readl((void __iomem *)&(x)) -#define WRITE_REG(x, v) writel((v), (void __iomem *)&(x)) - -struct ar7_wdt { - u32 kick_lock; - u32 kick; - u32 change_lock; - u32 change; - u32 disable_lock; - u32 disable; - u32 prescale_lock; - u32 prescale; -}; - -static unsigned long wdt_is_open; -static unsigned expect_close; -static DEFINE_SPINLOCK(wdt_lock); - -/* XXX currently fixed, allows max margin ~68.72 secs */ -#define prescale_value 0xffff - -/* Pointer to the remapped WDT IO space */ -static struct ar7_wdt *ar7_wdt; - -static struct clk *vbus_clk; - -static void ar7_wdt_kick(u32 value) -{ - WRITE_REG(ar7_wdt->kick_lock, 0x5555); - if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) { - WRITE_REG(ar7_wdt->kick_lock, 0xaaaa); - if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) { - WRITE_REG(ar7_wdt->kick, value); - return; - } - } - pr_err("failed to unlock WDT kick reg\n"); -} - -static void ar7_wdt_prescale(u32 value) -{ - WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a); - if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) { - WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5); - if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) { - WRITE_REG(ar7_wdt->prescale, 
value); - return; - } - } - pr_err("failed to unlock WDT prescale reg\n"); -} - -static void ar7_wdt_change(u32 value) -{ - WRITE_REG(ar7_wdt->change_lock, 0x6666); - if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) { - WRITE_REG(ar7_wdt->change_lock, 0xbbbb); - if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) { - WRITE_REG(ar7_wdt->change, value); - return; - } - } - pr_err("failed to unlock WDT change reg\n"); -} - -static void ar7_wdt_disable(u32 value) -{ - WRITE_REG(ar7_wdt->disable_lock, 0x7777); - if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) { - WRITE_REG(ar7_wdt->disable_lock, 0xcccc); - if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) { - WRITE_REG(ar7_wdt->disable_lock, 0xdddd); - if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) { - WRITE_REG(ar7_wdt->disable, value); - return; - } - } - } - pr_err("failed to unlock WDT disable reg\n"); -} - -static void ar7_wdt_update_margin(int new_margin) -{ - u32 change; - u32 vbus_rate; - - vbus_rate = clk_get_rate(vbus_clk); - change = new_margin * (vbus_rate / prescale_value); - if (change < 1) - change = 1; - if (change > 0xffff) - change = 0xffff; - ar7_wdt_change(change); - margin = change * prescale_value / vbus_rate; - pr_info("timer margin %d seconds (prescale %d, change %d, freq %d)\n", - margin, prescale_value, change, vbus_rate); -} - -static void ar7_wdt_enable_wdt(void) -{ - pr_debug("enabling watchdog timer\n"); - ar7_wdt_disable(1); - ar7_wdt_kick(1); -} - -static void ar7_wdt_disable_wdt(void) -{ - pr_debug("disabling watchdog timer\n"); - ar7_wdt_disable(0); -} - -static int ar7_wdt_open(struct inode *inode, struct file *file) -{ - /* only allow one at a time */ - if (test_and_set_bit(0, &wdt_is_open)) - return -EBUSY; - ar7_wdt_enable_wdt(); - expect_close = 0; - - return stream_open(inode, file); -} - -static int ar7_wdt_release(struct inode *inode, struct file *file) -{ - if (!expect_close) - pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n"); - else if (!nowayout) - ar7_wdt_disable_wdt(); - clear_bit(0, &wdt_is_open); - return 0; -} - -static ssize_t ar7_wdt_write(struct file *file, const char *data, - size_t len, loff_t *ppos) -{ - /* check for a magic close character */ - if (len) { - size_t i; - - spin_lock(&wdt_lock); - ar7_wdt_kick(1); - spin_unlock(&wdt_lock); - - expect_close = 0; - for (i = 0; i < len; ++i) { - char c; - if (get_user(c, data + i)) - return -EFAULT; - if (c == 'V') - expect_close = 1; - } - - } - return len; -} - -static long ar7_wdt_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - static const struct watchdog_info ident = { - .identity = LONGNAME, - .firmware_version = 1, - .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | - WDIOF_MAGICCLOSE), - }; - int new_margin; - - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user((struct watchdog_info *)arg, &ident, - sizeof(ident))) - return -EFAULT; - return 0; - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - if (put_user(0, (int *)arg)) - return -EFAULT; - return 0; - case WDIOC_KEEPALIVE: - ar7_wdt_kick(1); - return 0; - case WDIOC_SETTIMEOUT: - if (get_user(new_margin, (int *)arg)) - return -EFAULT; - if (new_margin < 1) - return -EINVAL; - - spin_lock(&wdt_lock); - ar7_wdt_update_margin(new_margin); - ar7_wdt_kick(1); - spin_unlock(&wdt_lock); - fallthrough; - case WDIOC_GETTIMEOUT: - if (put_user(margin, (int *)arg)) - return -EFAULT; - return 0; - default: - return -ENOTTY; - } -} - -static const struct file_operations ar7_wdt_fops = { - .owner = THIS_MODULE, - .write = 
ar7_wdt_write, - .unlocked_ioctl = ar7_wdt_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .open = ar7_wdt_open, - .release = ar7_wdt_release, - .llseek = no_llseek, -}; - -static struct miscdevice ar7_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &ar7_wdt_fops, -}; - -static int ar7_wdt_probe(struct platform_device *pdev) -{ - int rc; - - ar7_wdt = devm_platform_ioremap_resource_byname(pdev, "regs"); - if (IS_ERR(ar7_wdt)) - return PTR_ERR(ar7_wdt); - - vbus_clk = clk_get(NULL, "vbus"); - if (IS_ERR(vbus_clk)) { - pr_err("could not get vbus clock\n"); - return PTR_ERR(vbus_clk); - } - - ar7_wdt_disable_wdt(); - ar7_wdt_prescale(prescale_value); - ar7_wdt_update_margin(margin); - - rc = misc_register(&ar7_wdt_miscdev); - if (rc) { - pr_err("unable to register misc device\n"); - goto out; - } - return 0; - -out: - clk_put(vbus_clk); - vbus_clk = NULL; - return rc; -} - -static void ar7_wdt_remove(struct platform_device *pdev) -{ - misc_deregister(&ar7_wdt_miscdev); - clk_put(vbus_clk); - vbus_clk = NULL; -} - -static void ar7_wdt_shutdown(struct platform_device *pdev) -{ - if (!nowayout) - ar7_wdt_disable_wdt(); -} - -static struct platform_driver ar7_wdt_driver = { - .probe = ar7_wdt_probe, - .remove_new = ar7_wdt_remove, - .shutdown = ar7_wdt_shutdown, - .driver = { - .name = "ar7_wdt", - }, -}; - -module_platform_driver(ar7_wdt_driver); diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index b8f2f971c2f0..e3585330cf98 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl) int i; struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); + evtchn_port_t evtchn; /* Timer interrupt has highest priority. */ - irq = irq_from_virq(cpu, VIRQ_TIMER); + irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn); if (irq != -1) { - evtchn_port_t evtchn = evtchn_from_irq(irq); word_idx = evtchn / BITS_PER_LONG; bit_idx = evtchn % BITS_PER_LONG; if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) @@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) { if (sync_test_bit(i, BM(sh->evtchn_pending))) { int word_idx = i / BITS_PER_EVTCHN_WORD; - printk(" %d: event %d -> irq %d%s%s%s\n", + printk(" %d: event %d -> irq %u%s%s%s\n", cpu_from_evtchn(i), i, - get_evtchn_to_irq(i), + irq_from_evtchn(i), sync_test_bit(word_idx, BM(&v->evtchn_pending_sel)) ? "" : " l2-clear", !sync_test_bit(i, BM(sh->evtchn_mask)) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 6de6b084ea60..f5edb9e27e3c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; +/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */ +static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... 
XEN_NR_IPIS-1] = 0}; /* Event channel distribution data */ static atomic_t channels_on_cpu[NR_CPUS]; @@ -172,7 +174,7 @@ static int **evtchn_to_irq; #ifdef CONFIG_X86 static unsigned long *pirq_eoi_map; #endif -static bool (*pirq_needs_eoi)(unsigned irq); +static bool (*pirq_needs_eoi)(struct irq_info *info); #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) @@ -188,7 +190,6 @@ static struct irq_chip xen_lateeoi_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); -static void disable_dynirq(struct irq_data *data); static DEFINE_PER_CPU(unsigned int, irq_epoch); @@ -246,15 +247,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) return 0; } -int get_evtchn_to_irq(evtchn_port_t evtchn) -{ - if (evtchn >= xen_evtchn_max_channels()) - return -1; - if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) - return -1; - return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); -} - /* Get info for IRQ */ static struct irq_info *info_for_irq(unsigned irq) { @@ -272,6 +264,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info) irq_set_chip_data(irq, info); } +static struct irq_info *evtchn_to_info(evtchn_port_t evtchn) +{ + int irq; + + if (evtchn >= xen_evtchn_max_channels()) + return NULL; + if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) + return NULL; + irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); + + return (irq < 0) ? NULL : info_for_irq(irq); +} + /* Per CPU channel accounting */ static void channels_on_cpu_dec(struct irq_info *info) { @@ -298,6 +303,13 @@ static void channels_on_cpu_inc(struct irq_info *info) info->is_accounted = 1; } +static void xen_irq_free_desc(unsigned int irq) +{ + /* Legacy IRQ descriptors are managed by the arch. */ + if (irq >= nr_legacy_irqs()) + irq_free_desc(irq); +} + static void delayed_free_irq(struct work_struct *work) { struct irq_info *info = container_of(to_rcu_work(work), struct irq_info, @@ -309,14 +321,11 @@ static void delayed_free_irq(struct work_struct *work) kfree(info); - /* Legacy IRQ descriptors are managed by the arch. */ - if (irq >= nr_legacy_irqs()) - irq_free_desc(irq); + xen_irq_free_desc(irq); } /* Constructors for packed IRQ information. 
*/ static int xen_irq_info_common_setup(struct irq_info *info, - unsigned irq, enum xen_irq_type type, evtchn_port_t evtchn, unsigned short cpu) @@ -326,29 +335,27 @@ static int xen_irq_info_common_setup(struct irq_info *info, BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; - info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; info->mask_reason = EVT_MASK_REASON_EXPLICIT; raw_spin_lock_init(&info->lock); - ret = set_evtchn_to_irq(evtchn, irq); + ret = set_evtchn_to_irq(evtchn, info->irq); if (ret < 0) return ret; - irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); + irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN); return xen_evtchn_port_setup(evtchn); } -static int xen_irq_info_evtchn_setup(unsigned irq, +static int xen_irq_info_evtchn_setup(struct irq_info *info, evtchn_port_t evtchn, struct xenbus_device *dev) { - struct irq_info *info = info_for_irq(irq); int ret; - ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); + ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0); info->u.interdomain = dev; if (dev) atomic_inc(&dev->event_channels); @@ -356,49 +363,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq, return ret; } -static int xen_irq_info_ipi_setup(unsigned cpu, - unsigned irq, - evtchn_port_t evtchn, - enum ipi_vector ipi) +static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu, + evtchn_port_t evtchn, enum ipi_vector ipi) { - struct irq_info *info = info_for_irq(irq); - info->u.ipi = ipi; - per_cpu(ipi_to_irq, cpu)[ipi] = irq; + per_cpu(ipi_to_irq, cpu)[ipi] = info->irq; + per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn; - return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); + return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0); } -static int xen_irq_info_virq_setup(unsigned cpu, - unsigned irq, - evtchn_port_t evtchn, - unsigned virq) +static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu, + evtchn_port_t evtchn, unsigned int virq) { - struct irq_info *info = info_for_irq(irq); - info->u.virq = virq; - per_cpu(virq_to_irq, cpu)[virq] = irq; + per_cpu(virq_to_irq, cpu)[virq] = info->irq; - return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); + return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0); } -static int xen_irq_info_pirq_setup(unsigned irq, - evtchn_port_t evtchn, - unsigned pirq, - unsigned gsi, - uint16_t domid, - unsigned char flags) +static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn, + unsigned int pirq, unsigned int gsi, + uint16_t domid, unsigned char flags) { - struct irq_info *info = info_for_irq(irq); - info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.domid = domid; info->u.pirq.flags = flags; - return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); + return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0); } static void xen_irq_info_cleanup(struct irq_info *info) @@ -412,7 +407,7 @@ static void xen_irq_info_cleanup(struct irq_info *info) /* * Accessors for packed IRQ information. */ -evtchn_port_t evtchn_from_irq(unsigned irq) +static evtchn_port_t evtchn_from_irq(unsigned int irq) { const struct irq_info *info = NULL; @@ -426,64 +421,51 @@ evtchn_port_t evtchn_from_irq(unsigned irq) unsigned int irq_from_evtchn(evtchn_port_t evtchn) { - return get_evtchn_to_irq(evtchn); + struct irq_info *info = evtchn_to_info(evtchn); + + return info ? 
info->irq : -1; } EXPORT_SYMBOL_GPL(irq_from_evtchn); -int irq_from_virq(unsigned int cpu, unsigned int virq) +int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq, + evtchn_port_t *evtchn) { - return per_cpu(virq_to_irq, cpu)[virq]; + int irq = per_cpu(virq_to_irq, cpu)[virq]; + + *evtchn = evtchn_from_irq(irq); + + return irq; } -static enum ipi_vector ipi_from_irq(unsigned irq) +static enum ipi_vector ipi_from_irq(struct irq_info *info) { - struct irq_info *info = info_for_irq(irq); - BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } -static unsigned virq_from_irq(unsigned irq) +static unsigned int virq_from_irq(struct irq_info *info) { - struct irq_info *info = info_for_irq(irq); - BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } -static unsigned pirq_from_irq(unsigned irq) +static unsigned int pirq_from_irq(struct irq_info *info) { - struct irq_info *info = info_for_irq(irq); - BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; } -static enum xen_irq_type type_from_irq(unsigned irq) -{ - return info_for_irq(irq)->type; -} - -static unsigned cpu_from_irq(unsigned irq) -{ - return info_for_irq(irq)->cpu; -} - unsigned int cpu_from_evtchn(evtchn_port_t evtchn) { - int irq = get_evtchn_to_irq(evtchn); - unsigned ret = 0; - - if (irq != -1) - ret = cpu_from_irq(irq); + struct irq_info *info = evtchn_to_info(evtchn); - return ret; + return info ? info->cpu : 0; } static void do_mask(struct irq_info *info, u8 reason) @@ -515,36 +497,30 @@ static void do_unmask(struct irq_info *info, u8 reason) } #ifdef CONFIG_X86 -static bool pirq_check_eoi_map(unsigned irq) +static bool pirq_check_eoi_map(struct irq_info *info) { - return test_bit(pirq_from_irq(irq), pirq_eoi_map); + return test_bit(pirq_from_irq(info), pirq_eoi_map); } #endif -static bool pirq_needs_eoi_flag(unsigned irq) +static bool pirq_needs_eoi_flag(struct irq_info *info) { - struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } -static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu, +static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu, bool force_affinity) { - int irq = get_evtchn_to_irq(evtchn); - struct irq_info *info = info_for_irq(irq); - - BUG_ON(irq == -1); - if (IS_ENABLED(CONFIG_SMP) && force_affinity) { - struct irq_data *data = irq_get_irq_data(irq); + struct irq_data *data = irq_get_irq_data(info->irq); irq_data_update_affinity(data, cpumask_of(cpu)); irq_data_update_effective_affinity(data, cpumask_of(cpu)); } - xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu); + xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu); channels_on_cpu_dec(info); info->cpu = cpu; @@ -601,7 +577,9 @@ static void lateeoi_list_add(struct irq_info *info) spin_lock_irqsave(&eoi->eoi_list_lock, flags); - if (list_empty(&eoi->eoi_list)) { + elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, + eoi_list); + if (!elem || info->eoi_time < elem->eoi_time) { list_add(&info->eoi_list, &eoi->eoi_list); mod_delayed_work_on(info->eoi_cpu, system_wq, &eoi->delayed, delay); @@ -732,50 +710,49 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) } EXPORT_SYMBOL_GPL(xen_irq_lateeoi); -static void xen_irq_init(unsigned irq) +static struct irq_info *xen_irq_init(unsigned int irq) { struct irq_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); - if (info == NULL) - panic("Unable to allocate metadata for IRQ%d\n", irq); + if (info) { + 
info->irq = irq; + info->type = IRQT_UNBOUND; + info->refcnt = -1; + INIT_RCU_WORK(&info->rwork, delayed_free_irq); - info->type = IRQT_UNBOUND; - info->refcnt = -1; - INIT_RCU_WORK(&info->rwork, delayed_free_irq); + set_info_for_irq(irq, info); + /* + * Interrupt affinity setting can be immediate. No point + * in delaying it until an interrupt is handled. + */ + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - set_info_for_irq(irq, info); - /* - * Interrupt affinity setting can be immediate. No point - * in delaying it until an interrupt is handled. - */ - irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); + INIT_LIST_HEAD(&info->eoi_list); + list_add_tail(&info->list, &xen_irq_list_head); + } - INIT_LIST_HEAD(&info->eoi_list); - list_add_tail(&info->list, &xen_irq_list_head); + return info; } -static int __must_check xen_allocate_irqs_dynamic(int nvec) +static struct irq_info *xen_allocate_irq_dynamic(void) { - int i, irq = irq_alloc_descs(-1, 0, nvec, -1); + int irq = irq_alloc_desc_from(0, -1); + struct irq_info *info = NULL; if (irq >= 0) { - for (i = 0; i < nvec; i++) - xen_irq_init(irq + i); + info = xen_irq_init(irq); + if (!info) + xen_irq_free_desc(irq); } - return irq; -} - -static inline int __must_check xen_allocate_irq_dynamic(void) -{ - - return xen_allocate_irqs_dynamic(1); + return info; } -static int __must_check xen_allocate_irq_gsi(unsigned gsi) +static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi) { int irq; + struct irq_info *info; /* * A PV guest has no concept of a GSI (since it has no ACPI @@ -792,15 +769,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) else irq = irq_alloc_desc_at(gsi, -1); - xen_irq_init(irq); + info = xen_irq_init(irq); + if (!info) + xen_irq_free_desc(irq); - return irq; + return info; } -static void xen_free_irq(unsigned irq) +static void xen_free_irq(struct irq_info *info) { - struct irq_info *info = info_for_irq(irq); - if (WARN_ON(!info)) return; @@ -821,14 +798,11 @@ static void event_handler_exit(struct irq_info *info) clear_evtchn(info->evtchn); } -static void pirq_query_unmask(int irq) +static void pirq_query_unmask(struct irq_info *info) { struct physdev_irq_status_query irq_status; - struct irq_info *info = info_for_irq(irq); - - BUG_ON(info->type != IRQT_PIRQ); - irq_status.irq = pirq_from_irq(irq); + irq_status.irq = pirq_from_irq(info); if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; @@ -837,61 +811,81 @@ static void pirq_query_unmask(int irq) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } -static void eoi_pirq(struct irq_data *data) +static void do_eoi_pirq(struct irq_info *info) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? 
info->evtchn : 0; - struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; + struct physdev_eoi eoi = { .irq = pirq_from_irq(info) }; int rc = 0; - if (!VALID_EVTCHN(evtchn)) + if (!VALID_EVTCHN(info->evtchn)) return; event_handler_exit(info); - if (pirq_needs_eoi(data->irq)) { + if (pirq_needs_eoi(info)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } +static void eoi_pirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + + do_eoi_pirq(info); +} + +static void do_disable_dynirq(struct irq_info *info) +{ + if (VALID_EVTCHN(info->evtchn)) + do_mask(info, EVT_MASK_REASON_EXPLICIT); +} + +static void disable_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + + if (info) + do_disable_dynirq(info); +} + static void mask_ack_pirq(struct irq_data *data) { - disable_dynirq(data); - eoi_pirq(data); + struct irq_info *info = info_for_irq(data->irq); + + if (info) { + do_disable_dynirq(info); + do_eoi_pirq(info); + } } -static unsigned int __startup_pirq(unsigned int irq) +static unsigned int __startup_pirq(struct irq_info *info) { struct evtchn_bind_pirq bind_pirq; - struct irq_info *info = info_for_irq(irq); - evtchn_port_t evtchn = evtchn_from_irq(irq); + evtchn_port_t evtchn = info->evtchn; int rc; - BUG_ON(info->type != IRQT_PIRQ); - if (VALID_EVTCHN(evtchn)) goto out; - bind_pirq.pirq = pirq_from_irq(irq); + bind_pirq.pirq = pirq_from_irq(info); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { - pr_warn("Failed to obtain physical IRQ %d\n", irq); + pr_warn("Failed to obtain physical IRQ %d\n", info->irq); return 0; } evtchn = bind_pirq.port; - pirq_query_unmask(irq); + pirq_query_unmask(info); - rc = set_evtchn_to_irq(evtchn, irq); + rc = set_evtchn_to_irq(evtchn, info->irq); if (rc) goto err; info->evtchn = evtchn; - bind_evtchn_to_cpu(evtchn, 0, false); + bind_evtchn_to_cpu(info, 0, false); rc = xen_evtchn_port_setup(evtchn); if (rc) @@ -900,26 +894,28 @@ static unsigned int __startup_pirq(unsigned int irq) out: do_unmask(info, EVT_MASK_REASON_EXPLICIT); - eoi_pirq(irq_get_irq_data(irq)); + do_eoi_pirq(info); return 0; err: - pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); + pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq, + rc); xen_evtchn_close(evtchn); return 0; } static unsigned int startup_pirq(struct irq_data *data) { - return __startup_pirq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + + return __startup_pirq(info); } static void shutdown_pirq(struct irq_data *data) { - unsigned int irq = data->irq; - struct irq_info *info = info_for_irq(irq); - evtchn_port_t evtchn = evtchn_from_irq(irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info->evtchn; BUG_ON(info->type != IRQT_PIRQ); @@ -957,10 +953,14 @@ int xen_irq_from_gsi(unsigned gsi) } EXPORT_SYMBOL_GPL(xen_irq_from_gsi); -static void __unbind_from_irq(unsigned int irq) +static void __unbind_from_irq(struct irq_info *info, unsigned int irq) { - evtchn_port_t evtchn = evtchn_from_irq(irq); - struct irq_info *info = info_for_irq(irq); + evtchn_port_t evtchn; + + if (!info) { + xen_irq_free_desc(irq); + return; + } if (info->refcnt > 0) { info->refcnt--; @@ -968,19 +968,22 @@ static void __unbind_from_irq(unsigned int irq) return; } + evtchn = info->evtchn; + if (VALID_EVTCHN(evtchn)) { - 
unsigned int cpu = cpu_from_irq(irq); + unsigned int cpu = info->cpu; struct xenbus_device *dev; if (!info->is_static) xen_evtchn_close(evtchn); - switch (type_from_irq(irq)) { + switch (info->type) { case IRQT_VIRQ: - per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; + per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1; break; case IRQT_IPI: - per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; + per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1; + per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0; break; case IRQT_EVTCHN: dev = info->u.interdomain; @@ -994,7 +997,7 @@ static void __unbind_from_irq(unsigned int irq) xen_irq_info_cleanup(info); } - xen_free_irq(irq); + xen_free_irq(info); } /* @@ -1010,24 +1013,24 @@ static void __unbind_from_irq(unsigned int irq) int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { - int irq; + struct irq_info *info; struct physdev_irq irq_op; int ret; mutex_lock(&irq_mapping_update_lock); - irq = xen_irq_from_gsi(gsi); - if (irq != -1) { + ret = xen_irq_from_gsi(gsi); + if (ret != -1) { pr_info("%s: returning irq %d for gsi %u\n", - __func__, irq, gsi); + __func__, ret, gsi); goto out; } - irq = xen_allocate_irq_gsi(gsi); - if (irq < 0) + info = xen_allocate_irq_gsi(gsi); + if (!info) goto out; - irq_op.irq = irq; + irq_op.irq = info->irq; irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront @@ -1035,20 +1038,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { - xen_free_irq(irq); - irq = -ENOSPC; + xen_free_irq(info); + ret = -ENOSPC; goto out; } - ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, + ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); if (ret < 0) { - __unbind_from_irq(irq); - irq = ret; + __unbind_from_irq(info, info->irq); goto out; } - pirq_query_unmask(irq); + pirq_query_unmask(info); /* We try to use the handler with the appropriate semantic for the * type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. @@ -1065,16 +1067,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, * is the right choice either way. */ if (shareable) - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, + irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, handle_fasteoi_irq, name); else - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, + irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip, handle_edge_irq, name); + ret = info->irq; + out: mutex_unlock(&irq_mapping_update_lock); - return irq; + return ret; } #ifdef CONFIG_PCI_MSI @@ -1096,17 +1100,22 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, int nvec, const char *name, domid_t domid) { int i, irq, ret; + struct irq_info *info; mutex_lock(&irq_mapping_update_lock); - irq = xen_allocate_irqs_dynamic(nvec); + irq = irq_alloc_descs(-1, 0, nvec, -1); if (irq < 0) goto out; for (i = 0; i < nvec; i++) { + info = xen_irq_init(irq + i); + if (!info) + goto error_irq; + irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); - ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, + ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid, i == 0 ? 
0 : PIRQ_MSI_GROUP); if (ret < 0) goto error_irq; @@ -1118,9 +1127,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, out: mutex_unlock(&irq_mapping_update_lock); return irq; + error_irq: - while (nvec--) - __unbind_from_irq(irq + nvec); + while (nvec--) { + info = info_for_irq(irq + nvec); + __unbind_from_irq(info, irq + nvec); + } mutex_unlock(&irq_mapping_update_lock); return ret; } @@ -1156,67 +1168,45 @@ int xen_destroy_irq(int irq) } } - xen_free_irq(irq); + xen_free_irq(info); out: mutex_unlock(&irq_mapping_update_lock); return rc; } -int xen_irq_from_pirq(unsigned pirq) -{ - int irq; - - struct irq_info *info; - - mutex_lock(&irq_mapping_update_lock); - - list_for_each_entry(info, &xen_irq_list_head, list) { - if (info->type != IRQT_PIRQ) - continue; - irq = info->irq; - if (info->u.pirq.pirq == pirq) - goto out; - } - irq = -1; -out: - mutex_unlock(&irq_mapping_update_lock); - - return irq; -} - - int xen_pirq_from_irq(unsigned irq) { - return pirq_from_irq(irq); + struct irq_info *info = info_for_irq(irq); + + return pirq_from_irq(info); } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip, struct xenbus_device *dev) { - int irq; - int ret; + int ret = -ENOMEM; + struct irq_info *info; if (evtchn >= xen_evtchn_max_channels()) return -ENOMEM; mutex_lock(&irq_mapping_update_lock); - irq = get_evtchn_to_irq(evtchn); + info = evtchn_to_info(evtchn); - if (irq == -1) { - irq = xen_allocate_irq_dynamic(); - if (irq < 0) + if (!info) { + info = xen_allocate_irq_dynamic(); + if (!info) goto out; - irq_set_chip_and_handler_name(irq, chip, + irq_set_chip_and_handler_name(info->irq, chip, handle_edge_irq, "event"); - ret = xen_irq_info_evtchn_setup(irq, evtchn, dev); + ret = xen_irq_info_evtchn_setup(info, evtchn, dev); if (ret < 0) { - __unbind_from_irq(irq); - irq = ret; + __unbind_from_irq(info, info->irq); goto out; } /* @@ -1226,17 +1216,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip, * affinity setting is not invoked on them so nothing would * bind the channel. 
*/ - bind_evtchn_to_cpu(evtchn, 0, false); - } else { - struct irq_info *info = info_for_irq(irq); - if (!WARN_ON(!info || info->type != IRQT_EVTCHN)) - info->refcnt++; + bind_evtchn_to_cpu(info, 0, false); + } else if (!WARN_ON(info->type != IRQT_EVTCHN)) { + info->refcnt++; } + ret = info->irq; + out: mutex_unlock(&irq_mapping_update_lock); - return irq; + return ret; } int bind_evtchn_to_irq(evtchn_port_t evtchn) @@ -1255,18 +1245,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; - int ret, irq; + struct irq_info *info; + int ret; mutex_lock(&irq_mapping_update_lock); - irq = per_cpu(ipi_to_irq, cpu)[ipi]; + ret = per_cpu(ipi_to_irq, cpu)[ipi]; - if (irq == -1) { - irq = xen_allocate_irq_dynamic(); - if (irq < 0) + if (ret == -1) { + info = xen_allocate_irq_dynamic(); + if (!info) goto out; - irq_set_chip_and_handler_name(irq, &xen_percpu_chip, + irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, handle_percpu_irq, "ipi"); bind_ipi.vcpu = xen_vcpu_nr(cpu); @@ -1275,25 +1266,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) BUG(); evtchn = bind_ipi.port; - ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); + ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi); if (ret < 0) { - __unbind_from_irq(irq); - irq = ret; + __unbind_from_irq(info, info->irq); goto out; } /* * Force the affinity mask to the target CPU so proc shows * the correct target. */ - bind_evtchn_to_cpu(evtchn, cpu, true); + bind_evtchn_to_cpu(info, cpu, true); + ret = info->irq; } else { - struct irq_info *info = info_for_irq(irq); + info = info_for_irq(ret); WARN_ON(info == NULL || info->type != IRQT_IPI); } out: mutex_unlock(&irq_mapping_update_lock); - return irq; + return ret; } static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev, @@ -1361,22 +1352,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn = 0; - int irq, ret; + struct irq_info *info; + int ret; mutex_lock(&irq_mapping_update_lock); - irq = per_cpu(virq_to_irq, cpu)[virq]; + ret = per_cpu(virq_to_irq, cpu)[virq]; - if (irq == -1) { - irq = xen_allocate_irq_dynamic(); - if (irq < 0) + if (ret == -1) { + info = xen_allocate_irq_dynamic(); + if (!info) goto out; if (percpu) - irq_set_chip_and_handler_name(irq, &xen_percpu_chip, + irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip, handle_percpu_irq, "virq"); else - irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, + irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip, handle_edge_irq, "virq"); bind_virq.virq = virq; @@ -1391,10 +1383,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) BUG_ON(ret < 0); } - ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); + ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq); if (ret < 0) { - __unbind_from_irq(irq); - irq = ret; + __unbind_from_irq(info, info->irq); goto out; } @@ -1402,22 +1393,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) * Force the affinity mask for percpu interrupts so proc * shows the correct target. 
*/ - bind_evtchn_to_cpu(evtchn, cpu, percpu); + bind_evtchn_to_cpu(info, cpu, percpu); + ret = info->irq; } else { - struct irq_info *info = info_for_irq(irq); + info = info_for_irq(ret); WARN_ON(info == NULL || info->type != IRQT_VIRQ); } out: mutex_unlock(&irq_mapping_update_lock); - return irq; + return ret; } static void unbind_from_irq(unsigned int irq) { + struct irq_info *info; + mutex_lock(&irq_mapping_update_lock); - __unbind_from_irq(irq); + info = info_for_irq(irq); + __unbind_from_irq(info, irq); mutex_unlock(&irq_mapping_update_lock); } @@ -1568,13 +1563,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority); int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static) { - int irq = get_evtchn_to_irq(evtchn); - struct irq_info *info; - - if (irq == -1) - return -ENOENT; - - info = info_for_irq(irq); + struct irq_info *info = evtchn_to_info(evtchn); if (!info) return -ENOENT; @@ -1590,7 +1579,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted); int evtchn_get(evtchn_port_t evtchn) { - int irq; struct irq_info *info; int err = -ENOENT; @@ -1599,11 +1587,7 @@ int evtchn_get(evtchn_port_t evtchn) mutex_lock(&irq_mapping_update_lock); - irq = get_evtchn_to_irq(evtchn); - if (irq == -1) - goto done; - - info = info_for_irq(irq); + info = evtchn_to_info(evtchn); if (!info) goto done; @@ -1623,16 +1607,17 @@ EXPORT_SYMBOL_GPL(evtchn_get); void evtchn_put(evtchn_port_t evtchn) { - int irq = get_evtchn_to_irq(evtchn); - if (WARN_ON(irq == -1)) + struct irq_info *info = evtchn_to_info(evtchn); + + if (WARN_ON(!info)) return; - unbind_from_irq(irq); + unbind_from_irq(info->irq); } EXPORT_SYMBOL_GPL(evtchn_put); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) { - int irq; + evtchn_port_t evtchn; #ifdef CONFIG_X86 if (unlikely(vector == XEN_NMI_VECTOR)) { @@ -1643,9 +1628,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) return; } #endif - irq = per_cpu(ipi_to_irq, cpu)[vector]; - BUG_ON(irq < 0); - notify_remote_via_irq(irq); + evtchn = per_cpu(ipi_to_evtchn, cpu)[vector]; + BUG_ON(evtchn == 0); + notify_remote_via_evtchn(evtchn); } struct evtchn_loop_ctrl { @@ -1656,12 +1641,10 @@ struct evtchn_loop_ctrl { void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) { - int irq; - struct irq_info *info; + struct irq_info *info = evtchn_to_info(port); struct xenbus_device *dev; - irq = get_evtchn_to_irq(port); - if (irq == -1) + if (!info) return; /* @@ -1686,7 +1669,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) } } - info = info_for_irq(irq); if (xchg_acquire(&info->is_active, 1)) return; @@ -1700,7 +1682,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) info->eoi_time = get_jiffies_64() + event_eoi_delay; } - generic_handle_irq(irq); + generic_handle_irq(info->irq); } int xen_evtchn_do_upcall(void) @@ -1758,16 +1740,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ - BUG_ON(get_evtchn_to_irq(evtchn) != -1); + BUG_ON(evtchn_to_info(evtchn)); /* Expect irq to have been bound before, so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); - (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL); + info->irq = irq; + (void)xen_irq_info_evtchn_setup(info, evtchn, NULL); mutex_unlock(&irq_mapping_update_lock); - bind_evtchn_to_cpu(evtchn, info->cpu, false); + bind_evtchn_to_cpu(info, info->cpu, false); /* Unmask the event channel. 
*/ enable_irq(irq); @@ -1801,7 +1784,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) * it, but don't do the xenlinux-level rebind in that case. */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) - bind_evtchn_to_cpu(evtchn, tcpu, false); + bind_evtchn_to_cpu(info, tcpu, false); do_unmask(info, EVT_MASK_REASON_TEMPORARY); @@ -1858,28 +1841,30 @@ static void enable_dynirq(struct irq_data *data) do_unmask(info, EVT_MASK_REASON_EXPLICIT); } -static void disable_dynirq(struct irq_data *data) +static void do_ack_dynirq(struct irq_info *info) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; + evtchn_port_t evtchn = info->evtchn; if (VALID_EVTCHN(evtchn)) - do_mask(info, EVT_MASK_REASON_EXPLICIT); + event_handler_exit(info); } static void ack_dynirq(struct irq_data *data) { struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; - if (VALID_EVTCHN(evtchn)) - event_handler_exit(info); + if (info) + do_ack_dynirq(info); } static void mask_ack_dynirq(struct irq_data *data) { - disable_dynirq(data); - ack_dynirq(data); + struct irq_info *info = info_for_irq(data->irq); + + if (info) { + do_disable_dynirq(info); + do_ack_dynirq(info); + } } static void lateeoi_ack_dynirq(struct irq_data *data) @@ -1952,13 +1937,13 @@ static void restore_pirqs(void) if (rc) { pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); - xen_free_irq(irq); + xen_free_irq(info); continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); - __startup_pirq(irq); + __startup_pirq(info); } } @@ -1966,13 +1951,15 @@ static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn; + struct irq_info *info; int virq, irq; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; + info = info_for_irq(irq); - BUG_ON(virq_from_irq(irq) != virq); + BUG_ON(virq_from_irq(info) != virq); /* Get a new binding from Xen. */ bind_virq.virq = virq; @@ -1983,9 +1970,9 @@ static void restore_cpu_virqs(unsigned int cpu) evtchn = bind_virq.port; /* Record the new mapping. */ - (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); + xen_irq_info_virq_setup(info, cpu, evtchn, virq); /* The affinity mask is still valid */ - bind_evtchn_to_cpu(evtchn, cpu, false); + bind_evtchn_to_cpu(info, cpu, false); } } @@ -1993,13 +1980,15 @@ static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; + struct irq_info *info; int ipi, irq; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; + info = info_for_irq(irq); - BUG_ON(ipi_from_irq(irq) != ipi); + BUG_ON(ipi_from_irq(info) != ipi); /* Get a new binding from Xen. */ bind_ipi.vcpu = xen_vcpu_nr(cpu); @@ -2009,9 +1998,9 @@ static void restore_cpu_ipis(unsigned int cpu) evtchn = bind_ipi.port; /* Record the new mapping. 
*/ - (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); + xen_irq_info_ipi_setup(info, cpu, evtchn, ipi); /* The affinity mask is still valid */ - bind_evtchn_to_cpu(evtchn, cpu, false); + bind_evtchn_to_cpu(info, cpu, false); } } @@ -2025,13 +2014,6 @@ void xen_clear_irq_pending(int irq) event_handler_exit(info); } EXPORT_SYMBOL(xen_clear_irq_pending); -void xen_set_irq_pending(int irq) -{ - evtchn_port_t evtchn = evtchn_from_irq(irq); - - if (VALID_EVTCHN(evtchn)) - set_evtchn(evtchn); -} bool xen_test_irq_pending(int irq) { diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 4d3398eff9cd..19ae31695edc 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -33,7 +33,6 @@ struct evtchn_ops { extern const struct evtchn_ops *evtchn_ops; -int get_evtchn_to_irq(evtchn_port_t evtchn); void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl); unsigned int cpu_from_evtchn(evtchn_port_t evtchn); diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index b3e3d1bb37f3..508655273145 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -47,6 +47,9 @@ #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> +#ifdef CONFIG_ACPI +#include <acpi/processor.h> +#endif /* * @cpu_id: Xen physical cpu logic number @@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id) return online; } + +void xen_sanitize_proc_cap_bits(uint32_t *cap) +{ + struct xen_platform_op op = { + .cmd = XENPF_set_processor_pminfo, + .u.set_pminfo.id = -1, + .u.set_pminfo.type = XEN_PM_PDC, + }; + u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap }; + int ret; + + set_xen_guest_handle(op.u.set_pminfo.pdc, buf); + ret = HYPERVISOR_platform_op(&op); + if (ret) + pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n", + ret); + else + *cap = buf[2]; +} #endif diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c index b52e0fa595a9..223870a0111b 100644 --- a/drivers/xen/xen-front-pgdir-shbuf.c +++ b/drivers/xen/xen-front-pgdir-shbuf.c @@ -21,7 +21,7 @@ #include <xen/xen-front-pgdir-shbuf.h> -/** +/* * This structure represents the structure of a shared page * that contains grant references to the pages of the shared * buffer. This structure is common to many Xen para-virtualized @@ -33,7 +33,7 @@ struct xen_page_directory { grant_ref_t gref[]; /* Variable length */ }; -/** +/* * Shared buffer ops which are differently implemented * depending on the allocation mode, e.g. if the buffer * is allocated by the corresponding backend or frontend. @@ -61,7 +61,7 @@ struct xen_front_pgdir_shbuf_ops { int (*unmap)(struct xen_front_pgdir_shbuf *buf); }; -/** +/* * Get granted reference to the very first page of the * page directory. Usually this is passed to the backend, * so it can find/fill the grant references to the buffer's @@ -81,7 +81,7 @@ xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf) } EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start); -/** +/* * Map granted references of the shared buffer. * * Depending on the shared buffer mode of allocation @@ -102,7 +102,7 @@ int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf) } EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map); -/** +/* * Unmap granted references of the shared buffer. 
* * Depending on the shared buffer mode of allocation @@ -123,7 +123,7 @@ int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf) } EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap); -/** +/* * Free all the resources of the shared buffer. * * \param buf shared buffer which resources to be freed. @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free); offsetof(struct xen_page_directory, \ gref)) / sizeof(grant_ref_t)) -/** +/* * Get the number of pages the page directory consumes itself. * * \param buf shared buffer. @@ -160,7 +160,7 @@ static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf) return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE); } -/** +/* * Calculate the number of grant references needed to share the buffer * and its pages when backend allocates the buffer. * @@ -172,7 +172,7 @@ static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf) buf->num_grefs = get_num_pages_dir(buf); } -/** +/* * Calculate the number of grant references needed to share the buffer * and its pages when frontend allocates the buffer. * @@ -190,7 +190,7 @@ static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf) #define xen_page_to_vaddr(page) \ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page))) -/** +/* * Unmap the buffer previously mapped with grant references * provided by the backend. * @@ -238,7 +238,7 @@ static int backend_unmap(struct xen_front_pgdir_shbuf *buf) return ret; } -/** +/* * Map the buffer with grant references provided by the backend. * * \param buf shared buffer. @@ -320,7 +320,7 @@ static int backend_map(struct xen_front_pgdir_shbuf *buf) return ret; } -/** +/* * Fill page directory with grant references to the pages of the * page directory itself. * @@ -350,7 +350,7 @@ static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf) page_dir->gref_dir_next_page = XEN_GREF_LIST_END; } -/** +/* * Fill page directory with grant references to the pages of the * page directory and the buffer we share with the backend. * @@ -389,7 +389,7 @@ static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf) } } -/** +/* * Grant references to the frontend's buffer pages. * * These will be shared with the backend, so it can @@ -418,7 +418,7 @@ static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf, return 0; } -/** +/* * Grant all the references needed to share the buffer. * * Grant references to the page directory pages and, if @@ -466,7 +466,7 @@ static int grant_references(struct xen_front_pgdir_shbuf *buf) return 0; } -/** +/* * Allocate all required structures to mange shared buffer. * * \param buf shared buffer. @@ -506,7 +506,7 @@ static const struct xen_front_pgdir_shbuf_ops local_ops = { .grant_refs_for_buffer = guest_grant_refs_for_buffer, }; -/** +/* * Allocate a new instance of a shared buffer. * * \param cfg configuration to be used while allocating a new shared buffer. |
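The events_base.c hunks above converge on one calling convention: the helpers now take or return a struct irq_info * and event channels are resolved with evtchn_to_info() instead of get_evtchn_to_irq() followed by info_for_irq(). Below is a minimal sketch of that lookup pattern, assuming only the file-internal symbols visible in this diff (evtchn_to_info(), irq_mapping_update_lock, IRQT_EVTCHN); act_on_event_channel() is a hypothetical illustration, not a hunk from the series.

	/* Sketch only: mirrors the post-refactor shape of evtchn_get()/evtchn_put(). */
	static int act_on_event_channel(evtchn_port_t evtchn)
	{
		struct irq_info *info;
		int ret = -ENOENT;

		mutex_lock(&irq_mapping_update_lock);

		/* Single lookup replaces get_evtchn_to_irq() + info_for_irq(). */
		info = evtchn_to_info(evtchn);
		if (!info)
			goto out;

		/* Downstream helpers now take the irq_info pointer, not an irq number. */
		if (!WARN_ON(info->type != IRQT_EVTCHN))
			ret = info->irq;	/* hand the Linux irq back to the caller */

	out:
		mutex_unlock(&irq_mapping_update_lock);
		return ret;
	}

The same shift shows up on the send side: xen_send_IPI_one() no longer resolves an irq at all; it reads per_cpu(ipi_to_evtchn, cpu)[vector] and calls notify_remote_via_evtchn() directly.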
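The xen_sanitize_proc_cap_bits() helper added to drivers/xen/pcpu.c round-trips a _PDC capability word through XENPF_set_processor_pminfo/XEN_PM_PDC so the hypervisor can clear bits dom0 must not advertise, writing the sanitized value back through the pointer. A hedged usage sketch follows; the caller, its name, and the ACPI_PDC_C_CAPABILITY_SMP starting value are assumptions for illustration, and only the helper itself comes from this diff.

	#include <acpi/processor.h>
	#include <xen/xen.h>
	/* Declaration of xen_sanitize_proc_cap_bits() assumed visible to the caller. */

	/* Hypothetical caller: not part of the patch. */
	static u32 example_processor_cap(void)
	{
		u32 cap = ACPI_PDC_C_CAPABILITY_SMP;	/* bits the kernel would like to advertise */

		/* Only dom0 consults the hypervisor; the helper rewrites cap in place. */
		if (xen_initial_domain())
			xen_sanitize_proc_cap_bits(&cap);

		return cap;
	}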