author     Jakub Kicinski <kuba@kernel.org>	2024-05-02 22:05:13 +0300
committer  Jakub Kicinski <kuba@kernel.org>	2024-05-02 22:06:25 +0300
commit     e958da0ddbe831197a0023251880a4a09d5ba268 (patch)
tree       eacded26f9563064a44fd1afe730493898adadb9 /drivers
parent     dcc61472534e48a200262fd297ab21f8dd94d6cc (diff)
parent     545c494465d24b10a4370545ba213c0916f70b95 (diff)
download   linux-e958da0ddbe831197a0023251880a4a09d5ba268.tar.xz

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

Conflicts:

include/linux/filter.h
kernel/bpf/core.c
  66e13b615a0c ("bpf: verifier: prevent userspace memory access")
  d503a04f8bc0 ("bpf: Add support for certain atomics in bpf_arena to x86 JIT")
https://lore.kernel.org/all/20240429114939.210328b0@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/cppc_acpi.c | 57
-rw-r--r--  drivers/acpi/x86/s2idle.c | 8
-rw-r--r--  drivers/cxl/core/mbox.c | 38
-rw-r--r--  drivers/dma/idma64.c | 4
-rw-r--r--  drivers/dma/idxd/cdev.c | 5
-rw-r--r--  drivers/dma/idxd/debugfs.c | 4
-rw-r--r--  drivers/dma/idxd/device.c | 8
-rw-r--r--  drivers/dma/idxd/idxd.h | 2
-rw-r--r--  drivers/dma/idxd/init.c | 2
-rw-r--r--  drivers/dma/idxd/irq.c | 4
-rw-r--r--  drivers/dma/idxd/perfmon.c | 9
-rw-r--r--  drivers/dma/owl-dma.c | 4
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c | 3
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h | 3
-rw-r--r--  drivers/dma/xilinx/xdma.c | 42
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 13
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 137
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 37
-rw-r--r--  drivers/gpio/gpio-tangier.c | 9
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 25
-rw-r--r--  drivers/gpu/drm/drm_gem_atomic_helper.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 24
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 34
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_lid.c | 80
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_ccs_mode.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_huc.c | 9
-rw-r--r--  drivers/i2c/i2c-core-base.c | 12
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 9
-rw-r--r--  drivers/md/dm-vdo/murmurhash3.c | 2
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/misc/eeprom/at24.c | 18
-rw-r--r--  drivers/mmc/host/moxart-mmc.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r--  drivers/mmc/host/sdhci-of-dwcmshc.c | 1
-rw-r--r--  drivers/mtd/mtdcore.c | 2
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2
-rw-r--r--  drivers/mtd/nand/raw/diskonchip.c | 4
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 7
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 14
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 49
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 6
-rw-r--r--  drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 9
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-m31.c | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 12
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h | 1
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h | 1
-rw-r--r--  drivers/phy/rockchip/Kconfig | 1
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 36
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-snps-pcie3.c | 31
-rw-r--r--  drivers/phy/ti/phy-tusb1210.c | 23
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 34
-rw-r--r--  drivers/pinctrl/core.c | 8
-rw-r--r--  drivers/pinctrl/devicetree.c | 10
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 78
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.h | 4
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.c | 40
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-a1.c | 6
-rw-r--r--  drivers/pinctrl/renesas/pinctrl-rzg2l.c | 14
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 1
-rw-r--r--  drivers/power/supply/mt6360_charger.c | 2
-rw-r--r--  drivers/power/supply/rt9455_charger.c | 2
-rw-r--r--  drivers/regulator/irq_helpers.c | 3
-rw-r--r--  drivers/regulator/mt6360-regulator.c | 32
-rw-r--r--  drivers/regulator/qcom-refgen-regulator.c | 1
-rw-r--r--  drivers/regulator/vqmmc-ipq4019-regulator.c | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 61
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/soc/mediatek/Kconfig | 1
-rw-r--r--  drivers/soc/mediatek/mtk-svs.c | 7
-rw-r--r--  drivers/soundwire/amd_manager.c | 15
-rw-r--r--  drivers/soundwire/amd_manager.h | 3
-rw-r--r--  drivers/vdpa/vdpa.c | 6
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 2
108 files changed, 768 insertions, 683 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 4bfbe55553f4..a40b6f3946ef 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
- GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -1002,14 +1002,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1018,17 +1018,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
switch (size) {
case 8:
@@ -1044,8 +1049,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
@@ -1063,12 +1073,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1076,17 +1087,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
@@ -1105,8 +1121,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
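
The cppc_acpi.c hunks above fix MASK_VAL() so that the raw value is shifted right by bit_offset first and only then masked down to bit_width bits via GENMASK(bit_width - 1, 0); the old macro instead masked the shift amount, producing garbage for multi-bit fields. A minimal userspace sketch of the corrected semantics, with a simplified GENMASK() stand-in and an illustrative register layout that is not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* simplified stand-in for the kernel's GENMASK(); assumes h < 64 */
    #define GENMASK(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
                                GENMASK(((reg)->bit_width) - 1, 0))

    struct cpc_reg_example { uint8_t bit_offset; uint8_t bit_width; };

    int main(void)
    {
        /* hypothetical 8-bit field living at bits [15:8] of the raw value */
        struct cpc_reg_example reg = { .bit_offset = 8, .bit_width = 8 };

        printf("0x%llx\n", MASK_VAL(&reg, 0xABCDULL)); /* prints 0xab */
        return 0;
    }
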
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index cd84af23f7ea..dd0b40b9bbe8 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
unsigned int func_mask;
/*
- * Avoid evaluating the same _DSM function for two
- * different UUIDs and prioritize the MSFT one.
+ * Log a message if the _DSM function sets for two
+ * different UUIDs overlap.
*/
func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
- if (func_mask) {
+ if (func_mask)
acpi_handle_info(adev->handle,
"Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
func_mask);
- lps0_dsm_func_mask &= ~func_mask;
- }
}
}
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index f0f54aeccc87..65185c9fa001 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -946,25 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
-
- mbox_cmd.size_out = mds->payload_size;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -1297,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1309,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..1398814d8fbb 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..c095a2c8f659 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..7b98944135eb 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -293,7 +293,7 @@ struct idxd_driver_data {
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..264c4e47d7cc 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
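
The owl-dma change above is a straightforward read-modify-write fix: pchan_update() and dma_update() read the register into regval, set or clear the requested bits there, but then wrote back the unmodified val, discarding every other bit in the register. A self-contained sketch of the before/after behaviour, using an ordinary variable in place of the real readl()/writel() MMIO accessors:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_reg = 0xf0;      /* stands in for the MMIO register */

    static void update_bits(uint32_t mask, int set)
    {
        uint32_t regval = fake_reg;       /* readl() stand-in */

        if (set)
            regval |= mask;
        else
            regval &= ~mask;

        fake_reg = regval;                /* the fix: write regval back, not mask */
    }

    int main(void)
    {
        update_bits(0x01, 1);
        printf("0x%x\n", fake_reg);       /* 0xf1 - previously set bits survive */
        return 0;
    }
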
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..ad8e3da1b2cd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
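
The early return added to tegra186-gpc-dma.c above matters because the residual formula wraps at bytes_req: for a fully completed transfer (bytes_xfer == bytes_req) the modulo yields 0 and the old code reported a residual of bytes_req, i.e. "nothing transferred". A small arithmetic check with assumed example sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bytes_req = 1024, bytes_xfer = 1024;

        /* old behaviour: 1024 - (1024 % 1024) == 1024 */
        printf("old: %u\n", bytes_req - (bytes_xfer % bytes_req));

        /* new behaviour: a completed transfer short-circuits to 0 */
        printf("new: %u\n", bytes_xfer == bytes_req ?
               0 : bytes_req - (bytes_xfer % bytes_req));
        return 0;
    }
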
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..313b217388fe 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..eb0637d90342 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098ef3..bc550ad0dbe0 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 byte (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed to
+ * it is structured and aligned. In particular the request/response setup used
+ * for QSEE_CMD_UEFI_GET_VARIABLE. While qcom_qseecom_app_send(), in theory,
+ * accepts separate buffers/addresses for the request and response parts, in
+ * practice, however, it seems to expect them to be both part of a larger
+ * contiguous block. We initially allocated separate buffers for the request
+ * and response but this caused the QSEE_CMD_UEFI_GET_VARIABLE command to
+ * either not write any response to the response buffer or outright crash the
+ * device. Therefore, we now allocate a single contiguous block of DMA memory
+ * for both and properly align the data using the macros below. In particular,
+ * request and response structs are aligned at 8 byte (via __reqdata_offs()),
+ * following the driver that this has been reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b5633a..90283f160a22 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1576,9 +1576,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1589,33 +1589,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1603,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index b75e0b12087a..4b29abafecf6 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -195,7 +195,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
void __iomem *gisr;
u8 shift;
@@ -227,7 +228,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -236,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db40d..9130c691a2dd 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+ * When SCR_SEC_[R|W]EN is unset, then we have full read/write access to all the
+ * registers for given GPIO pin.
+ * When SCR_SEC[R|W]EN is set, then there is need to further check the accompanying
+ * SCR_SEC_G1[R|W] bit to determine read/write access to all the registers for given
+ * GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
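
The rewritten check in tegra186_gpio_is_accessible() above reads as two independent conditions: a read is permitted when SCR_SEC_REN is clear or, if set, when SCR_SEC_G1R is also set, and likewise for writes with SCR_SEC_WEN/SCR_SEC_G1W; access is granted only when both hold. A hedged restatement of that predicate; the REN/G1R/G1W bit positions are taken from the header hunk above, while the WEN position is an assumption made purely for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCR_SEC_WEN  (1u << 28)   /* assumed position, see driver header */
    #define SCR_SEC_REN  (1u << 27)
    #define SCR_SEC_G1W  (1u << 9)
    #define SCR_SEC_G1R  (1u << 1)

    static bool accessible(uint32_t scr)
    {
        bool read_ok  = !(scr & SCR_SEC_REN) || (scr & SCR_SEC_G1R);
        bool write_ok = !(scr & SCR_SEC_WEN) || (scr & SCR_SEC_G1W);

        return read_ok && write_ok;
    }

    int main(void)
    {
        printf("%d\n", accessible(0));                          /* 1: no secure gating */
        printf("%d\n", accessible(SCR_SEC_REN | SCR_SEC_WEN));  /* 0: gated, no grants */
        printf("%d\n", accessible(SCR_SEC_REN | SCR_SEC_G1R));  /* 1: read gated but granted */
        return 0;
    }
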
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df58a6a1a67e..2131de36e3da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate BOs and map them to GPUVM (update VM page tables). */
+ /* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
- if (failed_size)
- pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
- /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
- * validations above would invalidate DMABuf imports again.
- */
- ret = process_validate_vms(process_info, &exec.ticket);
- if (ret) {
- pr_debug("Validating VMs failed, ret: %d\n", ret);
- goto validate_map_fail;
- }
-
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a00cf4756ad0..1569bef030ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1132,6 +1132,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2099159a693f..ce733e3cb35d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -605,6 +605,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 0df97c3e3a70..f7c73533e336 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -774,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 6695481f870f..c23d97d34b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
- return 0;
+ return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f90905ef32c7..701146d649c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9186,7 +9186,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9276,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f7325b02a191..f00e05aba46a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -6192,7 +6192,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6278,7 +6278,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6f97a6d0e6d0..99dbd2341120 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6981,7 +6981,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7018,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 82eab49be82b..e708468ac54d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 42f4bd250def..da01b524b9f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 769eb8f7bb3c..09315dd5a1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
+ /* setup collaborate mode */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
+ /* setup DPM */
+ if (amdgpu_vpe_configure_dpm(vpe))
+ dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
/*
* For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
* Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
- amdgpu_vpe_psp_update_sram(adev);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
-
- return 0;
+ return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index bdc01ca9609a..5c8d81bfce7a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
+
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b79986412cd8..58c1fe542193 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
- if (!signal_eviction_fence(p))
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- else
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2012,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f0f7f48af413..386875e6eb96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
- return r;
+ return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6d2f60c61dec..f3f94d109726 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3029,6 +3029,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..bbd0169010c2 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -4261,6 +4261,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3957af057d54..c977ebe88001 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2294,6 +2294,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(adev->pdev, i * 4,
+ adev->pdev->saved_config_space[i]);
+ pci_restore_msi_state(adev->pdev);
+}
+
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2326,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
+ /* Certain platforms have switches which assign virtual BAR values to
+ * devices. The OS uses the virtual BAR values, while the device behind
+ * the switch is assigned another BAR value. When the device's config
+ * space registers are queried, the switch returns the virtual BAR
+ * values. When a mode-2 reset is performed, the switch is unaware of it
+ * and will continue to return the same virtual values to the OS. This
+ * affects the pci_restore_config_space() API, as it doesn't write the
+ * saved value if the current value read from config space matches what
+ * was saved. As a workaround, make sure the config space is always
+ * restored.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ smu_v13_0_6_restore_pci_config(smu);
+
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e440f458b663..93337543aac3 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
- drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
- &new_shadow_plane_state->fmtcnv_state);
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 734412aae94d..a9bf426f69b3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
- case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
- *value = gpu->identity.nn_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
- *value = gpu->identity.nn_mad_per_core;
- break;
-
- case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
- *value = gpu->identity.tp_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
- *value = gpu->identity.on_chip_sram_size;
- break;
-
- case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
- *value = gpu->identity.axi_sram_size;
- break;
-
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7d5e9158e13c..197e0037732e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
- /* Number of MAD units per Neural Network core. */
- u32 nn_mad_per_core;
-
- /* Number of Tensor Processing cores. */
- u32 tp_core_count;
-
- /* Size in bytes of the SRAM inside the NPU. */
- u32 on_chip_sram_size;
-
- /* Size in bytes of the SRAM across the AXI bus. */
- u32 axi_sram_size;
-
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index d8e7334de8ce..8665f2658d51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
- .nn_mad_per_core = 64,
- .tp_core_count = 4,
- .on_chip_sram_size = 524288,
- .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
- .nn_mad_per_core = 64,
- .tp_core_count = 3,
- .on_chip_sram_size = 262144,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e1a6..58fed80c7392 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89d4a..6dece8f0e380 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa4cc..83c17689c454 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe392636..000000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index a0afe1ba6dd5..f9705430ada9 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -378,7 +378,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err);
/* Initialize CCS mode sysfs after early initialization of HW engines */
- xe_gt_ccs_mode_sysfs_init(gt);
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 529fc286cd06..396aeb5b9924 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, or an error value on failure.
*/
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
- return;
+ return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
- if (err) {
- drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
- return;
- }
+ if (err)
+ return err;
- err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
- if (err) {
- sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- }
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
index f39975aaaab0..f8779852cf0d 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 355edd4d758a..7f32547f94b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1054,10 +1054,10 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
- ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
- ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index b545f850087c..6b9b1cbedd37 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -53,7 +53,6 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
- int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
@@ -66,13 +65,7 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
huc->gsc_pkt = bo;
- err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
- if (err) {
- free_gsc_pkt(&xe->drm, huc);
- return err;
- }
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)
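
Both Xe hunks above collapse their error handling into a single drmm_add_action_or_reset() call. A minimal sketch of that managed-release pattern (illustrative names, not from the patch): on failure the registered action runs immediately, so no manual unwind is needed.

	#include <drm/drm_managed.h>
	#include <linux/slab.h>

	static void example_release(struct drm_device *drm, void *arg)
	{
		kfree(arg);
	}

	static int example_init(struct drm_device *drm)
	{
		void *buf = kzalloc(64, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* On error this calls example_release(drm, buf) before returning. */
		return drmm_add_action_or_reset(drm, example_release, buf);
	}
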
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ff5c486a1dbb..db0d1ac82910 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2200,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2273,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2a537cbfcb07..5f7d3db3afd8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4567,13 +4567,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 01d2743444ec..3a989efae142 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -137,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
break;
default:
break;
- };
+ }
}
/* finalization */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56aa2a8b9d71..7d0746b37c8e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- fput(bdev_file);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- fput(td->dm_dev.bdev_file);
+
+ /* Leverage async fput() if DMF_DEFERRED_REMOVE is set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 572333ead5fb..4bd4f32bcdab 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -758,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -782,6 +773,15 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
/* If this a SPD EEPROM, probe for DDR3 thermal sensor */
if (cdata == &at24_data_spd)
at24_probe_temp_sensor(client);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b88d6dec209f..9a5f75163aca 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -300,6 +300,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
remain = sgm->length;
if (remain > host->data_len)
remain = host->data_len;
+ sgm->consumed = 0;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0aceeeba..e113b99a3eab 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1d8f5a76096a..f2e4a93ed1d6 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -626,6 +626,7 @@ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
/* perform tuning */
sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
host->tuning_err = __sdhci_execute_tuning(host, opcode);
if (host->tuning_err) {
/* disable auto-tuning upon tuning error */
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5887feb347a4..0de87bc63840 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -900,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a8d12c71f987..1b2ec0fec60c 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -857,7 +857,7 @@ static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
struct brcmnand_soc *soc = ctrl->soc;
int i;
- if (soc->read_data_bus) {
+ if (soc && soc->read_data_bus) {
soc->read_data_bus(soc, flash_cache, buffer, fc_words);
} else {
for (i = 0; i < fc_words; i++)
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5243fab9face..8db7fc424571 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1554,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c84d3..b8cff9240b28 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f29ef72a2f1d..0fae2b96b21f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -5758,7 +5758,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -6217,7 +6217,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1f84b37032a..c7e7dac057a3 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
 * 3. The number of filters needed exceeds the number of filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c..43b923c48b14 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
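
The reg_lock added above serializes read-modify-write sequences on shared UMAC registers in the GENET hunks that follow. A minimal sketch of that locking pattern, assuming the driver's existing bcmgenet_umac_readl()/bcmgenet_umac_writel() helpers (the wrapper function here is illustrative, not part of the patch):

	#include "bcmgenet.h"

	static void example_umac_rmw(struct bcmgenet_priv *priv, u32 set, u32 clear)
	{
		u32 reg;

		/* reg_lock keeps concurrent read-modify-write sequences from
		 * losing each other's updates.
		 */
		spin_lock_bh(&priv->reg_lock);
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		reg &= ~clear;
		reg |= set;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		spin_unlock_bh(&priv->reg_lock);
	}
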
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..1248792d7fd4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada89355747..c4a3698cef66 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559..97291bfbeea5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..de52bcb884c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 93544f1cc2a5..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success) {
*data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success)
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d0..9fc0fd95a13d 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 8)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 4)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 2)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 5)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a..881d704644fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
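
The debugfs hunks above switch from memdup_user() to memdup_user_nul(), which appends the terminating NUL itself, so the manual cmd_buf[count] = '\0' step is dropped. A minimal standalone write-handler sketch (illustrative names, not part of the patch):

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static ssize_t example_debugfs_write(struct file *filp, const char __user *buf,
					     size_t count, loff_t *ppos)
	{
		char *cmd;
		unsigned int val;
		int ret;

		/* Copy the user buffer; the result is already NUL-terminated. */
		cmd = memdup_user_nul(buf, count);
		if (IS_ERR(cmd))
			return PTR_ERR(cmd);

		ret = kstrtouint(strim(cmd), 0, &val);	/* parse e.g. "128\n" */
		kfree(cmd);

		return ret ? ret : count;
	}
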
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33..cb6b33a228ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index a9a85d0f5a5d..8884913e0473 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1675,6 +1675,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
+ int nh;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1765,12 +1766,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
@@ -1840,7 +1857,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
}
parp = arp_hdr(skb);
@@ -1896,7 +1915,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2055,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2266,7 +2285,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
len);
} else {
drop:
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2298,7 +2317,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
@@ -2563,7 +2582,7 @@ out_unlock:
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2571,11 +2590,11 @@ drop:
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(ndst);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2608,7 +2627,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2646,7 +2665,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2743,7 +2762,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
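
The vxlan_rcv() hunk above caches the outer header as an offset because pskb_inet_may_pull() may reallocate skb->head and invalidate raw pointers. A minimal sketch of that save-offset/re-derive pattern (illustrative function, not part of the patch):

	#include <linux/skbuff.h>
	#include <linux/ip.h>

	static int example_decap(struct sk_buff *skb)
	{
		/* Offsets from skb->head stay valid across a head reallocation. */
		int nh = skb_network_header(skb) - skb->head;
		const struct iphdr *oiph;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return -EINVAL;

		oiph = (const struct iphdr *)(skb->head + nh);	/* re-derive pointer */
		return oiph->ttl ? 0 : -EINVAL;
	}
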
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7b67..11fcb1867118 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 41162d7228c9..1d1db1737422 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b75d7..20d4c020a83c 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7d585a4bbbba..c21cdb8dbfe7 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -77,6 +77,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -102,6 +103,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -126,6 +128,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -150,6 +153,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -174,6 +178,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -2150,9 +2155,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2162,6 +2167,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2184,7 +2190,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
index f5cfacf9be96..181057421c11 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V5_H_
/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
index 01a20d3be4b8..fa967a1af058 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V6_H_
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index a34f67bb7e61..b60a4b60451e 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -87,6 +87,7 @@ config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
with Samsung IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 76b9cf417591..bf74e429ff46 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* find the phy-id from the io address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961ce11..9857ee45b89e 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -40,6 +40,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 13cd614e12a1..751fecd466e3 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -69,7 +69,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -236,19 +235,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -473,9 +477,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index d376fa7114d1..029efe16f8cc 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -43,7 +43,7 @@
#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU690 0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
/* GPIOS7 */
- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
/* GPIOS6 */
- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
/* GPIOS5 */
- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
/* GPIOS4 */
- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
/* GPIOS3*/
- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
/* GPIOS2 */
- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
/* GPIOS1 */
- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
/* GPIOS0 */
- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
/* GPIOR7 */
- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
/* GPIOR6 */
- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
/* GPIOR5 */
- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
/* GPIOR4 */
- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
/* GPIOR3*/
- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
/* GPIOR2 */
- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
/* GPIOR1 */
- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
/* GPIOR0 */
- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
/* GPIOX7 */
ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6649357637ff..cffeb869130d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
error = pinctrl_claim_hogs(pctldev);
if (error) {
- dev_err(pctldev->dev, "could not claim hogs: %i\n",
- error);
- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
- pctldev->desc->npins);
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
return error;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index df1efc2e5202..6a94ecd6a8de 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
- if (!propname)
- return -ENOMEM;
+ if (!propname) {
+ ret = -ENOMEM;
+ goto err;
+ }
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
if (state == 0) {
- of_node_put(np);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
break;
}
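A minimal sketch of the single-exit error handling this hunk moves toward (function and label names are illustrative): every failure path funnels through one label, so the node reference is dropped exactly once no matter which allocation or lookup failed.

#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_first_state(struct device_node *np)
{
	char *propname;
	int ret;

	propname = kasprintf(GFP_KERNEL, "pinctrl-%d", 0);
	if (!propname) {
		ret = -ENOMEM;
		goto err;
	}
	/* ... look the property up, build the map ... */
	kfree(propname);
	return 0;
err:
	of_node_put(np);
	return ret;
}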
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index ac97724c59ba..4e87f5b875c0 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
/* SCORE groups */
static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
static const unsigned int byt_score_pwm0_pins[] = { 94 };
static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
static const struct intel_pingroup byt_score_groups[] = {
- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+ PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
};
static const char * const byt_score_uart_groups[] = {
- "uart1_grp", "uart2_grp",
+ "uart1_grp", "uart2_grp", "uart3_grp",
};
static const char * const byt_score_pwm_groups[] = {
"pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
};
static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
static const char * const byt_score_gpio_groups[] = {
- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+ "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+ "plt_clk5_grp_gpio", "smbus_grp_gpio",
};
static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
};
static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
};
static const char * const byt_sus_gpio_groups[] = {
"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
- "pmu_clk1_grp", "pmu_clk2_grp",
+ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
};
static const struct intel_function byt_sus_functions[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fde65e18cd14..6981e2fab93f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -179,6 +179,10 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
+#define PIN_GROUP_GPIO(n, p, m) \
+ PIN_GROUP(n, p, m), \
+ PIN_GROUP(n "_gpio", p, 0)
+
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
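For reference, the new helper emits two group entries per use: the native group and a "_gpio" variant of the same pins with mode 0. An illustrative expansion (not copied from the source):

	PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1)
	/* expands to */
	PIN_GROUP("uart3_grp", byt_score_uart3_pins, 1),
	PIN_GROUP("uart3_grp_gpio", byt_score_uart3_pins, 0)

which is why the byt_score_gpio_groups list above now refers to the "*_grp_gpio" names.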
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b6bc31abd2b0..b19bc391705e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+ if (!ret)
+ err = -EINVAL;
+ break;
+ case PIN_CONFIG_OUTPUT:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
- /* CONFIG Current direction return value
- * ------------- ----------------- ----------------------
- * OUTPUT_ENABLE output 1 (= HW value)
- * input 0 (= HW value)
- * INPUT_ENABLE output 0 (= reverse HW value)
- * input 1 (= reverse HW value)
- */
- if (param == PIN_CONFIG_INPUT_ENABLE)
- ret = !ret;
+ if (!ret) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
}
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+ if (!ret)
+ err = -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
err = hw->soc->bias_set_combo(hw, desc, 0, arg);
break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- /* Keep set direction to consider the case that a GPIO pin
- * does not have SMT control
- */
- if (err != -ENOTSUPP)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- break;
case PIN_CONFIG_INPUT_ENABLE:
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
- if (err)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_INPUT);
break;
case PIN_CONFIG_SLEW_RATE:
/* regard all non-zero value as enable */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 79f5d753d7e1..50a87d9618a8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
/* gen_clk */
static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
GROUP(pdm_din2_a, 3),
GROUP(pdm_din1_a, 3),
GROUP(pdm_din0_a, 3),
- GROUP(pdm_dclk, 3),
+ GROUP(pdm_dclk_a, 3),
GROUP(pwm_c_a, 3),
GROUP(pwm_b_a, 3),
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
static const char * const pdm_groups[] = {
"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
};
static const char * const gen_clk_groups[] = {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index eb5a8c654260..20425afc6b33 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2045,7 +2045,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
struct irq_data *data;
+ unsigned long flags;
unsigned int virq;
+ int ret;
if (!pctrl->hwirq[i])
continue;
@@ -2063,8 +2065,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
continue;
}
- if (!irqd_irq_disabled(data))
+ /*
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+ raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
+ raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
}
}
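A generic sketch of the locking the restore path now relies on (names are illustrative): trigger-type reprogramming and re-enabling run in one raw-spinlock critical section with interrupts disabled, so a concurrent interrupt cannot observe the line enabled with a stale type.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void restore_line(void (*set_type)(void), void (*enable_line)(void))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	set_type();	/* reprogram the trigger type */
	enable_line();	/* then unmask, still under the lock */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}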
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 30951f7131cd..1accdaaf282c 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -721,6 +721,7 @@ static struct miscdevice isst_if_char_driver = {
static const struct x86_cpu_id hpm_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index 1305cba61edd..aca123783efc 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
};
static const struct regulator_desc mt6360_otg_rdesc = {
- .of_match = "usb-otg-vbus",
+ .of_match = "usb-otg-vbus-regulator",
.name = "usb-otg-vbus",
.ops = &mt6360_chg_otg_ops,
.owner = THIS_MODULE,
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index c345a77f9f78..e4dbacd50a43 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
};
+#if IS_ENABLED(CONFIG_USB_PHY)
/*
* When the charger is in boost mode, REG02[7:2] represent boost output
* voltage.
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
};
+#endif
/* REG07[3:0] (VMREG) in uV */
static const int rt9455_vmreg_values[] = {
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index fe7ae0f3f46a..5ab1a0befe12 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
h->irq = irq;
h->desc = *d;
+ h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
+ if (!h->desc.name)
+ return ERR_PTR(-ENOMEM);
ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
rdev_amount);
diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
index ad6587a378d0..24cc9fc94e90 100644
--- a/drivers/regulator/mt6360-regulator.c
+++ b/drivers/regulator/mt6360-regulator.c
@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
}
-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
- mreg, mmask, streg, stmask, vranges, \
- vcnts, offon_delay, irq_tbls) \
+#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
+ vmask, mreg, mmask, streg, stmask, \
+ vranges, vcnts, offon_delay, irq_tbls) \
{ \
.desc = { \
.name = #_name, \
.supply_name = #_sname, \
.id = MT6360_REGULATOR_##_name, \
- .of_match = of_match_ptr(#_name), \
+ .of_match = of_match_ptr(match), \
.regulators_node = of_match_ptr("regulator"), \
.of_map_mode = mt6360_regulator_of_map_mode, \
.owner = THIS_MODULE, \
@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
- MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
+ 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
buck_vout_ranges, 256, 0, buck1_irq_tbls),
- MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
+ 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
buck_vout_ranges, 256, 0, buck2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
+ 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
- MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
+ 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
- MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
+ 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
- MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
+ 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
+ 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
- MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
+ 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
};
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index 656fe330d38f..063e12c08e75 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
{ .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
static struct platform_driver qcom_refgen_driver = {
.probe = qcom_refgen_probe,
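For context, MODULE_DEVICE_TABLE(of, ...) exports the match table as module alias information, which lets userspace autoload the module when a matching compatible string appears; without it the driver only binds if built in or loaded by hand. A minimal sketch with placeholder names:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_match_table[] = {
	{ .compatible = "vendor,demo-device" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_match_table);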
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
index 086da36abc0b..4955616517ce 100644
--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
{ .compatible = "qcom,vqmmc-ipq4019-regulator", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
static struct platform_driver ipq4019_regulator_driver = {
.probe = ipq4019_regulator_probe,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f0b8b709649f..a3adaec5504e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -364,30 +364,33 @@ out:
return rc;
}
+static void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ qeth_free_qdio_queue(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+}
+
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
- card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
- dev_err(&card->gdev->dev, "Failed to create completion queue\n");
- return -ENOMEM;
+ card->qdio.c_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.c_q) {
+ dev_err(&card->gdev->dev,
+ "Failed to create completion queue\n");
+ return -ENOMEM;
+ }
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
- card->qdio.c_q = NULL;
+ qeth_free_cq(card);
}
return 0;
}
-static void qeth_free_cq(struct qeth_card *card)
-{
- if (card->qdio.c_q) {
- qeth_free_qdio_queue(card->qdio.c_q);
- card->qdio.c_q = NULL;
- }
-}
-
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "allcqdbf");
+ /* completion */
+ if (qeth_alloc_cq(card))
+ goto out_err;
+
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
- /* completion */
- if (qeth_alloc_cq(card))
- goto out_freeoutq;
-
return 0;
out_freeoutq:
@@ -2677,6 +2680,8 @@ out_freeoutq:
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ qeth_free_cq(card);
+out_err:
return -ENOMEM;
}
@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
+ qeth_free_cq(card);
+
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
- qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
- int rc;
-
- if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
- rc = -1;
- goto out;
- } else {
- if (card->options.cq == cq) {
- rc = 0;
- goto out;
- }
-
- qeth_free_qdio_queues(card);
- card->options.cq = cq;
- rc = 0;
- }
-out:
- return rc;
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+ return -1;
+ card->options.cq = cq;
+ return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
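A generic sketch of the idempotent alloc/free pairing the qeth change applies to the completion queue (structure and names are hypothetical): allocation is a no-op when the object already exists, and the free helper is safe to call in any state, so callers need not track ordering.

#include <linux/slab.h>

struct demo_ctx {
	void *buf;
};

static int demo_ensure_buf(struct demo_ctx *c, size_t sz)
{
	if (c->buf)				/* already allocated */
		return 0;
	c->buf = kzalloc(sz, GFP_KERNEL);
	return c->buf ? 0 : -ENOMEM;
}

static void demo_release_buf(struct demo_ctx *c)
{
	kfree(c->buf);				/* kfree(NULL) is a no-op */
	c->buf = NULL;
}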
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58fdf679341d..65cdc8b77e35 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3120,6 +3120,7 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
const struct scsi_io_group_descriptor *desc, *start, *end;
+ u16 permanent_stream_count_old;
struct scsi_sense_hdr sshdr;
struct scsi_mode_data data;
int res;
@@ -3140,12 +3141,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
for (desc = start; desc < end; desc++)
if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
break;
+ permanent_stream_count_old = sdkp->permanent_stream_count;
sdkp->permanent_stream_count = desc - start;
if (sdkp->rscs && sdkp->permanent_stream_count < 2)
sd_printk(KERN_INFO, sdkp,
"Unexpected: RSCS has been set and the permanent stream count is %u\n",
sdkp->permanent_stream_count);
- else if (sdkp->permanent_stream_count)
+ else if (sdkp->permanent_stream_count != permanent_stream_count_old)
sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
sdkp->permanent_stream_count);
}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 50c664b65f4d..1b7afb19ccd6 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -72,6 +72,7 @@ config MTK_SOCINFO
tristate "MediaTek SoC Information"
default y
depends on NVMEM_MTK_EFUSE
+ select SOC_BUS
help
The MediaTek SoC Information (mtk-socinfo) driver provides
information about the SoC to the userspace including the
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index c832f5c670bc..9a91298c1253 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1768,6 +1768,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1819,10 +1820,12 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
}
if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(bdata->tzone_name);
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- bdata->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 7cd24bd8e224..6bcf8e75273c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -130,6 +130,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
+static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 wake_ctrl;
+
+ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ if (enable)
+ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
+ else
+ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
+
+ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -1095,6 +1108,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
/*
@@ -1121,6 +1135,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
return 0;
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
ret = amd_sdw_clock_stop(amd_manager);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 418b679e0b1a..707065468e05 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -152,7 +152,7 @@
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
+#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
@@ -190,6 +190,7 @@
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
+#define AMD_SDW_WAKE_INTR_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index b246067e074b..6cb96a1e8b7d 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -967,7 +967,7 @@ vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
val_u32 = __virtio32_to_cpu(true, config->size_max);
- return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
}
/* fill the block size*/
@@ -1089,7 +1089,7 @@ static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
u8 ro;
ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
return -EMSGSIZE;
return 0;
@@ -1100,7 +1100,7 @@ static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
u8 flush;
flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
return -EMSGSIZE;
return 0;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dae96c9f61cf..806ecd32219b 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);