path: root/drivers/accel/ivpu/ivpu_ipc.c
author		Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>	2023-10-28 18:59:31 +0300
committer	Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>	2023-10-31 18:14:17 +0300
commit		57c7e3e4800ad65048a7044b03723cd85d9595af (patch)
tree		82e870cd80b7deba2134c5a7677962db251a30cf /drivers/accel/ivpu/ivpu_ipc.c
parent		a06eb9be49a66e16e0c7819c630c1267c25b36dc (diff)
download	linux-57c7e3e4800ad65048a7044b03723cd85d9595af.tar.xz
accel/ivpu: Stop job_done_thread on suspend
Stop the job_done thread when going to suspend. Use kthread_park() instead of kthread_stop() to avoid memory allocation and potential failure on resume. Use a separate function as the thread wake-up condition. Use a spin lock to ensure rx_msg_list is properly protected against concurrent access. This avoids a race condition when the rx_msg_list list is modified and read in ivpu_ipc_receive() at the same time.

Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-4-stanislaw.gruszka@linux.intel.com
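The suspend/resume side of this change (actually parking and unparking the job_done thread) lives outside ivpu_ipc.c and is not part of this diff. As a minimal sketch of the kthread_park()/kthread_unpark() pattern the message refers to, using a hypothetical my_drv driver (none of the my_drv_* names come from the ivpu code), a worker thread that parks on suspend could look like this:

/*
 * Minimal sketch, not taken from this patch: the general pattern for
 * parking a worker kthread on suspend and unparking it on resume,
 * instead of stopping it and re-creating it (re-creation needs an
 * allocation that can fail on resume). All my_drv_* names are
 * hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>

struct my_drv {
	struct task_struct *worker;
};

static int my_drv_worker_fn(void *arg)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			kthread_parkme();	/* sleeps here while parked */
			continue;
		}
		/* ... wait for and process completed jobs ... */
	}
	return 0;
}

static int my_drv_start_worker(struct my_drv *drv)
{
	drv->worker = kthread_run(my_drv_worker_fn, drv, "my_drv_worker");
	return PTR_ERR_OR_ZERO(drv->worker);
}

static void my_drv_suspend(struct my_drv *drv)
{
	kthread_park(drv->worker);	/* waits until the thread is parked; no allocation */
}

static void my_drv_resume(struct my_drv *drv)
{
	kthread_unpark(drv->worker);	/* cannot fail, unlike re-creating the thread */
}

static void my_drv_stop_worker(struct my_drv *drv)
{
	kthread_stop(drv->worker);	/* final teardown still stops the thread */
}

In this patch the receive path's wake-up condition (see the second hunk below) also checks kthread_should_park(), so a thread sleeping in ivpu_ipc_receive() wakes up promptly once the suspend path asks it to park.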
Diffstat (limited to 'drivers/accel/ivpu/ivpu_ipc.c')
-rw-r--r--	drivers/accel/ivpu/ivpu_ipc.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index d069d1e1f91d..270caef789bf 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -202,6 +202,20 @@ unlock:
 	return ret;
 }
 
+static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
+{
+	int ret = 0;
+
+	if (IS_KTHREAD())
+		ret |= (kthread_should_stop() || kthread_should_park());
+
+	spin_lock_irq(&cons->rx_msg_lock);
+	ret |= !list_empty(&cons->rx_msg_list);
+	spin_unlock_irq(&cons->rx_msg_lock);
+
+	return ret;
+}
+
 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		     struct ivpu_ipc_hdr *ipc_buf,
 		     struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
@@ -211,8 +225,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	int wait_ret, ret = 0;
 
 	wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
-						    (IS_KTHREAD() && kthread_should_stop()) ||
-						    !list_empty(&cons->rx_msg_list),
+						    ivpu_ipc_rx_need_wakeup(cons),
 						    msecs_to_jiffies(timeout_ms));
 
 	if (IS_KTHREAD() && kthread_should_stop())