author     David Woodhouse <dwmw@amazon.co.uk>              2020-10-27 16:55:21 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-09-19 13:20:06 +0300
commit     755e86f28ab9b85484567893aa5266e4e30a8fe9 (patch)
tree       6c85a1be1c6162558719269278dbca398fde6aab /include
parent     6b5d585e23022aa58fe494b16237c2a27efff58e (diff)
download   linux-755e86f28ab9b85484567893aa5266e4e30a8fe9.tar.xz
eventfd: Export eventfd_ctx_do_read()
[ Upstream commit 28f1326710555bbe666f64452d08f2d7dd657cae ]

Where events are consumed in the kernel, for example by KVM's
irqfd_wakeup() and VFIO's virqfd_wakeup(), they currently lack a
mechanism to drain the eventfd's counter.

Since the wait queue is already locked while the wakeup functions are
invoked, all they really need to do is call eventfd_ctx_do_read().

Add a check for the lock, and export it for them.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20201027135523.646811-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Stable-dep-of: 758b49204781 ("eventfd: prevent underflow for eventfd semaphores")
Signed-off-by: Sasha Levin <sashal@kernel.org>
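For illustration, a minimal sketch of the consumer pattern the message describes, assuming a hypothetical example_virqfd structure and callback (the real KVM irqfd_wakeup() and VFIO virqfd_wakeup() call sites differ in detail):

/* Illustration only: a made-up in-kernel eventfd consumer. */
#include <linux/kernel.h>
#include <linux/eventfd.h>
#include <linux/wait.h>
#include <linux/poll.h>

struct example_virqfd {
	struct eventfd_ctx	*eventfd;
	wait_queue_entry_t	wait;
};

/*
 * Wait-queue callback, invoked with the eventfd's wqh.lock already
 * held -- exactly the condition under which eventfd_ctx_do_read()
 * may be called to drain the counter.
 */
static int example_virqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
				 int sync, void *key)
{
	struct example_virqfd *virqfd =
		container_of(wait, struct example_virqfd, wait);
	__u64 cnt;

	if ((__force __u64)key & EPOLLIN) {
		/* Drain the counter so the eventfd can be re-armed. */
		eventfd_ctx_do_read(virqfd->eventfd, &cnt);
		/* ... act on the event ... */
	}
	return 0;
}

Because the wakeup callback already runs under the wait-queue lock, draining through eventfd_ctx_do_read() avoids retaking the lock or going through the file read path.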
Diffstat (limited to 'include')
-rw-r--r--  include/linux/eventfd.h  6
1 files changed, 6 insertions, 0 deletions
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 6cd2a92daf20..c1bd4883e2fa 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -42,6 +42,7 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
 
 DECLARE_PER_CPU(int, eventfd_wake_count);
 
@@ -89,6 +90,11 @@ static inline bool eventfd_signal_count(void)
 	return false;
 }
 
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */
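The exported declaration above pairs with the fs/eventfd.c half of the patch, which this 'include'-limited view omits. Per the commit message ("Add a check for the lock, and export it"), that side plausibly looks like the following sketch; the function body is elided here rather than quoted from the patch:

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	/* The commit adds a check that the wait-queue lock is held. */
	lockdep_assert_held(&ctx->wqh.lock);
	/* ... existing logic that consumes ctx->count into *cnt ... */
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);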