Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--  fs/eventpoll.c  17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 64659b110973..4f757a71f99b 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -483,8 +483,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
* (efd1) notices that it may have some event ready, so it needs to wake up
* the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
* that ends up in another wake_up(), after having checked about the
- * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
- * avoid stack blasting.
+ * recursion constraints. That are, no more than EP_MAX_NESTS, to avoid
+ * stack blasting.
*
* When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
* this special case of epoll.
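The nesting scenario described in the comment above can be produced from userspace by registering one epoll fd inside another. A minimal sketch, not part of this patch (fd names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>

int main(void)
{
	int efd1 = epoll_create1(0);	/* inner epoll set */
	int efd2 = epoll_create1(0);	/* outer epoll set */

	if (efd1 < 0 || efd2 < 0) {
		perror("epoll_create1");
		return EXIT_FAILURE;
	}

	/*
	 * Watch efd1 from efd2. When something watched by efd1 becomes
	 * ready, waking efd2's waiters goes through ep_poll_safewake(),
	 * which enforces the EP_MAX_NESTS recursion bound noted above.
	 */
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd1 };
	if (epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev) < 0) {
		perror("epoll_ctl");
		return EXIT_FAILURE;
	}

	return EXIT_SUCCESS;
}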
@@ -2042,6 +2042,19 @@ SYSCALL_DEFINE1(epoll_create, int, size)
return do_epoll_create(0);
}
+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+ epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+ epev->events &= ~EPOLLWAKEUP;
+}
+#endif
+
static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
bool nonblock)
{
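The helper added above silently strips EPOLLWAKEUP when the caller lacks CAP_BLOCK_SUSPEND, or unconditionally on kernels built without CONFIG_PM_SLEEP; epoll_ctl() still succeeds, it just takes no wakeup source. A minimal userspace sketch, not part of this patch (the function name is illustrative):

#include <stdio.h>
#include <sys/epoll.h>

#ifndef EPOLLWAKEUP
#define EPOLLWAKEUP (1u << 29)	/* older libc headers may lack it */
#endif

/* Ask epoll to hold a wakeup source while events from fd are pending. */
int add_wakeup_fd(int epfd, int fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLWAKEUP,
		.data.fd = fd,
	};

	/*
	 * Without CAP_BLOCK_SUSPEND (or with CONFIG_PM_SLEEP off), the
	 * kernel clears EPOLLWAKEUP here rather than failing, per
	 * ep_take_care_of_epollwakeup() above.
	 */
	return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
}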