@@ -670,6 +670,8 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	struct epitem *epi, *nepi;
 	LIST_HEAD(txlist);
 
+	lockdep_assert_irqs_enabled();
+
 	/*
 	 * We need to lock this because we could be hit by
 	 * eventpoll_release_file() and epoll_ctl().
@@ -764,6 +766,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 {
 	struct file *file = epi->ffd.file;
 
+	lockdep_assert_irqs_enabled();
+
 	/*
 	 * Removes poll wait queue hooks.
 	 */
@@ -1412,6 +1416,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
+	lockdep_assert_irqs_enabled();
+
 	user_watches = atomic_long_read(&ep->user->epoll_watches);
 	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
@@ -1540,6 +1546,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 	int pwake = 0;
 	poll_table pt;
 
+	lockdep_assert_irqs_enabled();
+
 	init_poll_funcptr(&pt, NULL);
 
 	/*
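For readers unfamiliar with the helper: lockdep_assert_irqs_enabled() (from include/linux/lockdep.h) warns once if the function is entered with hardirqs disabled, on kernels built with lockdep's IRQ-state tracking. The likely motivation for asserting it in these epoll paths is that they take their locks with spin_lock_irq()/spin_unlock_irq(), which assume interrupts are enabled on entry. Below is a minimal, hypothetical sketch of that pattern, not taken from the patch; foo_lock and foo_process() are illustrative names only.

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(foo_lock);

	/*
	 * Hypothetical example: like the epoll functions above, this must
	 * only be called with interrupts enabled, because spin_unlock_irq()
	 * unconditionally re-enables them on exit.
	 */
	static void foo_process(void)
	{
		/* Catch callers that enter with IRQs disabled. */
		lockdep_assert_irqs_enabled();

		spin_lock_irq(&foo_lock);
		/* ... critical section ... */
		spin_unlock_irq(&foo_lock);
	}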