author	Al Viro <viro@zeniv.linux.org.uk>	2020-08-31 13:16:39 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2020-10-25 20:01:58 -0400
commit	1ec09974d845bdf827028aa7deb96378f54bcd06 (patch)
tree	02bbed249a6ef721a4abeb8856d1730cacb22b27 /fs/eventpoll.c
parent	db502f8a3b0bb5188f92d9d6a68aed223892689b (diff)
lift the calls of ep_read_events_proc() into the callers
Expand the calls of ep_scan_ready_list() that get ep_read_events_proc(). As a side benefit we can pass depth to ep_read_events_proc() by value and not by address - the latter used to be forced by the signature expected from an ep_scan_ready_list() callback.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
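The shape of the change: ep_scan_ready_list() takes a callback of type __poll_t (*)(struct eventpoll *, struct list_head *, void *), so an extra argument like depth has to travel through the void * cookie; once the scan is opened up into an ep_start_scan()/ep_done_scan() bracket around a direct call, the worker can take depth as a plain int. A minimal standalone sketch of the pattern (hypothetical names, deliberately not the kernel code itself):

#include <stdio.h>

/* Old shape: a generic scan helper only knows "void *priv", so the
 * caller passes depth by address and the worker casts it back out. */
static int scan_ready_list(int (*proc)(void *priv), void *priv)
{
	/* ...take the lock, splice the ready list onto a private list... */
	int res = proc(priv);
	/* ...splice leftovers back, drop the lock... */
	return res;
}

static int read_events_cb(void *priv)
{
	int depth = *(int *)priv;	/* indirection forced by the signature */
	depth++;			/* nesting level of the items we poll */
	return depth;			/* stand-in for scanning at that depth */
}

/* New shape: start/done helpers bracket a direct call, so the worker
 * takes depth by value and the call site applies the +1 itself. */
static void start_scan(void) { /* ...lock + splice... */ }
static void done_scan(void)  { /* ...splice back + unlock... */ }

static int read_events(int depth)
{
	return depth;			/* depth already incremented by the caller */
}

int main(void)
{
	int depth = 0;

	/* before: depth smuggled through a void pointer */
	printf("old: %d\n", scan_ready_list(read_events_cb, &depth));

	/* after: depth is an ordinary by-value argument */
	start_scan();
	printf("new: %d\n", read_events(depth + 1));
	done_scan();
	return 0;
}

Both calls print 1: the depth + 1 at the new call sites replaces the depth++ the old callback performed on entry, which is exactly the move visible in the hunks below.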
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index eb012fdc152e..9b9e29e0c85f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -774,7 +774,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
 }
 
 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
-				    void *priv);
+				    int depth);
 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 				 poll_table *pt);
 
@@ -787,6 +787,8 @@ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 				 int depth)
 {
 	struct eventpoll *ep;
+	LIST_HEAD(txlist);
+	__poll_t res;
 	bool locked;
 
 	pt->_key = epi->event.events;
@@ -797,20 +799,19 @@ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 	poll_wait(epi->ffd.file, &ep->poll_wait, pt);
 	locked = pt && (pt->_qproc == ep_ptable_queue_proc);
 
-	return ep_scan_ready_list(epi->ffd.file->private_data,
-				  ep_read_events_proc, &depth, depth,
-				  locked) & epi->event.events;
+	ep_start_scan(ep, depth, locked, &txlist);
+	res = ep_read_events_proc(ep, &txlist, depth + 1);
+	ep_done_scan(ep, depth, locked, &txlist);
+	return res & epi->event.events;
 }
 
 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
-				    void *priv)
+				    int depth)
 {
 	struct epitem *epi, *tmp;
 	poll_table pt;
-	int depth = *(int *)priv;
 
 	init_poll_funcptr(&pt, NULL);
-	depth++;
 
 	list_for_each_entry_safe(epi, tmp, head, rdllink) {
 		if (ep_item_poll(epi, &pt, depth)) {
@@ -832,7 +833,8 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
 static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
 	struct eventpoll *ep = file->private_data;
-	int depth = 0;
+	LIST_HEAD(txlist);
+	__poll_t res;
 
 	/* Insert inside our poll wait queue */
 	poll_wait(file, &ep->poll_wait, wait);
 
@@ -841,8 +843,10 @@ static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 	 * Proceed to find out if wanted events are really available inside
 	 * the ready list.
 	 */
-	return ep_scan_ready_list(ep, ep_read_events_proc,
-				  &depth, depth, false);
+	ep_start_scan(ep, 0, false, &txlist);
+	res = ep_read_events_proc(ep, &txlist, 1);
+	ep_done_scan(ep, 0, false, &txlist);
+	return res;
 }
 
 #ifdef CONFIG_PROC_FS