author      Eliezer Tamir <eliezer.tamir@linux.intel.com>   2013-07-08 16:20:34 +0300
committer   David S. Miller <davem@davemloft.net>           2013-07-08 19:25:45 -0700
commit      cbf55001b2ddb814329735641be5d29b08c82b08 (patch)
tree        110c1191f4b6699bef04ebdf45e4677c623a7ceb /fs/select.c
parent      c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1 (diff)
net: rename low latency sockets functions to busy poll
Rename functions in include/net/ll_poll.h to busy wait.
Clarify documentation about expected power use increase.
Rename POLL_LL to POLL_BUSY_LOOP.
Add need_resched() testing to poll/select busy loops.

Note that in select and poll can_busy_poll is dynamic and is
updated continuously to reflect the existence of supported
sockets with valid queue information.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
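The control flow this patch adds to do_select() and do_poll() can be modelled outside the kernel: a busy_flag is requested up front, each ->poll() call may echo POLL_BUSY_LOOP back to set can_busy_loop, and the loop only keeps spinning while nothing is ready, need_resched() is false, and the time budget has not run out. The sketch below is a rough, self-contained userspace model, not kernel code: the names net_busy_loop_on(), busy_loop_start_time(), busy_loop_end_time(), busy_loop_range(), need_resched() and POLL_BUSY_LOOP come from the patch, but their stand-in implementations, the fake clock, do_pollfd_model() and all numeric values are invented for illustration.

/*
 * Standalone model of the busy-poll decision this patch adds to
 * do_select()/do_poll().  Every helper below is a stand-in for the
 * real kernel one; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POLL_BUSY_LOOP 0x8000           /* illustrative value only */

static uint64_t now_us;                                     /* fake clock */
static bool net_busy_loop_on(void)      { return true; }    /* sysctl stand-in */
static uint64_t busy_loop_start_time(unsigned int flag) { return flag ? now_us : 0; }
static uint64_t busy_loop_end_time(void) { return 50; }     /* 50 us budget */
static bool busy_loop_range(uint64_t start, uint64_t run)
{
	return now_us <= start + run;    /* still within the busy-poll budget */
}
static bool need_resched(void)          { return false; }

/* Pretend poll of one fd: flags busy-poll capability via *can_busy_poll. */
static unsigned int do_pollfd_model(unsigned int key, bool *can_busy_poll)
{
	unsigned int mask = (now_us >= 30) ? 0x1 /* POLLIN */ : POLL_BUSY_LOOP;
	if (mask & key & POLL_BUSY_LOOP)
		*can_busy_poll = true;   /* only remember it if we asked for it */
	return mask & ~POLL_BUSY_LOOP;
}

int main(void)
{
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	uint64_t busy_start = busy_loop_start_time(busy_flag);
	uint64_t busy_end = busy_loop_end_time();

	for (;;) {
		bool can_busy_loop = false;
		unsigned int mask = do_pollfd_model(busy_flag, &can_busy_loop);

		if (mask) {
			/* got something: stop busy polling and report it */
			printf("ready after %llu us\n", (unsigned long long)now_us);
			break;
		}

		/* keep spinning only if allowed, useful, and within budget */
		if (busy_flag && !need_resched() && can_busy_loop &&
		    busy_loop_range(busy_start, busy_end)) {
			now_us += 5;     /* model time passing while spinning */
			continue;
		}

		/* the kernel would fall back to poll_schedule_timeout() here */
		printf("giving up busy poll, sleeping instead\n");
		break;
	}
	return 0;
}

As in the patch, the model clears busy_flag once events are found so later iterations stop requesting busy polling, and the need_resched() test ensures the spin gives way to the scheduler instead of burning CPU when another task wants to run.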
Diffstat (limited to 'fs/select.c')
-rw-r--r--   fs/select.c | 60
1 file changed, 37 insertions(+), 23 deletions(-)
diff --git a/fs/select.c b/fs/select.c
index f28a58592725..25cac5faf6d6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -402,9 +402,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
poll_table *wait;
int retval, i, timed_out = 0;
unsigned long slack = 0;
- unsigned int ll_flag = ll_get_flag();
- u64 ll_start = ll_start_time(ll_flag);
- u64 ll_time = ll_run_time();
+ unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ u64 busy_start = busy_loop_start_time(busy_flag);
+ u64 busy_end = busy_loop_end_time();
rcu_read_lock();
retval = max_select_fd(n, fds);
@@ -427,7 +427,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
retval = 0;
for (;;) {
unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
- bool can_ll = false;
+ bool can_busy_loop = false;
inp = fds->in; outp = fds->out; exp = fds->ex;
rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
@@ -456,7 +456,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
mask = DEFAULT_POLLMASK;
if (f_op && f_op->poll) {
wait_key_set(wait, in, out,
- bit, ll_flag);
+ bit, busy_flag);
mask = (*f_op->poll)(f.file, wait);
}
fdput(f);
@@ -475,11 +475,18 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
retval++;
wait->_qproc = NULL;
}
- if (mask & POLL_LL)
- can_ll = true;
/* got something, stop busy polling */
- if (retval)
- ll_flag = 0;
+ if (retval) {
+ can_busy_loop = false;
+ busy_flag = 0;
+
+ /*
+ * only remember a returned
+ * POLL_BUSY_LOOP if we asked for it
+ */
+ } else if (busy_flag & mask)
+ can_busy_loop = true;
+
}
}
if (res_in)
@@ -498,8 +505,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
break;
}
- /* only if on, have sockets with POLL_LL and not out of time */
- if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time))
+ /* only if found POLL_BUSY_LOOP sockets && not out of time */
+ if (!need_resched() && can_busy_loop &&
+ busy_loop_range(busy_start, busy_end))
continue;
/*
@@ -734,7 +742,8 @@ struct poll_list {
* if pwait->_qproc is non-NULL.
*/
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
- bool *can_ll, unsigned int ll_flag)
+ bool *can_busy_poll,
+ unsigned int busy_flag)
{
unsigned int mask;
int fd;
@@ -748,10 +757,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
mask = DEFAULT_POLLMASK;
if (f.file->f_op && f.file->f_op->poll) {
pwait->_key = pollfd->events|POLLERR|POLLHUP;
- pwait->_key |= ll_flag;
+ pwait->_key |= busy_flag;
mask = f.file->f_op->poll(f.file, pwait);
- if (mask & POLL_LL)
- *can_ll = true;
+ if (mask & busy_flag)
+ *can_busy_poll = true;
}
/* Mask out unneeded events. */
mask &= pollfd->events | POLLERR | POLLHUP;
@@ -770,9 +779,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
ktime_t expire, *to = NULL;
int timed_out = 0, count = 0;
unsigned long slack = 0;
- unsigned int ll_flag = ll_get_flag();
- u64 ll_start = ll_start_time(ll_flag);
- u64 ll_time = ll_run_time();
+ unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ u64 busy_start = busy_loop_start_time(busy_flag);
+ u64 busy_end = busy_loop_end_time();
+
/* Optimise the no-wait case */
if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -785,7 +795,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
for (;;) {
struct poll_list *walk;
- bool can_ll = false;
+ bool can_busy_loop = false;
for (walk = list; walk != NULL; walk = walk->next) {
struct pollfd * pfd, * pfd_end;
@@ -800,10 +810,13 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
* this. They'll get immediately deregistered
* when we break out and return.
*/
- if (do_pollfd(pfd, pt, &can_ll, ll_flag)) {
+ if (do_pollfd(pfd, pt, &can_busy_loop,
+ busy_flag)) {
count++;
pt->_qproc = NULL;
- ll_flag = 0;
+ /* found something, stop busy polling */
+ busy_flag = 0;
+ can_busy_loop = false;
}
}
}
@@ -820,8 +833,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
if (count || timed_out)
break;
- /* only if on, have sockets with POLL_LL and not out of time */
- if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time))
+ /* only if found POLL_BUSY_LOOP sockets && not out of time */
+ if (!need_resched() && can_busy_loop &&
+ busy_loop_range(busy_start, busy_end))
continue;
/*