path: root/include/net/request_sock.h
author    Eric Dumazet <edumazet@google.com>    2015-03-19 19:04:20 -0700
committer David S. Miller <davem@davemloft.net>  2015-03-20 12:40:25 -0400
commit    fa76ce7328b289b6edd476e24eb52fd634261720 (patch)
tree      2e4c116a4e299700c185d73018bbb3518e46e1bb /include/net/request_sock.h
parent    52452c542559ac980b48dbf22a30ee7fa0af507c (diff)
inet: get rid of central tcp/dccp listener timer
One of the major issues for TCP is SYNACK retransmit handling, done by inet_csk_reqsk_queue_prune(), fired by the keepalive timer of a TCP_LISTEN socket. This function can run for a very long time with the socket lock held, meaning that other cpus needing this lock have to spin for hundreds of ms. SYNACK are also sent in huge bursts, likely to cause severe drops anyway.

This model was OK 15 years ago when memory was very tight, but we can now afford a timer per request sock. Timer invocations no longer need to lock the listener and can run on all cpus in parallel.

With a following patch increasing somaxconn width to 32 bits, I tested a listener with more than 4 million active request sockets and a steady SYNFLOOD of ~200,000 SYN per second. The host was sending ~830,000 SYNACK per second, roughly 100 times more than what we could achieve before this patch.

Later, we will get rid of the listener hash and use ehash instead.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
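To illustrate the model the patch moves to, here is a hedged, kernel-style sketch of a per-request timer, using the setup_timer()/mod_timer() API of that era; reqsk_timer_handler and reqsk_arm_timer are illustrative names, not necessarily the ones the patch introduces:

static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;

	/* Retransmit the SYNACK or expire the request. No listener
	 * lock is taken here, so many handlers can run in parallel
	 * on different cpus; a refcount on req keeps it alive while
	 * the timer is armed.
	 */
}

static void reqsk_arm_timer(struct request_sock *req, unsigned long timeout)
{
	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	mod_timer(&req->rsk_timer, jiffies + timeout);
}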
Diffstat (limited to 'include/net/request_sock.h')
-rw-r--r--  include/net/request_sock.h | 87
1 file changed, 41 insertions(+), 46 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 65223905d139..6a91261d9b7b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -62,7 +62,7 @@ struct request_sock {
u32 window_clamp; /* window clamp at creation time */
u32 rcv_wnd; /* rcv_wnd offered first time */
u32 ts_recent;
- unsigned long expires;
+ struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
u32 secid;
@@ -110,9 +110,6 @@ static inline void reqsk_free(struct request_sock *req)
static inline void reqsk_put(struct request_sock *req)
{
- /* temporary debugging, until req sock are put into ehash table */
- WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 1);
-
if (atomic_dec_and_test(&req->rsk_refcnt))
reqsk_free(req);
}
@@ -124,12 +121,16 @@ extern int sysctl_max_syn_backlog;
* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
- u8 max_qlen_log;
+ int qlen_inc; /* protected by listener lock */
+ int young_inc;/* protected by listener lock */
+
+ /* following fields can be updated by timer */
+ atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
+ atomic_t young_dec;
+
+ u8 max_qlen_log ____cacheline_aligned_in_smp;
u8 synflood_warned;
/* 2 bytes hole, try to use */
- int qlen;
- int qlen_young;
- int clock_hand;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
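The struct change above splits each counter into a lock-protected increment (qlen_inc, young_inc) and a timer-updated atomic decrement (qlen_dec, young_dec), so expiring requests never need the listener lock. A minimal standalone C11 analogue of this split-counter scheme (all names here are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

struct split_counter {
	int        inc; /* bumped under the listener lock */
	atomic_int dec; /* bumped from timer context, no lock needed */
};

static int split_counter_read(struct split_counter *c)
{
	/* mirrors listen_sock_qlen(): qlen = qlen_inc - qlen_dec */
	return c->inc - atomic_load(&c->dec);
}

int main(void)
{
	struct split_counter qlen = { .inc = 0 };

	qlen.inc += 3;			/* three requests queued under the lock */
	atomic_fetch_add(&qlen.dec, 1);	/* one expired by its own timer */
	printf("qlen = %d\n", split_counter_read(&qlen));	/* prints 2 */
	return 0;
}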
@@ -182,9 +183,7 @@ struct fastopen_queue {
struct request_sock_queue {
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
- rwlock_t syn_wait_lock;
u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
struct listen_sock *listen_opt;
struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
* enabled on this listener. Check
@@ -192,6 +191,9 @@ struct request_sock_queue {
* to determine if TFO is enabled
* right at this moment.
*/
+
+ /* temporary alignment, our goal is to get rid of this lock */
+ rwlock_t syn_wait_lock ____cacheline_aligned_in_smp;
};
int reqsk_queue_alloc(struct request_sock_queue *queue,
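The ____cacheline_aligned_in_smp annotation above pushes the still-needed rwlock onto its own cache line, so writers contending on it do not false-share with the read-mostly fields before it. A userspace analogue, assuming 64-byte cache lines (struct and field names are made up for illustration):

#include <stdalign.h>
#include <stddef.h>

struct padded_queue {
	long accept_head;		/* read-mostly fields ... */
	long accept_tail;
	alignas(64) int hot_lock;	/* ... kept off the contended line */
};

_Static_assert(offsetof(struct padded_queue, hot_lock) % 64 == 0,
	       "hot_lock starts on its own cache line");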
@@ -223,11 +225,15 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
struct request_sock **prev;
write_lock(&queue->syn_wait_lock);
+
prev = &lopt->syn_table[req->rsk_hash];
while (*prev != req)
prev = &(*prev)->dl_next;
*prev = req->dl_next;
+
write_unlock(&queue->syn_wait_lock);
+ if (del_timer(&req->rsk_timer))
+ reqsk_put(req);
}
static inline void reqsk_queue_add(struct request_sock_queue *queue,
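The del_timer() call added to reqsk_queue_unlink() above encodes a reference-ownership rule: del_timer() returns nonzero only when it deactivated a still-pending timer, and only then does the unlinking cpu drop the reference the timer held on the request. Otherwise the handler is running (or has already run) and drops that reference itself. Schematically:

if (del_timer(&req->rsk_timer))	/* timer was pending; handler won't run */
	reqsk_put(req);		/* so we inherit and drop its reference */
/* else: the handler owns the reference and releases it on its own */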
@@ -260,64 +266,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
return req;
}
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
- struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+ const struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
if (req->num_timeout == 0)
- --lopt->qlen_young;
-
- return --lopt->qlen;
+ atomic_inc(&lopt->young_dec);
+ atomic_inc(&lopt->qlen_dec);
}
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
struct listen_sock *lopt = queue->listen_opt;
- const int prev_qlen = lopt->qlen;
- lopt->qlen_young++;
- lopt->qlen++;
- return prev_qlen;
+ lopt->young_inc++;
+ lopt->qlen_inc++;
}
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
{
- return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+ return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
}
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
{
- return queue->listen_opt->qlen_young;
+ return lopt->young_inc - atomic_read(&lopt->young_dec);
}
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+ const struct listen_sock *lopt = queue->listen_opt;
+
+ return lopt ? listen_sock_qlen(lopt) : 0;
}
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
- u32 hash, struct request_sock *req,
- unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- req->expires = jiffies + timeout;
- req->num_retrans = 0;
- req->num_timeout = 0;
- req->sk = NULL;
-
- /* before letting lookups find us, make sure all req fields
- * are committed to memory and refcnt initialized.
- */
- smp_wmb();
- atomic_set(&req->rsk_refcnt, 1);
+ return listen_sock_young(queue->listen_opt);
+}
- req->rsk_hash = hash;
- write_lock(&queue->syn_wait_lock);
- req->dl_next = lopt->syn_table[hash];
- lopt->syn_table[hash] = req;
- write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+ return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
}
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned long timeout);
+
#endif /* _REQUEST_SOCK_H */
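The inline reqsk_queue_hash_req() body removed above moves out of line so that, besides inserting the request into the hash table under syn_wait_lock, it can initialize and arm rsk_timer. A hedged reconstruction of what that out-of-line function plausibly looks like, stitched together from the removed inline body and the new fields; the refcount of 2 (one reference for the hash chain, one for the armed timer) and the handler name are assumptions not visible in this header diff:

void reqsk_queue_hash_req(struct request_sock_queue *queue,
			  u32 hash, struct request_sock *req,
			  unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	req->rsk_hash = hash;

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2); /* hash chain + armed timer */

	write_lock(&queue->syn_wait_lock);
	req->dl_next = lopt->syn_table[hash];
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);

	mod_timer(&req->rsk_timer, jiffies + timeout);
}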