author    Vlad Yasevich <vladislav.yasevich@hp.com>  2009-11-23 15:54:01 -0500
committer Vlad Yasevich <vladislav.yasevich@hp.com>  2009-11-23 15:54:01 -0500
commit    4814326b59db0cfd18ac652626d955ad3f57fb0f
tree      41975c6b2eea1812012802bdb2337c43fa240178
parent    da85b7396f3b6cb3fea7d77091498bfa1051ef7c
sctp: prevent too-fast association id reuse
We use the idr subsystem and always ask for an id at or above 1.
This results in id reuse when one association is terminated while
another is created.  To prevent reuse, we keep track of the last id
returned and ask for that id + 1 as the base for each subsequent
query.  We let the idr spin lock protect this base id as well.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
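The idea is easier to see outside the kernel.  Below is a minimal,
self-contained C sketch of the same policy: hand out the lowest free id
at or above a rolling low watermark, then move the watermark past the id
just returned so a freshly freed id is not reused right away.  The
allocator here (alloc_id/free_id and a small fixed table) is hypothetical
and only stands in for the idr; the "last id + 1, wrap before INT_MAX"
step is the part that mirrors the patch.

/* Hypothetical userspace sketch of the cyclic id policy in this patch.
 * alloc_id() returns the lowest free id at or above id_low, then bumps
 * id_low past it so a just-freed id is not handed out again at once.
 */
#include <limits.h>
#include <stdio.h>

#define MAX_IDS 8                  /* tiny table, for illustration only */

static int used[MAX_IDS + 1];      /* ids 1..MAX_IDS; 0 means free */
static int id_low = 1;             /* plays the role of idr_low */

static int alloc_id(void)
{
        int i, id;

        for (i = 0; i < MAX_IDS; i++) {
                id = (id_low - 1 + i) % MAX_IDS + 1;  /* scan 1..MAX_IDS cyclically */
                if (!used[id]) {
                        used[id] = 1;
                        id_low = id + 1;              /* next search starts above it */
                        if (id_low == INT_MAX)        /* same wrap check as the patch */
                                id_low = 1;
                        return id;
                }
        }
        return -1;                                    /* table full */
}

static void free_id(int id)
{
        used[id] = 0;
}

int main(void)
{
        int a = alloc_id();        /* returns 1 */
        int b = alloc_id();        /* returns 2 */

        free_id(a);
        /* With a fixed base of 1 the next call would return 1 again;
         * with the rolling base it returns 3 instead.
         */
        printf("freed %d, next allocation is %d\n", a, alloc_id());
        (void)b;
        return 0;
}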
Diffstat (limited to 'net')
-rw-r--r--  net/sctp/associola.c  13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6e96f83570c9..df5abbff63e2 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -63,6 +63,12 @@
 static void sctp_assoc_bh_rcv(struct work_struct *work);
 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
 
+/* Keep track of the new idr low so that we don't re-use association id
+ * numbers too fast.  It is protected by the idr spin lock and is in the
+ * range of 1 - INT_MAX.
+ */
+static u32 idr_low = 1;
+
 
 /* 1st Level Abstractions. */
@@ -1553,7 +1559,12 @@ retry:
 
 	spin_lock_bh(&sctp_assocs_id_lock);
 	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
-				    1, &assoc_id);
+				    idr_low, &assoc_id);
+	if (!error) {
+		idr_low = assoc_id + 1;
+		if (idr_low == INT_MAX)
+			idr_low = 1;
+	}
 	spin_unlock_bh(&sctp_assocs_id_lock);
 	if (error == -EAGAIN)
 		goto retry;
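For readers without the full file at hand, here is a hedged sketch of how
the allocation loop around the second hunk reads once the patch is
applied.  Everything outside the hunk (the idr_pre_get() preload, the gfp
argument, the final assignment of asoc->assoc_id, and the error returns)
is reconstructed from the visible context and the usual idr calling
pattern of that era, not quoted from the tree.

	/* Sketch only: an approximation of the allocation path around the
	 * hunk above with the patch applied.  Lines outside the hunk are
	 * reconstructed, not quoted verbatim from net/sctp/associola.c.
	 */
retry:
	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
		return -ENOMEM;

	spin_lock_bh(&sctp_assocs_id_lock);
	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
				  idr_low, &assoc_id);
	if (!error) {
		idr_low = assoc_id + 1;
		if (idr_low == INT_MAX)
			idr_low = 1;
	}
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (error == -EAGAIN)
		goto retry;		/* preload again and retry the insert */
	else if (error)
		return error;

	asoc->assoc_id = (sctp_assoc_t)assoc_id;
	return 0;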