author     Jiri Kosina <jkosina@suse.cz>   2013-05-28 10:09:22 +0200
committer  Jiri Kosina <jkosina@suse.cz>   2013-05-28 10:09:29 +0200
commit     864bfb25b57a6766ea689befa5cf09a4353281ce (patch)
tree       478941ca6e76b8d77b71f5827a3ce751c46a72b5 /net/sctp/ulpqueue.c
parent     071361d3473ebb8142907470ff12d59c59f6be72 (diff)
parent     49717cb40410fe4b563968680ff7c513967504c6 (diff)
Merge branch 'master' into for-next
Merge with 49717cb ("kthread: Document ways of reducing OS jitter due to per-CPU kthreads") to be able to apply a fixup patch on top of it.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c | 87
 1 file changed, 71 insertions(+), 16 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
{
struct sk_buff_head temp;
struct sctp_ulpevent *event;
+ int event_eor = 0;
/* Create an event from the incoming chunk. */
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+ event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
sctp_ulpq_tail_event(ulpq, event);
+ }
- return 0;
+ return event_eor;
}
/* Add a new event for propagation to the ULP. */
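This hunk changes the return contract of sctp_ulpq_tail_data(): instead of always returning 0, it now returns 1 when the event handed up to the ULP carried MSG_EOR (a complete message) and 0 otherwise. A minimal userspace sketch of that flag-to-return mapping (not part of the patch); struct ulpevent is an illustrative stand-in, and only MSG_EOR and the msg_flags test mirror the kernel code:

    #include <sys/socket.h>   /* MSG_EOR */

    struct ulpevent { int msg_flags; };  /* stand-in for struct sctp_ulpevent */

    /* 1 if the delivered event ends a message, 0 if it was only a
     * partial delivery or no event was generated at all.
     */
    static int event_is_eor(const struct ulpevent *event)
    {
            if (!event)
                    return 0;
            return (event->msg_flags & MSG_EOR) ? 1 : 0;
    }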
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
ctsn = cevent->tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag)
+ return NULL;
+ goto done;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
- } else if (next_tsn == ctsn)
+ } else if (next_tsn == ctsn) {
next_tsn++;
- else
+ last_frag = pos;
+ } else
goto done;
break;
case SCTP_DATA_LAST_FRAG:
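The fix in sctp_ulpq_retrieve_partial() is twofold: an out-of-place SCTP_DATA_FIRST_FRAG now ends the scan instead of falling through, and last_frag is advanced while consecutive SCTP_DATA_MIDDLE_FRAG chunks accumulate, so the delivered run really ends at the last fragment seen. A self-contained sketch of that TSN walk (not the kernel's types; the LAST_FRAG arm is simplified, omitting the TSN-continuity check the kernel performs there):

    enum frag { FIRST_FRAG, MIDDLE_FRAG, LAST_FRAG };
    struct frag_ent { enum frag kind; unsigned int tsn; };

    /* Collect a run of consecutive MIDDLE fragments, remembering the
     * last one seen; bail out when a new message (FIRST_FRAG) begins.
     * Returns 1 if a deliverable run [*first, *last] was found.
     */
    static int scan_partial(const struct frag_ent *f, int n,
                            int *first, int *last)
    {
            unsigned int next_tsn = 0;
            *first = *last = -1;

            for (int i = 0; i < n; i++) {
                    switch (f[i].kind) {
                    case FIRST_FRAG:
                            return *first >= 0;   /* next message starts */
                    case MIDDLE_FRAG:
                            if (*first < 0) {
                                    *first = *last = i;
                                    next_tsn = f[i].tsn + 1;
                            } else if (f[i].tsn == next_tsn) {
                                    next_tsn++;
                                    *last = i;    /* the advance the patch adds */
                            } else {
                                    return *first >= 0;
                            }
                            break;
                    case LAST_FRAG:
                            if (*first < 0)
                                    return 0;     /* nothing collected yet */
                            *last = i;            /* run is complete */
                            return 1;
                    }
            }
            return *first >= 0;
    }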
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
} else
goto done;
break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ return NULL;
+ else
+ goto done;
+ break;
+
default:
return NULL;
}
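sctp_ulpq_retrieve_first() hunts for the start of the next message; before this hunk a SCTP_DATA_LAST_FRAG fell through to the default arm and the scan returned NULL, discarding any FIRST_FRAG run already collected. The added case keeps that run deliverable. A counterpart sketch, reusing enum frag and struct frag_ent from the sketch above (still illustrative; TSN-continuity checks omitted for brevity):

    /* Only a FIRST_FRAG may open a run; the added LAST_FRAG case
     * either closes an open run (deliver it) or reports nothing usable.
     */
    static int scan_first(const struct frag_ent *f, int n,
                          int *first, int *last)
    {
            *first = *last = -1;
            for (int i = 0; i < n; i++) {
                    if (f[i].kind == FIRST_FRAG) {
                            if (*first >= 0)
                                    return 1;     /* next message begins */
                            *first = *last = i;
                    } else if (f[i].kind == MIDDLE_FRAG && *first >= 0) {
                            *last = i;            /* run continues */
                    } else {
                            /* LAST_FRAG (the added case) or a stray
                             * MIDDLE: deliver an open run, else nothing.
                             */
                            return *first >= 0;
                    }
            }
            return *first >= 0;
    }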
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
struct sk_buff_head *list, __u16 needed)
{
__u16 freed = 0;
- __u32 tsn;
- struct sk_buff *skb;
+ __u32 tsn, last_tsn;
+ struct sk_buff *skb, *flist, *last;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
- while ((skb = __skb_dequeue_tail(list)) != NULL) {
- freed += skb_headlen(skb);
+ while ((skb = skb_peek_tail(list)) != NULL) {
event = sctp_skb2event(skb);
tsn = event->tsn;
+ /* Don't renege below the Cumulative TSN ACK Point. */
+ if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+ break;
+
+ /* Events in ordering queue may have multiple fragments
+ * corresponding to additional TSNs. Sum the total
+ * freed space; find the last TSN.
+ */
+ freed += skb_headlen(skb);
+ flist = skb_shinfo(skb)->frag_list;
+ for (last = flist; flist; flist = flist->next) {
+ last = flist;
+ freed += skb_headlen(last);
+ }
+ if (last)
+ last_tsn = sctp_skb2event(last)->tsn;
+ else
+ last_tsn = tsn;
+
+ /* Unlink the event, then renege all applicable TSNs. */
+ __skb_unlink(skb, list);
sctp_ulpevent_free(event);
- sctp_tsnmap_renege(tsnmap, tsn);
+ while (TSN_lte(tsn, last_tsn)) {
+ sctp_tsnmap_renege(tsnmap, tsn);
+ tsn++;
+ }
if (freed >= needed)
return freed;
}
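The rewritten loop must count every fragment chained on the skb's frag_list when computing how much memory a renege frees, and must then renege the entire TSN span those fragments cover, not just the head TSN. A stand-alone sketch of that bookkeeping; struct ev and its fields are illustrative, and plain integer comparison stands in for the kernel's serial-number TSN_lte():

    /* An event with a TSN, a payload length, and an optional chain of
     * follow-on fragments (stand-in for skb_shinfo(skb)->frag_list).
     */
    struct ev {
            unsigned int tsn;
            unsigned int len;
            struct ev *frag_next;
    };

    /* Sum the space freed by dropping 'ev' plus all its fragments and
     * report the last TSN covered, so the caller can renege the range.
     */
    static unsigned int renege_span(const struct ev *ev, unsigned int *last_tsn)
    {
            unsigned int freed = ev->len;
            const struct ev *last = ev;

            for (const struct ev *f = ev->frag_next; f; f = f->frag_next) {
                    last = f;
                    freed += f->len;
            }
            *last_tsn = last->tsn;
            return freed;
    }

    /* Mirror of the patched inner loop: renege every TSN from the head
     * fragment through the last one.
     */
    static void renege_all(unsigned int tsn, unsigned int last_tsn,
                           void (*renege_one)(unsigned int))
    {
            while (tsn <= last_tsn)
                    renege_one(tsn++);
    }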
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event;
struct sctp_association *asoc;
struct sctp_sock *sp;
+ __u32 ctsn;
+ struct sk_buff *skb;
asoc = ulpq->asoc;
sp = sctp_sk(asoc->base.sk);
/* If the association is already in Partial Delivery mode
- * we have noting to do.
+ * we have nothing to do.
*/
if (ulpq->pd_mode)
return;
+ /* Data must be at or below the Cumulative TSN ACK Point to
+ * start partial delivery.
+ */
+ skb = skb_peek(&asoc->ulpq.reasm);
+ if (skb != NULL) {
+ ctsn = sctp_skb2event(skb)->tsn;
+ if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+ return;
+ }
+
/* If the user enabled fragment interleave socket option,
* multiple associations can enter partial delivery.
* Otherwise, we can only enter partial delivery if the
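The added guard refuses to start partial delivery unless the fragment at the head of the reassembly queue sits at or below the Cumulative TSN ACK Point, so partial delivery never begins on data that could still be reneged. A minimal sketch of the check; tsn_lte mirrors the kernel's TSN_lte() serial-number comparison over wrapping 32-bit TSNs, and the parameter names are illustrative:

    #include <stdint.h>

    /* Serial-number "less than or equal" over wrapping 32-bit TSNs,
     * matching the kernel's TSN_lte() semantics.
     */
    static int tsn_lte(uint32_t a, uint32_t b)
    {
            return a == b || (int32_t)(a - b) < 0;
    }

    /* Gate from the patch: partial delivery may only begin when the
     * head of the reassembly queue is already covered by the
     * Cumulative TSN ACK Point (and thus can no longer be reneged).
     */
    static int may_start_partial_delivery(uint32_t head_tsn,
                                          uint32_t ctsn_ack_point)
    {
            return tsn_lte(head_tsn, ctsn_ack_point);
    }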
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
- __u32 tsn;
- tsn = ntohl(chunk->subh.data_hdr->tsn);
- sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
- sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
- sctp_ulpq_partial_delivery(ulpq, gfp);
+ int retval;
+ retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+ /*
+ * Enter partial delivery if chunk has not been
+ * delivered; otherwise, drain the reassembly queue.
+ */
+ if (retval <= 0)
+ sctp_ulpq_partial_delivery(ulpq, gfp);
+ else if (retval == 1)
+ sctp_ulpq_reasm_drain(ulpq);
}
sk_mem_reclaim(asoc->base.sk);
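Finally, the renege path consumes the return value introduced in the first hunk: retval <= 0 means no complete message reached the ULP (0 for a partial or absent event, negative on error), so partial delivery may be needed, while retval == 1 means a full message went up and the reassembly queue can simply be drained. A compact sketch of that dispatch; the two hooks are placeholders for the kernel helpers sctp_ulpq_partial_delivery() and sctp_ulpq_reasm_drain():

    /* Placeholder hooks standing in for the kernel helpers. */
    static void start_partial_delivery(void) { }
    static void drain_reassembly_queue(void) { }

    /* Branch on what sctp_ulpq_tail_data() reported, as the patched
     * caller does.
     */
    static void after_tail_data(int retval)
    {
            if (retval <= 0)
                    start_partial_delivery();
            else if (retval == 1)
                    drain_reassembly_queue();
    }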