- // SPDX-License-Identifier: GPL-2.0-or-later
- /* SCTP kernel implementation
- * (C) Copyright Red Hat Inc. 2017
- *
- * This file is part of the SCTP kernel implementation
- *
- * These functions implement SCTP stream message interleaving, mostly
- * covering I-DATA and I-FORWARD-TSN chunk processing.
- *
- * Please send any bug reports or fixes you make to the
- * email address(es):
- * lksctp developers <linux-sctp@vger.kernel.org>
- *
- * Written or modified by:
- * Xin Long <lucien.xin@gmail.com>
- */
- #include <net/busy_poll.h>
- #include <net/sctp/sctp.h>
- #include <net/sctp/sm.h>
- #include <net/sctp/ulpevent.h>
- #include <linux/sctp.h>
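- /* Allocate an I-DATA chunk with room for @len payload bytes and fill in
-  * the parts of the I-DATA header that are known up front (stream id and
-  * the UNORDERED flag) from @sinfo.  PPID, FSN and MID are stamped later
-  * by sctp_chunk_assign_mid().
-  */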
- static struct sctp_chunk *sctp_make_idatafrag_empty(
- const struct sctp_association *asoc,
- const struct sctp_sndrcvinfo *sinfo,
- int len, __u8 flags, gfp_t gfp)
- {
- struct sctp_chunk *retval;
- struct sctp_idatahdr dp;
- memset(&dp, 0, sizeof(dp));
- dp.stream = htons(sinfo->sinfo_stream);
- if (sinfo->sinfo_flags & SCTP_UNORDERED)
- flags |= SCTP_DATA_UNORDERED;
- retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
- if (!retval)
- return NULL;
- retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
- memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
- return retval;
- }
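- /* Stamp every fragment of the message this chunk belongs to: the first
-  * fragment carries the PPID, the rest carry a cumulative FSN, and each
-  * fragment gets the stream's next (last fragment) or peeked (other
-  * fragments) MID, from the ordered or unordered counter as appropriate.
-  */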
- static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
- {
- struct sctp_stream *stream;
- struct sctp_chunk *lchunk;
- __u32 cfsn = 0;
- __u16 sid;
- if (chunk->has_mid)
- return;
- sid = sctp_chunk_stream_no(chunk);
- stream = &chunk->asoc->stream;
- list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
- struct sctp_idatahdr *hdr;
- __u32 mid;
- lchunk->has_mid = 1;
- hdr = lchunk->subh.idata_hdr;
- if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
- hdr->ppid = lchunk->sinfo.sinfo_ppid;
- else
- hdr->fsn = htonl(cfsn++);
- if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
- mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
- sctp_mid_uo_next(stream, out, sid) :
- sctp_mid_uo_peek(stream, out, sid);
- } else {
- mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
- sctp_mid_next(stream, out, sid) :
- sctp_mid_peek(stream, out, sid);
- }
- hdr->mid = htonl(mid);
- }
- }
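- /* A DATA chunk is acceptable only if it really is a DATA chunk and, for
-  * ordered delivery, its SSN has not already been passed on that stream.
-  */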
- static bool sctp_validate_data(struct sctp_chunk *chunk)
- {
- struct sctp_stream *stream;
- __u16 sid, ssn;
- if (chunk->chunk_hdr->type != SCTP_CID_DATA)
- return false;
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- return true;
- stream = &chunk->asoc->stream;
- sid = sctp_chunk_stream_no(chunk);
- ssn = ntohs(chunk->subh.data_hdr->ssn);
- return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
- }
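- /* Same validation for I-DATA chunks, using the 32-bit MID instead of the
-  * 16-bit SSN.
-  */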
- static bool sctp_validate_idata(struct sctp_chunk *chunk)
- {
- struct sctp_stream *stream;
- __u32 mid;
- __u16 sid;
- if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
- return false;
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- return true;
- stream = &chunk->asoc->stream;
- sid = sctp_chunk_stream_no(chunk);
- mid = ntohl(chunk->subh.idata_hdr->mid);
- return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
- }
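- /* Insert an event into the reassembly queue, keeping the queue sorted by
-  * stream id, then MID, then fragment position (FIRST_FRAG ahead of rising
-  * FSNs).  In-order arrival is the fast path and just appends at the tail.
-  */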
- static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_ulpevent *cevent;
- struct sk_buff *pos, *loc;
- pos = skb_peek_tail(&ulpq->reasm);
- if (!pos) {
- __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
- return;
- }
- cevent = sctp_skb2event(pos);
- if (event->stream == cevent->stream &&
- event->mid == cevent->mid &&
- (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
- (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
- event->fsn > cevent->fsn))) {
- __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
- return;
- }
- if ((event->stream == cevent->stream &&
- MID_lt(cevent->mid, event->mid)) ||
- event->stream > cevent->stream) {
- __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
- return;
- }
- loc = NULL;
- skb_queue_walk(&ulpq->reasm, pos) {
- cevent = sctp_skb2event(pos);
- if (event->stream < cevent->stream ||
- (event->stream == cevent->stream &&
- MID_lt(event->mid, cevent->mid))) {
- loc = pos;
- break;
- }
- if (event->stream == cevent->stream &&
- event->mid == cevent->mid &&
- !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
- (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
- event->fsn < cevent->fsn)) {
- loc = pos;
- break;
- }
- }
- if (!loc)
- __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
- else
- __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
- }
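- /* With partial delivery in progress on this stream, pull the next run of
-  * consecutive fragments (starting at sin->fsn) of the message currently
-  * being delivered (sin->mid) out of the reassembly queue.
-  */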
- static struct sctp_ulpevent *sctp_intl_retrieve_partial(
- struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sk_buff *first_frag = NULL;
- struct sk_buff *last_frag = NULL;
- struct sctp_ulpevent *retval;
- struct sctp_stream_in *sin;
- struct sk_buff *pos;
- __u32 next_fsn = 0;
- int is_last = 0;
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- skb_queue_walk(&ulpq->reasm, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- if (cevent->stream < event->stream)
- continue;
- if (cevent->stream > event->stream ||
- cevent->mid != sin->mid)
- break;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- goto out;
- case SCTP_DATA_MIDDLE_FRAG:
- if (!first_frag) {
- if (cevent->fsn == sin->fsn) {
- first_frag = pos;
- last_frag = pos;
- next_fsn = cevent->fsn + 1;
- }
- } else if (cevent->fsn == next_fsn) {
- last_frag = pos;
- next_fsn++;
- } else {
- goto out;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (!first_frag) {
- if (cevent->fsn == sin->fsn) {
- first_frag = pos;
- last_frag = pos;
- next_fsn = 0;
- is_last = 1;
- }
- } else if (cevent->fsn == next_fsn) {
- last_frag = pos;
- next_fsn = 0;
- is_last = 1;
- }
- goto out;
- default:
- goto out;
- }
- }
- out:
- if (!first_frag)
- return NULL;
- retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
- first_frag, last_frag);
- if (retval) {
- sin->fsn = next_fsn;
- if (is_last) {
- retval->msg_flags |= MSG_EOR;
- sin->pd_mode = 0;
- }
- }
- return retval;
- }
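- /* Try to reassemble the complete message the new event belongs to.  If it
-  * is still incomplete but the fragments queued for the expected MID have
-  * reached the partial delivery point, hand back a partial event and enter
-  * partial delivery mode instead.
-  */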
- static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
- struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_association *asoc = ulpq->asoc;
- struct sk_buff *pos, *first_frag = NULL;
- struct sctp_ulpevent *retval = NULL;
- struct sk_buff *pd_first = NULL;
- struct sk_buff *pd_last = NULL;
- struct sctp_stream_in *sin;
- __u32 next_fsn = 0;
- __u32 pd_point = 0;
- __u32 pd_len = 0;
- __u32 mid = 0;
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- skb_queue_walk(&ulpq->reasm, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- if (cevent->stream < event->stream)
- continue;
- if (cevent->stream > event->stream)
- break;
- if (MID_lt(cevent->mid, event->mid))
- continue;
- if (MID_lt(event->mid, cevent->mid))
- break;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- if (cevent->mid == sin->mid) {
- pd_first = pos;
- pd_last = pos;
- pd_len = pos->len;
- }
- first_frag = pos;
- next_fsn = 0;
- mid = cevent->mid;
- break;
- case SCTP_DATA_MIDDLE_FRAG:
- if (first_frag && cevent->mid == mid &&
- cevent->fsn == next_fsn) {
- next_fsn++;
- if (pd_first) {
- pd_last = pos;
- pd_len += pos->len;
- }
- } else {
- first_frag = NULL;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (first_frag && cevent->mid == mid &&
- cevent->fsn == next_fsn)
- goto found;
- else
- first_frag = NULL;
- break;
- }
- }
- if (!pd_first)
- goto out;
- pd_point = sctp_sk(asoc->base.sk)->pd_point;
- if (pd_point && pd_point <= pd_len) {
- retval = sctp_make_reassembled_event(asoc->base.net,
- &ulpq->reasm,
- pd_first, pd_last);
- if (retval) {
- sin->fsn = next_fsn;
- sin->pd_mode = 1;
- }
- }
- goto out;
- found:
- retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
- first_frag, pos);
- if (retval)
- retval->msg_flags |= MSG_EOR;
- out:
- return retval;
- }
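- /* Reassembly entry point for ordered I-DATA: unfragmented messages pass
-  * straight through, fragments are queued and either partial delivery is
-  * continued or full reassembly is attempted.
-  */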
- static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_ulpevent *retval = NULL;
- struct sctp_stream_in *sin;
- if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
- event->msg_flags |= MSG_EOR;
- return event;
- }
- sctp_intl_store_reasm(ulpq, event);
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- if (sin->pd_mode && event->mid == sin->mid &&
- event->fsn == sin->fsn)
- retval = sctp_intl_retrieve_partial(ulpq, event);
- if (!retval)
- retval = sctp_intl_retrieve_reassembled(ulpq, event);
- return retval;
- }
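- /* Park a fully reassembled but out-of-order message in the lobby, sorted
-  * by stream id and MID, until the messages in front of it arrive.
-  */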
- static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_ulpevent *cevent;
- struct sk_buff *pos, *loc;
- pos = skb_peek_tail(&ulpq->lobby);
- if (!pos) {
- __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
- return;
- }
- cevent = (struct sctp_ulpevent *)pos->cb;
- if (event->stream == cevent->stream &&
- MID_lt(cevent->mid, event->mid)) {
- __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
- return;
- }
- if (event->stream > cevent->stream) {
- __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
- return;
- }
- loc = NULL;
- skb_queue_walk(&ulpq->lobby, pos) {
- cevent = (struct sctp_ulpevent *)pos->cb;
- if (cevent->stream > event->stream) {
- loc = pos;
- break;
- }
- if (cevent->stream == event->stream &&
- MID_lt(event->mid, cevent->mid)) {
- loc = pos;
- break;
- }
- }
- if (!loc)
- __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
- else
- __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
- }
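- /* After delivering @event, move any lobby messages on the same stream
-  * that are now next in MID order onto the event list being delivered.
-  */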
- static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sk_buff_head *event_list;
- struct sctp_stream *stream;
- struct sk_buff *pos, *tmp;
- __u16 sid = event->stream;
- stream = &ulpq->asoc->stream;
- event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
- sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
- struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
- if (cevent->stream > sid)
- break;
- if (cevent->stream < sid)
- continue;
- if (cevent->mid != sctp_mid_peek(stream, in, sid))
- break;
- sctp_mid_next(stream, in, sid);
- __skb_unlink(pos, &ulpq->lobby);
- __skb_queue_tail(event_list, pos);
- }
- }
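- /* Deliver an ordered message if its MID is the next one expected on its
-  * stream, otherwise park it in the lobby and deliver nothing yet.
-  */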
- static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_stream *stream;
- __u16 sid;
- stream = &ulpq->asoc->stream;
- sid = event->stream;
- if (event->mid != sctp_mid_peek(stream, in, sid)) {
- sctp_intl_store_ordered(ulpq, event);
- return NULL;
- }
- sctp_mid_next(stream, in, sid);
- sctp_intl_retrieve_ordered(ulpq, event);
- return event;
- }
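- /* Splice a list of events onto the socket receive queue and wake up the
-  * reader, unless the socket is shut down for receive or the event type is
-  * not subscribed, in which case the events are freed instead.
-  */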
- static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
- struct sk_buff_head *skb_list)
- {
- struct sock *sk = ulpq->asoc->base.sk;
- struct sctp_sock *sp = sctp_sk(sk);
- struct sctp_ulpevent *event;
- struct sk_buff *skb;
- skb = __skb_peek(skb_list);
- event = sctp_skb2event(skb);
- if (sk->sk_shutdown & RCV_SHUTDOWN &&
- (sk->sk_shutdown & SEND_SHUTDOWN ||
- !sctp_ulpevent_is_notification(event)))
- goto out_free;
- if (!sctp_ulpevent_is_notification(event)) {
- sk_mark_napi_id(sk, skb);
- sk_incoming_cpu_update(sk);
- }
- if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
- goto out_free;
- if (skb_list)
- skb_queue_splice_tail_init(skb_list,
- &sk->sk_receive_queue);
- else
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- if (!sp->data_ready_signalled) {
- sp->data_ready_signalled = 1;
- sk->sk_data_ready(sk);
- }
- return 1;
- out_free:
- if (skb_list)
- sctp_queue_purge_ulpevents(skb_list);
- else
- sctp_ulpevent_free(event);
- return 0;
- }
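- /* The *_uo helpers below mirror the ordered reassembly path above, but
-  * work on the unordered reassembly queue and the per-stream mid_uo,
-  * fsn_uo and pd_mode_uo state.
-  */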
- static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_ulpevent *cevent;
- struct sk_buff *pos;
- pos = skb_peek_tail(&ulpq->reasm_uo);
- if (!pos) {
- __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
- return;
- }
- cevent = sctp_skb2event(pos);
- if (event->stream == cevent->stream &&
- event->mid == cevent->mid &&
- (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
- (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
- event->fsn > cevent->fsn))) {
- __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
- return;
- }
- if ((event->stream == cevent->stream &&
- MID_lt(cevent->mid, event->mid)) ||
- event->stream > cevent->stream) {
- __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
- return;
- }
- skb_queue_walk(&ulpq->reasm_uo, pos) {
- cevent = sctp_skb2event(pos);
- if (event->stream < cevent->stream ||
- (event->stream == cevent->stream &&
- MID_lt(event->mid, cevent->mid)))
- break;
- if (event->stream == cevent->stream &&
- event->mid == cevent->mid &&
- !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
- (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
- event->fsn < cevent->fsn))
- break;
- }
- __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
- }
- static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
- struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sk_buff *first_frag = NULL;
- struct sk_buff *last_frag = NULL;
- struct sctp_ulpevent *retval;
- struct sctp_stream_in *sin;
- struct sk_buff *pos;
- __u32 next_fsn = 0;
- int is_last = 0;
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- skb_queue_walk(&ulpq->reasm_uo, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- if (cevent->stream < event->stream)
- continue;
- if (cevent->stream > event->stream)
- break;
- if (MID_lt(cevent->mid, sin->mid_uo))
- continue;
- if (MID_lt(sin->mid_uo, cevent->mid))
- break;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- goto out;
- case SCTP_DATA_MIDDLE_FRAG:
- if (!first_frag) {
- if (cevent->fsn == sin->fsn_uo) {
- first_frag = pos;
- last_frag = pos;
- next_fsn = cevent->fsn + 1;
- }
- } else if (cevent->fsn == next_fsn) {
- last_frag = pos;
- next_fsn++;
- } else {
- goto out;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (!first_frag) {
- if (cevent->fsn == sin->fsn_uo) {
- first_frag = pos;
- last_frag = pos;
- next_fsn = 0;
- is_last = 1;
- }
- } else if (cevent->fsn == next_fsn) {
- last_frag = pos;
- next_fsn = 0;
- is_last = 1;
- }
- goto out;
- default:
- goto out;
- }
- }
- out:
- if (!first_frag)
- return NULL;
- retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
- &ulpq->reasm_uo, first_frag,
- last_frag);
- if (retval) {
- sin->fsn_uo = next_fsn;
- if (is_last) {
- retval->msg_flags |= MSG_EOR;
- sin->pd_mode_uo = 0;
- }
- }
- return retval;
- }
- static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
- struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_association *asoc = ulpq->asoc;
- struct sk_buff *pos, *first_frag = NULL;
- struct sctp_ulpevent *retval = NULL;
- struct sk_buff *pd_first = NULL;
- struct sk_buff *pd_last = NULL;
- struct sctp_stream_in *sin;
- __u32 next_fsn = 0;
- __u32 pd_point = 0;
- __u32 pd_len = 0;
- __u32 mid = 0;
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- skb_queue_walk(&ulpq->reasm_uo, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- if (cevent->stream < event->stream)
- continue;
- if (cevent->stream > event->stream)
- break;
- if (MID_lt(cevent->mid, event->mid))
- continue;
- if (MID_lt(event->mid, cevent->mid))
- break;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- if (!sin->pd_mode_uo) {
- sin->mid_uo = cevent->mid;
- pd_first = pos;
- pd_last = pos;
- pd_len = pos->len;
- }
- first_frag = pos;
- next_fsn = 0;
- mid = cevent->mid;
- break;
- case SCTP_DATA_MIDDLE_FRAG:
- if (first_frag && cevent->mid == mid &&
- cevent->fsn == next_fsn) {
- next_fsn++;
- if (pd_first) {
- pd_last = pos;
- pd_len += pos->len;
- }
- } else {
- first_frag = NULL;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (first_frag && cevent->mid == mid &&
- cevent->fsn == next_fsn)
- goto found;
- else
- first_frag = NULL;
- break;
- }
- }
- if (!pd_first)
- goto out;
- pd_point = sctp_sk(asoc->base.sk)->pd_point;
- if (pd_point && pd_point <= pd_len) {
- retval = sctp_make_reassembled_event(asoc->base.net,
- &ulpq->reasm_uo,
- pd_first, pd_last);
- if (retval) {
- sin->fsn_uo = next_fsn;
- sin->pd_mode_uo = 1;
- }
- }
- goto out;
- found:
- retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
- first_frag, pos);
- if (retval)
- retval->msg_flags |= MSG_EOR;
- out:
- return retval;
- }
- static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sctp_ulpevent *retval = NULL;
- struct sctp_stream_in *sin;
- if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
- event->msg_flags |= MSG_EOR;
- return event;
- }
- sctp_intl_store_reasm_uo(ulpq, event);
- sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
- if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
- event->fsn == sin->fsn_uo)
- retval = sctp_intl_retrieve_partial_uo(ulpq, event);
- if (!retval)
- retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
- return retval;
- }
- static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
- {
- struct sctp_stream_in *csin, *sin = NULL;
- struct sk_buff *first_frag = NULL;
- struct sk_buff *last_frag = NULL;
- struct sctp_ulpevent *retval;
- struct sk_buff *pos;
- __u32 next_fsn = 0;
- __u16 sid = 0;
- skb_queue_walk(&ulpq->reasm_uo, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
- if (csin->pd_mode_uo)
- continue;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- if (first_frag)
- goto out;
- first_frag = pos;
- last_frag = pos;
- next_fsn = 0;
- sin = csin;
- sid = cevent->stream;
- sin->mid_uo = cevent->mid;
- break;
- case SCTP_DATA_MIDDLE_FRAG:
- if (!first_frag)
- break;
- if (cevent->stream == sid &&
- cevent->mid == sin->mid_uo &&
- cevent->fsn == next_fsn) {
- next_fsn++;
- last_frag = pos;
- } else {
- goto out;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (first_frag)
- goto out;
- break;
- default:
- break;
- }
- }
- if (!first_frag)
- return NULL;
- out:
- retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
- &ulpq->reasm_uo, first_frag,
- last_frag);
- if (retval) {
- sin->fsn_uo = next_fsn;
- sin->pd_mode_uo = 1;
- }
- return retval;
- }
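- /* Receive path for one I-DATA chunk: build a ulpevent, pick up MID and
-  * PPID or FSN from the I-DATA header, run it through the ordered or
-  * unordered reassembly code, and enqueue whatever message (complete or
-  * partial) comes out.  Returns 1 if a complete message (MSG_EOR) was
-  * delivered, 0 otherwise, or -ENOMEM on allocation failure.
-  */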
- static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
- struct sctp_chunk *chunk, gfp_t gfp)
- {
- struct sctp_ulpevent *event;
- struct sk_buff_head temp;
- int event_eor = 0;
- event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
- if (!event)
- return -ENOMEM;
- event->mid = ntohl(chunk->subh.idata_hdr->mid);
- if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
- event->ppid = chunk->subh.idata_hdr->ppid;
- else
- event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
- if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
- event = sctp_intl_reasm(ulpq, event);
- if (event) {
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- if (event->msg_flags & MSG_EOR)
- event = sctp_intl_order(ulpq, event);
- }
- } else {
- event = sctp_intl_reasm_uo(ulpq, event);
- if (event) {
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- }
- }
- if (event) {
- event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
- sctp_enqueue_event(ulpq, &temp);
- }
- return event_eor;
- }
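- /* Find the first stream that is not already in partial delivery and has a
-  * run of fragments beginning with the expected MID's first fragment, and
-  * turn that run into a partial delivery event.
-  */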
- static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
- {
- struct sctp_stream_in *csin, *sin = NULL;
- struct sk_buff *first_frag = NULL;
- struct sk_buff *last_frag = NULL;
- struct sctp_ulpevent *retval;
- struct sk_buff *pos;
- __u32 next_fsn = 0;
- __u16 sid = 0;
- skb_queue_walk(&ulpq->reasm, pos) {
- struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
- if (csin->pd_mode)
- continue;
- switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
- case SCTP_DATA_FIRST_FRAG:
- if (first_frag)
- goto out;
- if (cevent->mid == csin->mid) {
- first_frag = pos;
- last_frag = pos;
- next_fsn = 0;
- sin = csin;
- sid = cevent->stream;
- }
- break;
- case SCTP_DATA_MIDDLE_FRAG:
- if (!first_frag)
- break;
- if (cevent->stream == sid &&
- cevent->mid == sin->mid &&
- cevent->fsn == next_fsn) {
- next_fsn++;
- last_frag = pos;
- } else {
- goto out;
- }
- break;
- case SCTP_DATA_LAST_FRAG:
- if (first_frag)
- goto out;
- break;
- default:
- break;
- }
- }
- if (!first_frag)
- return NULL;
- out:
- retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
- &ulpq->reasm, first_frag,
- last_frag);
- if (retval) {
- sin->fsn = next_fsn;
- sin->pd_mode = 1;
- }
- return retval;
- }
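- /* Start partial delivery on both the ordered and unordered reassembly
-  * queues, draining as many leading fragment runs as possible.
-  */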
- static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
- {
- struct sctp_ulpevent *event;
- struct sk_buff_head temp;
- if (!skb_queue_empty(&ulpq->reasm)) {
- do {
- event = sctp_intl_retrieve_first(ulpq);
- if (event) {
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- sctp_enqueue_event(ulpq, &temp);
- }
- } while (event);
- }
- if (!skb_queue_empty(&ulpq->reasm_uo)) {
- do {
- event = sctp_intl_retrieve_first_uo(ulpq);
- if (event) {
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- sctp_enqueue_event(ulpq, &temp);
- }
- } while (event);
- }
- }
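- /* Renege path: try to make room for the new chunk by dropping events from
-  * the lobby and both reassembly queues, then process the chunk and fall
-  * back to partial delivery if it still cannot be delivered in full.
-  */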
- static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
- gfp_t gfp)
- {
- struct sctp_association *asoc = ulpq->asoc;
- __u32 freed = 0;
- __u16 needed;
- needed = ntohs(chunk->chunk_hdr->length) -
- sizeof(struct sctp_idata_chunk);
- if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
- freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
- if (freed < needed)
- freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
- needed);
- if (freed < needed)
- freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
- needed);
- }
- if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
- sctp_intl_start_pd(ulpq, gfp);
- }
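- /* Queue a SCTP_PARTIAL_DELIVERY_ABORTED notification for this stream and
-  * MID directly on the socket receive queue, provided the user subscribed
-  * to partial delivery events.
-  */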
- static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
- __u32 mid, __u16 flags, gfp_t gfp)
- {
- struct sock *sk = ulpq->asoc->base.sk;
- struct sctp_ulpevent *ev = NULL;
- if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
- SCTP_PARTIAL_DELIVERY_EVENT))
- return;
- ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
- sid, mid, flags, gfp);
- if (ev) {
- struct sctp_sock *sp = sctp_sk(sk);
- __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
- if (!sp->data_ready_signalled) {
- sp->data_ready_signalled = 1;
- sk->sk_data_ready(sk);
- }
- }
- }
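- /* After aborting partial delivery or skipping MIDs on @sid, release lobby
-  * messages for that stream whose MID has already been passed, plus the
-  * next in-sequence message if it is sitting right at the head of the gap.
-  */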
- static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
- {
- struct sctp_stream *stream = &ulpq->asoc->stream;
- struct sctp_ulpevent *cevent, *event = NULL;
- struct sk_buff_head *lobby = &ulpq->lobby;
- struct sk_buff *pos, *tmp;
- struct sk_buff_head temp;
- __u16 csid;
- __u32 cmid;
- skb_queue_head_init(&temp);
- sctp_skb_for_each(pos, lobby, tmp) {
- cevent = (struct sctp_ulpevent *)pos->cb;
- csid = cevent->stream;
- cmid = cevent->mid;
- if (csid > sid)
- break;
- if (csid < sid)
- continue;
- if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
- break;
- __skb_unlink(pos, lobby);
- if (!event)
- event = sctp_skb2event(pos);
- __skb_queue_tail(&temp, pos);
- }
- if (!event && pos != (struct sk_buff *)lobby) {
- cevent = (struct sctp_ulpevent *)pos->cb;
- csid = cevent->stream;
- cmid = cevent->mid;
- if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
- sctp_mid_next(stream, in, csid);
- __skb_unlink(pos, lobby);
- __skb_queue_tail(&temp, pos);
- event = sctp_skb2event(pos);
- }
- }
- if (event) {
- sctp_intl_retrieve_ordered(ulpq, event);
- sctp_enqueue_event(ulpq, &temp);
- }
- }
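- /* Abort partial delivery on every incoming stream, ordered and unordered,
-  * send the corresponding notifications, release whatever can now be
-  * delivered, and finally flush the ulp queues.
-  */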
- static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
- {
- struct sctp_stream *stream = &ulpq->asoc->stream;
- __u16 sid;
- for (sid = 0; sid < stream->incnt; sid++) {
- struct sctp_stream_in *sin = SCTP_SI(stream, sid);
- __u32 mid;
- if (sin->pd_mode_uo) {
- sin->pd_mode_uo = 0;
- mid = sin->mid_uo;
- sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
- }
- if (sin->pd_mode) {
- sin->pd_mode = 0;
- mid = sin->mid;
- sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
- sctp_mid_skip(stream, in, sid, mid);
- sctp_intl_reap_ordered(ulpq, sid);
- }
- }
- /* intl abort pd happens only when all data needs to be cleaned */
- sctp_ulpq_flush(ulpq);
- }
- static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
- int nskips, __be16 stream, __u8 flags)
- {
- int i;
- for (i = 0; i < nskips; i++)
- if (skiplist[i].stream == stream &&
- skiplist[i].flags == flags)
- return i;
- return i;
- }
- #define SCTP_FTSN_U_BIT 0x1
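- /* Build an I-FORWARD-TSN chunk from the abandoned-chunk list: advance the
-  * advanced peer ack point, record up to 10 per-stream skip entries
-  * (stream, flags, MID) and queue the chunk on the control chunk list.
-  */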
- static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
- {
- struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
- struct sctp_association *asoc = q->asoc;
- struct sctp_chunk *ftsn_chunk = NULL;
- struct list_head *lchunk, *temp;
- int nskips = 0, skip_pos;
- struct sctp_chunk *chunk;
- __u32 tsn;
- if (!asoc->peer.prsctp_capable)
- return;
- if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
- asoc->adv_peer_ack_point = ctsn;
- list_for_each_safe(lchunk, temp, &q->abandoned) {
- chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
- tsn = ntohl(chunk->subh.data_hdr->tsn);
- if (TSN_lte(tsn, ctsn)) {
- list_del_init(lchunk);
- sctp_chunk_free(chunk);
- } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
- __be16 sid = chunk->subh.idata_hdr->stream;
- __be32 mid = chunk->subh.idata_hdr->mid;
- __u8 flags = 0;
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- flags |= SCTP_FTSN_U_BIT;
- asoc->adv_peer_ack_point = tsn;
- skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
- sid, flags);
- ftsn_skip_arr[skip_pos].stream = sid;
- ftsn_skip_arr[skip_pos].reserved = 0;
- ftsn_skip_arr[skip_pos].flags = flags;
- ftsn_skip_arr[skip_pos].mid = mid;
- if (skip_pos == nskips)
- nskips++;
- if (nskips == 10)
- break;
- } else {
- break;
- }
- }
- if (asoc->adv_peer_ack_point > ctsn)
- ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
- nskips, &ftsn_skip_arr[0]);
- if (ftsn_chunk) {
- list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
- SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
- }
- }
- #define _sctp_walk_ifwdtsn(pos, chunk, end) \
- for (pos = chunk->subh.ifwdtsn_hdr->skip; \
- (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
- sizeof(struct sctp_ifwdtsn_skip); pos++)
- #define sctp_walk_ifwdtsn(pos, ch) \
- _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
- sizeof(struct sctp_ifwdtsn_chunk))
- static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
- {
- struct sctp_fwdtsn_skip *skip;
- __u16 incnt;
- if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
- return false;
- incnt = chunk->asoc->stream.incnt;
- sctp_walk_fwdtsn(skip, chunk)
- if (ntohs(skip->stream) >= incnt)
- return false;
- return true;
- }
- static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
- {
- struct sctp_ifwdtsn_skip *skip;
- __u16 incnt;
- if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
- return false;
- incnt = chunk->asoc->stream.incnt;
- sctp_walk_ifwdtsn(skip, chunk)
- if (ntohs(skip->stream) >= incnt)
- return false;
- return true;
- }
- static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
- {
- /* Move the Cumulative TSN Ack ahead. */
- sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
- /* purge the fragmentation queue */
- sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
- /* Abort any in progress partial delivery. */
- sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
- }
- static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
- {
- struct sk_buff *pos, *tmp;
- skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
- struct sctp_ulpevent *event = sctp_skb2event(pos);
- __u32 tsn = event->tsn;
- if (TSN_lte(tsn, ftsn)) {
- __skb_unlink(pos, &ulpq->reasm);
- sctp_ulpevent_free(event);
- }
- }
- skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
- struct sctp_ulpevent *event = sctp_skb2event(pos);
- __u32 tsn = event->tsn;
- if (TSN_lte(tsn, ftsn)) {
- __skb_unlink(pos, &ulpq->reasm_uo);
- sctp_ulpevent_free(event);
- }
- }
- }
- static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
- {
- /* Move the Cumulative TSN Ack ahead. */
- sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
- /* purge the fragmentation queue */
- sctp_intl_reasm_flushtsn(ulpq, ftsn);
- /* abort only when it's for all data */
- if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
- sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
- }
- static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
- {
- struct sctp_fwdtsn_skip *skip;
- /* Walk through all the skipped SSNs */
- sctp_walk_fwdtsn(skip, chunk)
- sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
- }
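- /* Handle one skip entry of a received I-FORWARD-TSN: abort any partial
-  * delivery the skipped MID overtakes and, for ordered streams, move the
-  * expected MID forward and release newly deliverable lobby messages.
-  */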
- static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
- __u8 flags)
- {
- struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
- struct sctp_stream *stream = &ulpq->asoc->stream;
- if (flags & SCTP_FTSN_U_BIT) {
- if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
- sin->pd_mode_uo = 0;
- sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
- GFP_ATOMIC);
- }
- return;
- }
- if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
- return;
- if (sin->pd_mode) {
- sin->pd_mode = 0;
- sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
- }
- sctp_mid_skip(stream, in, sid, mid);
- sctp_intl_reap_ordered(ulpq, sid);
- }
- static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
- {
- struct sctp_ifwdtsn_skip *skip;
- /* Walk through all the skipped MIDs and abort stream pd if possible */
- sctp_walk_ifwdtsn(skip, chunk)
- sctp_intl_skip(ulpq, ntohs(skip->stream),
- ntohl(skip->mid), skip->flags);
- }
- static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
- {
- struct sk_buff_head temp;
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- return sctp_ulpq_tail_event(ulpq, &temp);
- }
- static struct sctp_stream_interleave sctp_stream_interleave_0 = {
- .data_chunk_len = sizeof(struct sctp_data_chunk),
- .ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
- /* DATA process functions */
- .make_datafrag = sctp_make_datafrag_empty,
- .assign_number = sctp_chunk_assign_ssn,
- .validate_data = sctp_validate_data,
- .ulpevent_data = sctp_ulpq_tail_data,
- .enqueue_event = do_ulpq_tail_event,
- .renege_events = sctp_ulpq_renege,
- .start_pd = sctp_ulpq_partial_delivery,
- .abort_pd = sctp_ulpq_abort_pd,
- /* FORWARD-TSN process functions */
- .generate_ftsn = sctp_generate_fwdtsn,
- .validate_ftsn = sctp_validate_fwdtsn,
- .report_ftsn = sctp_report_fwdtsn,
- .handle_ftsn = sctp_handle_fwdtsn,
- };
- static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
- {
- struct sk_buff_head temp;
- skb_queue_head_init(&temp);
- __skb_queue_tail(&temp, sctp_event2skb(event));
- return sctp_enqueue_event(ulpq, &temp);
- }
- static struct sctp_stream_interleave sctp_stream_interleave_1 = {
- .data_chunk_len = sizeof(struct sctp_idata_chunk),
- .ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
- /* I-DATA process functions */
- .make_datafrag = sctp_make_idatafrag_empty,
- .assign_number = sctp_chunk_assign_mid,
- .validate_data = sctp_validate_idata,
- .ulpevent_data = sctp_ulpevent_idata,
- .enqueue_event = do_sctp_enqueue_event,
- .renege_events = sctp_renege_events,
- .start_pd = sctp_intl_start_pd,
- .abort_pd = sctp_intl_abort_pd,
- /* I-FORWARD-TSN process functions */
- .generate_ftsn = sctp_generate_iftsn,
- .validate_ftsn = sctp_validate_iftsn,
- .report_ftsn = sctp_report_iftsn,
- .handle_ftsn = sctp_handle_iftsn,
- };
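- /* Choose the DATA or I-DATA operations for this association, depending on
-  * whether the peer negotiated support for user message interleaving.
-  */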
- void sctp_stream_interleave_init(struct sctp_stream *stream)
- {
- struct sctp_association *asoc;
- asoc = container_of(stream, struct sctp_association, stream);
- stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
- : &sctp_stream_interleave_0;
- }