ulpqueue.c

/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
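
/* Orientation note: throughout this file, sctp_ulpevent and sk_buff are
 * two views of the same object.  The event lives in the skb's control
 * block, so sctp_skb2event()/sctp_event2skb() are cheap pointer
 * conversions, not allocations.  A minimal sketch of the idea (the real
 * definitions live in include/net/sctp/ulpevent.h):
 *
 *	event = (struct sctp_ulpevent *)skb->cb;    // sctp_skb2event()
 *	skb   = container_of-style reverse mapping  // sctp_event2skb()
 *
 * This is why the queues below (reasm, lobby, pd_lobby) are plain
 * sk_buff_head lists even though they conceptually hold events.
 */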

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}
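
/* Note on sctp_ulpq_tail_data() return values, summarizing the paths
 * above: -ENOMEM if no event could be built, 1 if a complete (MSG_EOR)
 * message reached the ULP, and 0 if the data was queued for
 * reassembly/ordering or went up without MSG_EOR (i.e. as a partial
 * delivery).  Callers such as sctp_ulpq_renege() use this to decide
 * whether to start partial delivery or drain the reassembly queue.
 */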

/* Add a new event for propagation to the ULP. */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
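
/* Partial delivery (PD) state is tracked at two levels: sp->pd_mode is
 * an atomic count of associations on the socket currently in PD, while
 * ulpq->pd_mode flags the one association this queue serves.  The
 * socket-wide counter is what lets sctp_clear_pd() above distinguish
 * "last one out flushes the whole lobby" from "pull only this
 * association's events out of the lobby".
 */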

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;
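
	/* An aside on the cast below (assuming the classic doubly-linked
	 * sk_buff list layout): the first skb queued on an sk_buff_head
	 * has its prev pointer aimed back at the list head itself.
	 * Callers like sctp_ulpq_tail_data() queue the event's skb on a
	 * temporary list before calling us, so skb->prev recovers that
	 * whole list; a bare, unlisted skb leaves skb_list NULL.
	 */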

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event))
		sk_mark_napi_id(sk, skb);

	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association causing the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
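
/* TSN comparisons here use serial-number arithmetic so that ordering
 * survives 32-bit wraparound.  A sketch of the macros (the real ones in
 * include/net/sctp/sm.h add typechecking):
 *
 *	#define TSN_lt(a, b)	((__s32)((a) - (b)) < 0)
 *	#define TSN_lte(a, b)	((__s32)((a) - (b)) <= 0)
 *
 * So TSN_lt(0xffffffff, 0) is true: TSN 0 is "after" 0xffffffff.  The
 * 16-bit SSN_lt() used by the ordering code below is the same idea.
 */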

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						struct sk_buff_head *queue,
						struct sk_buff *f_frag,
						struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
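
/* The net effect of sctp_make_reassembled_event() is one skb whose
 * frag_list chains the remaining fragments, e.g. for fragments A..D:
 *
 *	A (head; len/data_len now cover A+B+C+D)
 *	`-- frag_list: B -> C -> D
 *
 * No payload is copied (except the skb_copy() needed for a cloned
 * head), which keeps reassembly cheap for large messages.
 */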

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if user sets
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
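
/* For reference, the pd_point checked above is opted into from
 * userspace with the SCTP_PARTIAL_DELIVERY_POINT socket option; an
 * illustrative sketch (hypothetical values):
 *
 *	__u32 pd_point = 4096;
 *	setsockopt(fd, SOL_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &pd_point, sizeof(pd_point));
 *
 * Once at least pd_point contiguous bytes of a message's leading
 * fragments sit at the head of the reasm queue, the walk above may
 * hand them up before the final fragment arrives.
 */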

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	return retval;
}
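
/* Unlike sctp_ulpq_retrieve_reassembled(), the helper above never
 * requires a LAST_FRAG: it hands back only the leading FIRST/MIDDLE
 * run of a message.  sctp_ulpq_partial_delivery() below uses it to
 * start pushing an incomplete message up when rwnd pressure forces
 * our hand.
 */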

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed. */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
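
/* The lobby is therefore kept sorted by stream ID, then by SSN within a
 * stream.  For example, with the lobby holding (sid 1, ssn 2),
 * (sid 1, ssn 3), (sid 2, ssn 0), an arriving out-of-order (sid 1,
 * ssn 1) is inserted at the front of stream 1's run, so that
 * sctp_ulpq_retrieve_ordered() can later peel off ssn 1..3 in a single
 * pass once ssn 1 becomes deliverable.
 */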

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering. */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;
		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine. */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
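
/* Reneging above deliberately walks each queue from the tail: the
 * newest, highest-TSN data is dropped first, and nothing at or below
 * the Cumulative TSN ACK Point is ever reneged, since that data has
 * already been irrevocably acknowledged to the peer.
 */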

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;
		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/* Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk);
}