ulpqueue.c

/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk. */
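/* Returns 1 if a complete message (MSG_EOR) was handed to the ULP, 0 if
 * the data was only queued for reassembly/ordering or delivered without
 * MSG_EOR, or -ENOMEM if the event could not be created.
 */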
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

/* Add a new event for propagation to the ULP. */

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
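/* Returns 1 if this was the last association in partial delivery and the
 * pd_lobby was flushed wholesale to the socket receive queue (the caller
 * should then wake the socket), 0 otherwise.
 */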
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;

			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
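			/* The lobby is now empty; re-initialize its head in
			 * place.  (A struct sk_buff_head begins with the same
			 * next/prev pair as a struct list_head, so the cast
			 * below is safe.)
			 */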
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;
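	/* When the caller collected skbs on a temporary list, this event's
	 * skb is the first member of that list, and a queued skb's ->prev
	 * points back at the sk_buff_head (skb queues are circular), so the
	 * cast below recovers that list.  skb->prev is NULL when the event
	 * was never queued.
	 */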
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ association
	 * is the cause of the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs, they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
	struct sk_buff_head *queue, struct sk_buff *f_frag,
	struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
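	/* (The loop below has an intentionally empty body; it only walks
	 * 'last' to the final element of the frag_list.)
	 */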
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
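		/* pd_point stays zero unless the user has set the
		 * SCTP_PARTIAL_DELIVERY_POINT socket option, so a zero
		 * value disables size-triggered partial delivery here.
		 */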
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed. */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering. */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
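	/* (When the walk above exhausts the lobby, 'pos' ends up pointing at
	 * the queue head itself rather than at a real skb, hence the extra
	 * check below.)
	 */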
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;
		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine. */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;
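	/* Free at least as many bytes as the incoming chunk occupies (its
	 * length minus the DATA chunk header); with no chunk given, fall
	 * back to reneging up to a full default window.
	 */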
	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}

	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;

		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}