ulpqueue.c

/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}
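
/* A minimal lifecycle sketch, for illustration only (the call sequence is
 * an assumption about the association code, not part of this file): the
 * association embeds a struct sctp_ulpq, initializes it once, feeds it
 * incoming DATA chunks, and flushes/frees it on teardown.
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);
 *	...
 *	sctp_ulpq_tail_data(&asoc->ulpq, chunk, GFP_ATOMIC);
 *	...
 *	sctp_ulpq_free(&asoc->ulpq);
 */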

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}
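
/* Return-value summary for sctp_ulpq_tail_data(), restating the code above:
 * -ENOMEM if no event could be allocated, 1 if a complete message (MSG_EOR
 * set) was handed to the ULP, and 0 otherwise (the data was parked for
 * reassembly/ordering or only a partial message went up).
 * sctp_ulpq_renege() below keys off this value to choose between starting
 * partial delivery and draining the reassembly queue.
 */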

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
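
/* Note on the two pd_mode fields: sctp_sock->pd_mode is an atomic count of
 * associations on this socket currently doing partial delivery, while
 * sctp_ulpq->pd_mode is a per-association flag.  For example, with fragment
 * interleave enabled two associations may both be in PD; the lobby is only
 * spliced wholesale into the receive queue once the last of them clears PD
 * and the counter drops to zero.
 */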

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.
 * If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
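
/* Queue selection above, summarized (a restatement of the branches, not
 * additional behavior):
 *   - no association in PD:       sk_receive_queue
 *   - this association in PD:     notifications and unfragmented data go to
 *                                 pd_lobby; fragments of the message being
 *                                 delivered go to sk_receive_queue, and the
 *                                 MSG_EOR fragment also ends PD
 *   - another association in PD:  sk_receive_queue if frag_interleave is
 *                                 set, otherwise pd_lobby
 */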

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
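
/* For illustration: with the reasm queue holding events for TSNs 100, 101
 * and 103, storing TSN 102 walks past 100 and 101 and inserts before 103,
 * keeping the queue sorted; TSN 104 would take the skb_peek_tail() short
 * cut and be appended directly.
 */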

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
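
/* Resulting shape, for illustration: for fragments A (first), B, C (last)
 * sitting on the reassembly queue, the routine returns A with B and C
 * appended to skb_shinfo(A)->frag_list, A->len and A->data_len grown by the
 * lengths of B and C, and all three unlinked from the queue.  Any entries
 * already on A's frag_list (left there by IP reassembly) stay at the front
 * of that chain.
 */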

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
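
/* Worked example with illustrative numbers: if the queue holds FIRST(100),
 * MIDDLE(101), MIDDLE(102), LAST(103), the walk reaches the LAST fragment
 * with ctsn == next_tsn, jumps to 'found', and returns one event with
 * MSG_EOR set.  If LAST(103) has not arrived yet but 100-102 already hold
 * at least pd_point bytes (and PD is permitted), the 100-102 prefix is
 * returned without MSG_EOR and the ulpq enters partial delivery.
 */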

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
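
/* For illustration: if the reassembly queue holds fragments with TSNs
 * 100-102 and 105-106 and a FORWARD TSN advances the cumulative point to
 * 104, the walk frees 100-102 and stops at 105, which lies beyond the new
 * cumulative TSN point and may still complete normally.
 */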

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed. */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the sctp_ulpevent
		 * for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering. */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
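
/* For illustration: if stream 2 next expects SSN 5 and SSN 6 arrives first,
 * sctp_ulpq_order() parks it in the lobby and returns NULL; when SSN 5
 * arrives it is delivered immediately, the expected SSN advances, and
 * sctp_ulpq_retrieve_ordered() pulls the parked SSN 6 onto the same event
 * list so both go up together.
 */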

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered data that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine. */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
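
/* For illustration: a FORWARD TSN that abandons stream 3 up to SSN 7 leads
 * to sctp_ulpq_skip(ulpq, 3, 7); the expected SSN for stream 3 moves past 7,
 * and sctp_ulpq_reap_ordered() then releases any lobby entries for stream 3
 * whose SSNs now fall below the new expectation.
 */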

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
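
/* For illustration: reneging starts from the tail of the given queue and
 * stops once 'needed' bytes are freed or it reaches data at or below the
 * Cumulative TSN ACK Point, which is never reneged.  An event whose skb
 * carries TSN 200 with frag_list members for TSNs 201 and 202 contributes
 * the head length of all three skbs to 'freed' and marks TSNs 200-202 as
 * reneged in the peer's tsnmap, so the peer may retransmit them later.
 */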

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message, as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(struct sctp_data_chunk);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;

		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
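
/* A small sizing note on the above: for an incoming chunk, 'needed' is the
 * chunk length minus sizeof(struct sctp_data_chunk), i.e. roughly the user
 * payload of the DATA chunk; with the usual 16-byte DATA chunk header, a
 * 1044-byte chunk would need about 1028 bytes of room.  Without a chunk, a
 * full default window's worth (SCTP_DEFAULT_MAXWINDOW) is reneged.
 */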

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}