/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;

			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

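	/* If the event's skb sits on a temporary list, it is the first
	 * member (see the comment above), so its prev pointer leads back
	 * to that list head; otherwise prev is NULL and there is nothing
	 * to splice.
	 */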
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association that caused the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						struct sk_buff_head *queue,
						struct sk_buff *f_frag,
						struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;
		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
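
/* Free events from the tail of 'list' (the highest, most recently queued
 * TSNs first) until at least 'needed' bytes have been reneged or the
 * Cumulative TSN ACK Point is reached; returns the number of bytes freed.
 */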
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
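		/* Space needed is the chunk's user data: the length from
		 * the chunk header minus the DATA chunk header itself.
		 */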
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;

		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}