ulpqueue.c

/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}
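
/* Partial delivery (PD) bookkeeping used by the helpers below:
 * sctp_sock->pd_mode counts how many associations on the socket are
 * currently in partial delivery, sctp_ulpq->pd_mode flags this
 * particular association, and events arriving while PD is in progress
 * may be parked on sctp_sock->pd_lobby until sctp_clear_pd() moves
 * them to the socket receive queue.
 */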

/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
	struct sk_buff_head *queue, struct sk_buff *f_frag,
	struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if user sets
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}
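
/* Ordering helpers.  The lobby queue holds events that arrived out of
 * order, sorted by stream id and then by SSN (see
 * sctp_ulpq_store_ordered()).  When an in-order event is found, any
 * lobby events it unblocks are appended to the temporary list that the
 * event's skb is already on; that list head is reachable through the
 * skb's prev pointer (see the comment above sctp_ulpq_tail_event()).
 */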

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;
		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
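
/* Reneging: when receive buffer space runs short, events that were
 * queued for ordering or reassembly but never delivered may be freed
 * and their TSNs marked as missing again in the tsn_map via
 * sctp_tsnmap_renege(), so the peer will retransmit them.  Data at or
 * below the Cumulative TSN ACK Point must never be reneged.
 */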

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;

		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}