stream_interleave.c

/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>
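
/* Build an I-DATA chunk that carries only the I-DATA header plus room
 * for "len" payload bytes.  The sndrcvinfo is copied into the chunk so
 * the MID/FSN can be filled in later, once the whole message has been
 * fragmented.
 */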
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}
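
/* Assign the Message Identifier (MID) and per-fragment FSN to every
 * fragment of the message this chunk belongs to.  The FIRST fragment
 * carries the PPID in place of an FSN; the stream's MID counter is
 * only advanced when the LAST fragment is seen, so all fragments of
 * one message share the same MID.
 */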
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}
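
/* A DATA chunk on an ordered stream is acceptable only if its SSN is
 * not behind the next expected SSN for that stream; unordered chunks
 * are always acceptable.
 */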
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	const struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}
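
/* Same check for I-DATA chunks, using the 32-bit MID instead of the
 * 16-bit SSN.
 */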
static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}
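
/* Insert an event into the reassembly queue, kept sorted by stream id,
 * then MID, then FSN (with the FIRST fragment of a message sorting
 * ahead of its other fragments).
 */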
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
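
/* Partial-delivery continuation: with pd_mode already set for this
 * stream, pull the next run of in-sequence fragments (middle fragments
 * starting at sin->fsn, optionally closed by the LAST fragment) so
 * they can be handed to the user immediately.
 */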
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}
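
/* Try to reassemble a complete message for this MID.  If no LAST
 * fragment has arrived yet, fall back to partial delivery: once the
 * contiguous head of the next expected message reaches the socket's
 * pd_point, deliver it early and enter pd_mode.
 */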
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}
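
/* Reassembly entry point for ordered I-DATA: unfragmented messages are
 * delivered as-is; fragments are queued and then retrieved either as a
 * partial-delivery continuation or as a fully reassembled message.
 */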
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(ulpq->asoc, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}
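
/* Insert an event into the ordering lobby, kept sorted by stream id
 * and then by MID.
 */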
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream)
			break;

		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid))
			break;
	}

	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
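
/* After delivering one message in order, drain any queued messages on
 * the same stream whose MIDs have now become consecutive, appending
 * them to the same event list.
 */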
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}
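
/* Ordered delivery: a message whose MID is not the next expected one
 * for its stream waits in the lobby; otherwise it is delivered and the
 * lobby is drained behind it.
 */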
static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}
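
/* Hand one event (or a spliced list of events) to the socket receive
 * queue, honouring shutdown state and event subscriptions, and wake
 * the reader if it has not been signalled yet.
 */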
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *skb_list;

	skb_list = (struct sk_buff_head *)skb->prev;

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
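
/* The *_uo variants below mirror the ordered reassembly path for
 * unordered (SCTP_DATA_UNORDERED) messages, tracking progress in
 * sin->mid_uo/fsn_uo/pd_mode_uo and using the reasm_uo queue.
 */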
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(ulpq->asoc, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}
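
/* Forced partial delivery for unordered messages: pick the first
 * stream not already in pd_mode_uo and deliver the contiguous head of
 * its oldest incomplete message, entering pd_mode_uo for it.
 */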
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}
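
/* Main receive-side entry for I-DATA chunks: build a ulpevent, recover
 * the MID and the PPID or FSN from the I-DATA header, run it through
 * reassembly (and ordering, for ordered streams), then enqueue whatever
 * became deliverable.  Returns 1 if a full message (MSG_EOR) was
 * delivered, 0 otherwise, or -ENOMEM.
 */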
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event && event->msg_flags & MSG_EOR) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, event);
	}

	return event_eor;
}
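
/* Forced partial delivery for ordered messages: like the _uo variant
 * above, but only the next expected MID on a stream (cevent->mid ==
 * csin->mid) may be partially delivered.
 */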
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}
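
/* Kick partial delivery on both reassembly queues, draining as many
 * events as can be made deliverable.
 */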
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}
}
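
/* Renege: make room for an incoming chunk by purging the ordering
 * lobby and then both reassembly queues.  If enough was freed, process
 * the chunk; should that still not complete a message, fall back to
 * partial delivery.
 */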
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(struct sctp_idata_chunk);
	} else {
		needed = SCTP_DEFAULT_MAXWINDOW;
	}

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (chunk && freed >= needed)
		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
			sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}
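
/* Emit an SCTP_PARTIAL_DELIVERY_ABORTED notification for one stream,
 * if the user subscribed to partial-delivery events.
 */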
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sctp_sk(sk)->subscribe))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sctp_sk(sk)->data_ready_signalled) {
			sctp_sk(sk)->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}
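
/* After aborting partial delivery on a stream, flush from the lobby
 * every queued message whose MID the abort has skipped past, and
 * deliver them together with anything that became in-order.
 */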
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);

	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}
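
/* Abort partial delivery on every stream: notify the user for each
 * interrupted message (unordered and ordered), skip the ordered MID
 * forward, reap what that unblocks, then flush the whole ulpq.
 */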
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = &stream->in[sid];
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}
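
/* Two sets of stream interleave operations: _0 drives the classic DATA
 * chunk path, _1 the RFC 8260 I-DATA path with user message
 * interleaving.
 */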
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= sctp_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
};
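
/* Pick the operations at association setup, depending on whether user
 * message interleaving was negotiated for the association
 * (asoc->intl_enable).
 */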
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}