/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
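
/* For orientation, a rough sketch of how a caller might drive this API,
 * using only the functions defined in this file.  The handler name
 * my_inq_worker is hypothetical and is shown purely for illustration:
 *
 *	struct sctp_inq inq;
 *
 *	sctp_inq_init(&inq);
 *	sctp_inq_set_th_handler(&inq, my_inq_worker);
 *	sctp_inq_push(&inq, chunk);
 *	...
 *	sctp_inq_free(&inq);
 *
 * sctp_inq_push() runs the registered handler directly; the handler is
 * expected to drain the queue with sctp_inq_pop(), as sketched at the end
 * of this file.  sctp_inq_free() drops anything still queued as well as a
 * packet that is only partially processed.
 */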

/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
        INIT_LIST_HEAD(&queue->in_chunk_list);
        queue->in_progress = NULL;

        /* Create a task for delivering data. */
        INIT_WORK(&queue->immediate, NULL);
}

/* Release the memory associated with an SCTP inqueue. */
void sctp_inq_free(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk, *tmp;

        /* Empty the queue. */
        list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
                list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
        }

        /* If there is a packet which is currently being worked on,
         * free it as well.
         */
        if (queue->in_progress) {
                sctp_chunk_free(queue->in_progress);
                queue->in_progress = NULL;
        }
}

/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
        /* Directly call the packet handling routine. */
        if (chunk->rcvr->dead) {
                sctp_chunk_free(chunk);
                return;
        }

        /* We are now calling this either from the soft interrupt
         * or from the backlog processing.
         * Eventually, we should clean up inqueue to not rely
         * on the BH related data structures.
         */
        local_bh_disable();
        list_add_tail(&chunk->list, &q->in_chunk_list);
        if (chunk->asoc)
                chunk->asoc->stats.ipackets++;
        q->immediate.func(&q->immediate);
        local_bh_enable();
}

/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        chunk = queue->in_progress;
        /* If there are no more chunks in this packet, say so */
        if (chunk->singleton ||
            chunk->end_of_packet ||
            chunk->pdiscard)
                return NULL;

        ch = (sctp_chunkhdr_t *)chunk->chunk_end;

        return ch;
}
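
/* A note on peek vs. pop: sctp_inq_peek() only reports the header of the
 * chunk that would come next out of the packet currently being processed;
 * it neither advances in_progress nor touches in_chunk_list, and it must
 * only be called while a packet is in progress, since in_progress is
 * dereferenced without a NULL check.  A hypothetical caller could use it
 * to look ahead while still handling the current chunk, e.g.:
 *
 *	struct sctp_chunkhdr *next_hdr = sctp_inq_peek(inqueue);
 *
 *	if (next_hdr && next_hdr->type == SCTP_CID_SACK)
 *		... defer some work until the SACK itself is popped ...
 */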

/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING: If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        /* The assumption is that we are safe to process the chunks
         * at this time.
         */
        chunk = queue->in_progress;
        if (chunk) {
                /* There is a packet that we have been working on.
                 * Any post processing work to do before we move on?
                 */
                if (chunk->singleton ||
                    chunk->end_of_packet ||
                    chunk->pdiscard) {
                        if (chunk->head_skb == chunk->skb) {
                                chunk->skb = skb_shinfo(chunk->skb)->frag_list;
                                goto new_skb;
                        }
                        if (chunk->skb->next) {
                                chunk->skb = chunk->skb->next;
                                goto new_skb;
                        }

                        if (chunk->head_skb)
                                chunk->skb = chunk->head_skb;
                        sctp_chunk_free(chunk);
                        chunk = queue->in_progress = NULL;
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *)chunk->chunk_end;
                        /* Force chunk->skb->data to chunk->chunk_end. */
                        skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
                        /* We are guaranteed to pull an SCTP header. */
                }
        }

        /* Do we need to take the next packet out of the queue to process? */
        if (!chunk) {
                struct list_head *entry;

next_chunk:
                /* Is the queue empty? */
                entry = sctp_list_dequeue(&queue->in_chunk_list);
                if (!entry)
                        return NULL;

                chunk = list_entry(entry, struct sctp_chunk, list);

                /* Linearize if it's not GSO */
                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
                    skb_is_nonlinear(chunk->skb)) {
                        if (skb_linearize(chunk->skb)) {
                                __SCTP_INC_STATS(dev_net(chunk->skb->dev),
                                                 SCTP_MIB_IN_PKT_DISCARDS);
                                sctp_chunk_free(chunk);
                                goto next_chunk;
                        }

                        /* Update sctp_hdr as it probably changed */
                        chunk->sctp_hdr = sctp_hdr(chunk->skb);
                }

                if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
                        /* GSO-marked skbs but without frags, handle
                         * them normally
                         */
                        if (skb_shinfo(chunk->skb)->frag_list)
                                chunk->head_skb = chunk->skb;

                        /* skbs with "cover letter" */
                        if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
                                chunk->skb = skb_shinfo(chunk->skb)->frag_list;

                        if (WARN_ON(!chunk->skb)) {
                                /* chunk->skb is NULL here, so count the drop
                                 * against the device of the GSO head skb
                                 * (head_skb must be set for skb to be NULL).
                                 */
                                __SCTP_INC_STATS(dev_net(chunk->head_skb->dev),
                                                 SCTP_MIB_IN_PKT_DISCARDS);
                                sctp_chunk_free(chunk);
                                goto next_chunk;
                        }
                }

                if (chunk->asoc)
                        sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

                queue->in_progress = chunk;

new_skb:
                /* This is the first chunk in the packet. */
                ch = (sctp_chunkhdr_t *)chunk->skb->data;
                chunk->singleton = 1;
                chunk->data_accepted = 0;
                chunk->pdiscard = 0;
                chunk->auth = 0;
                chunk->has_asconf = 0;
                chunk->end_of_packet = 0;
                if (chunk->head_skb) {
                        struct sctp_input_cb
                                *cb = SCTP_INPUT_CB(chunk->skb),
                                *head_cb = SCTP_INPUT_CB(chunk->head_skb);

                        cb->chunk = head_cb->chunk;
                        cb->af = head_cb->af;
                }
        }

        chunk->chunk_hdr = ch;
        chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid. */

        if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
            skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
                /* Discard inside state machine. */
                chunk->pdiscard = 1;
                chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
                 */
                chunk->end_of_packet = 1;
        }

        pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
                 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
                 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

        return chunk;
}
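
/* The usual consumption pattern, implied by the handler comment below, is
 * a loop that drains the queue one whole chunk at a time; a minimal sketch
 * (the processing step itself is outside the scope of this file):
 *
 *	struct sctp_chunk *chunk;
 *
 *	while ((chunk = sctp_inq_pop(inqueue)) != NULL) {
 *		... run the chunk through the state machine ...
 *	}
 *
 * Note that the WARNING above is load-bearing: the returned chunk is
 * queue->in_progress itself, and the next call to sctp_inq_pop() may free
 * it once the packet is exhausted, so a chunk that must outlive this loop
 * or sit on another queue has to be cloned first.
 */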

/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
        INIT_WORK(&q->immediate, callback);
}
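
/* A handler registered above receives the work_struct embedded in the
 * inqueue, so a minimal (hypothetical) callback can recover its queue with
 * container_of() and then drain it as sketched after sctp_inq_pop():
 *
 *	static void my_inq_worker(struct work_struct *work)
 *	{
 *		struct sctp_inq *q = container_of(work, struct sctp_inq,
 *						  immediate);
 *		struct sctp_chunk *chunk;
 *
 *		while ((chunk = sctp_inq_pop(q)) != NULL)
 *			... process chunk ...
 *	}
 *
 * Because sctp_inq_push() calls q->immediate.func() directly (with bottom
 * halves disabled) instead of scheduling deferred work, the callback runs
 * synchronously in the context that queued the packet.
 */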