/* inqueue.c - SCTP input queue (chunk demultiplexing from inbound packets) */
  1. /* SCTP kernel implementation
  2. * Copyright (c) 1999-2000 Cisco, Inc.
  3. * Copyright (c) 1999-2001 Motorola, Inc.
  4. * Copyright (c) 2002 International Business Machines, Corp.
  5. *
  6. * This file is part of the SCTP kernel implementation
  7. *
  8. * These functions are the methods for accessing the SCTP inqueue.
  9. *
  10. * An SCTP inqueue is a queue into which you push SCTP packets
  11. * (which might be bundles or fragments of chunks) and out of which you
  12. * pop SCTP whole chunks.
  13. *
  14. * This SCTP implementation is free software;
  15. * you can redistribute it and/or modify it under the terms of
  16. * the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2, or (at your option)
  18. * any later version.
  19. *
  20. * This SCTP implementation is distributed in the hope that it
  21. * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  22. * ************************
  23. * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  24. * See the GNU General Public License for more details.
  25. *
  26. * You should have received a copy of the GNU General Public License
  27. * along with GNU CC; see the file COPYING. If not, see
  28. * <http://www.gnu.org/licenses/>.
  29. *
  30. * Please send any bug reports or fixes you make to the
  31. * email address(es):
  32. * lksctp developers <linux-sctp@vger.kernel.org>
  33. *
  34. * Written or modified by:
  35. * La Monte H.P. Yarroll <piggy@acm.org>
  36. * Karl Knutson <karl@athena.chicago.il.us>
  37. */
  38. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  39. #include <net/sctp/sctp.h>
  40. #include <net/sctp/sm.h>
  41. #include <linux/interrupt.h>
  42. #include <linux/slab.h>
/* Initialize an SCTP inqueue.
 *
 * Sets up an empty chunk list and no packet in progress.  The work
 * item's handler is left NULL here; it is installed later through
 * sctp_inq_set_th_handler() before sctp_inq_push() may invoke it.
 */
void sctp_inq_init(struct sctp_inq *queue)
{
	INIT_LIST_HEAD(&queue->in_chunk_list);
	queue->in_progress = NULL;

	/* Create a task for delivering data. */
	INIT_WORK(&queue->immediate, NULL);
}
  51. /* Release the memory associated with an SCTP inqueue. */
  52. void sctp_inq_free(struct sctp_inq *queue)
  53. {
  54. struct sctp_chunk *chunk, *tmp;
  55. /* Empty the queue. */
  56. list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
  57. list_del_init(&chunk->list);
  58. sctp_chunk_free(chunk);
  59. }
  60. /* If there is a packet which is currently being worked on,
  61. * free it as well.
  62. */
  63. if (queue->in_progress) {
  64. sctp_chunk_free(queue->in_progress);
  65. queue->in_progress = NULL;
  66. }
  67. }
/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 *
 * If the chunk's receiver endpoint/association is already dead, the
 * chunk is dropped immediately.  Otherwise the chunk is queued and the
 * inqueue's handler (installed by sctp_inq_set_th_handler()) is called
 * synchronously to process it.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
	/* Directly call the packet handling routine. */
	if (chunk->rcvr->dead) {
		sctp_chunk_free(chunk);
		return;
	}

	/* We are now calling this either from the soft interrupt
	 * or from the backlog processing.
	 * Eventually, we should clean up inqueue to not rely
	 * on the BH related data structures.
	 */
	/* The chunk must be enqueued before the handler runs, since the
	 * handler pulls work from in_chunk_list.
	 */
	list_add_tail(&chunk->list, &q->in_chunk_list);
	if (chunk->asoc)
		chunk->asoc->stats.ipackets++;
	/* Invoke the work function directly rather than scheduling it;
	 * callers guarantee we are in a lock-safe context here.
	 */
	q->immediate.func(&q->immediate);
}
  88. /* Peek at the next chunk on the inqeue. */
  89. struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
  90. {
  91. struct sctp_chunk *chunk;
  92. sctp_chunkhdr_t *ch = NULL;
  93. chunk = queue->in_progress;
  94. /* If there is no more chunks in this packet, say so */
  95. if (chunk->singleton ||
  96. chunk->end_of_packet ||
  97. chunk->pdiscard)
  98. return NULL;
  99. ch = (sctp_chunkhdr_t *)chunk->chunk_end;
  100. return ch;
  101. }
/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING: If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 *
 * Returns the next whole chunk parsed out of the packet in progress
 * (pulling the next packet off the queue when needed), or NULL when
 * the queue is empty.  On return, chunk->chunk_hdr/chunk_end bracket
 * the chunk and exactly one of singleton/end_of_packet/pdiscard may
 * be set to describe its position in the packet.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */
	chunk = queue->in_progress;
	if (chunk) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
			/* For GSO packets, advance through the frag_list:
			 * first from the head skb into its fragment list,
			 * then fragment by fragment.
			 */
			if (chunk->head_skb == chunk->skb) {
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
				goto new_skb;
			}
			if (chunk->skb->next) {
				chunk->skb = chunk->skb->next;
				goto new_skb;
			}

			/* Restore the head skb before freeing so the whole
			 * GSO skb chain is released together.
			 */
			if (chunk->head_skb)
				chunk->skb = chunk->head_skb;
			sctp_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
			/* Force chunk->skb->data to chunk->chunk_end. */
			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
			/* We are guaranteed to pull a SCTP header. */
		}
	}

	/* Do we need to take the next packet out of the queue to process? */
	if (!chunk) {
		struct list_head *entry;

next_chunk:
		/* Is the queue empty? */
		entry = sctp_list_dequeue(&queue->in_chunk_list);
		if (!entry)
			return NULL;

		chunk = list_entry(entry, struct sctp_chunk, list);

		/* Linearize if it's not GSO */
		if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
		    skb_is_nonlinear(chunk->skb)) {
			if (skb_linearize(chunk->skb)) {
				/* Linearization failed (allocation); count
				 * the discard and move on to the next packet.
				 */
				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}

			/* Update sctp_hdr as it probably changed */
			chunk->sctp_hdr = sctp_hdr(chunk->skb);
		}

		if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
			/* GSO-marked skbs but without frags, handle
			 * them normally
			 */
			if (skb_shinfo(chunk->skb)->frag_list)
				chunk->head_skb = chunk->skb;

			/* skbs with "cover letter" */
			if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;

			if (WARN_ON(!chunk->skb)) {
				__SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}
		}

		if (chunk->asoc)
			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

		queue->in_progress = chunk;

new_skb:
		/* This is the first chunk in the packet. */
		ch = (sctp_chunkhdr_t *) chunk->skb->data;
		/* Reset per-chunk state; the flags below are recomputed for
		 * every chunk parsed out of this skb.
		 */
		chunk->singleton = 1;
		chunk->data_accepted = 0;
		chunk->pdiscard = 0;
		chunk->auth = 0;
		chunk->has_asconf = 0;
		chunk->end_of_packet = 0;
		if (chunk->head_skb) {
			/* Propagate the head skb's control-block pointers to
			 * the current fragment so later lookups see them.
			 */
			struct sctp_input_cb
				*cb = SCTP_INPUT_CB(chunk->skb),
				*head_cb = SCTP_INPUT_CB(chunk->head_skb);

			cb->chunk = head_cb->chunk;
			cb->af = head_cb->af;
		}
	}

	chunk->chunk_hdr = ch;
	/* chunk_end is the declared length rounded up to a 4-byte boundary,
	 * per SCTP chunk padding rules.
	 */
	chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
	chunk->subh.v = NULL; /* Subheader is no longer valid. */

	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
	    skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
		/* Declared length overruns the skb: truncate and let the
		 * state machine discard it.
		 */
		chunk->pdiscard = 1;
		chunk->chunk_end = skb_tail_pointer(chunk->skb);
	} else {
		/* We are at the end of the packet, so mark the chunk
		 * in case we need to send a SACK.
		 */
		chunk->end_of_packet = 1;
	}

	pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
		 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

	return chunk;
}
/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}