/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
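
/*
 * Check whether the context fits into the output queue: walk the buffers
 * starting at next_buf_to_fill and count how many are needed to hold all
 * of the context's elements. Returns the buffer count, or -EBUSY if a
 * buffer on the way is not empty.
 */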
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                    QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}
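
/*
 * Free all pages and the element array of a context, then the context
 * itself. Called via qeth_eddp_put_context() once the reference count has
 * dropped to zero, or directly on error paths before the context is made
 * visible.
 */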
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        if (ctx->elements != NULL)
                kfree(ctx->elements);
        kfree(ctx);
}
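
/*
 * Reference counting: a context may be referenced by several outbound
 * buffers, so it is only freed when the last reference is put.
 */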
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}
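
/*
 * Drop every context reference attached to an outbound buffer; called
 * when the buffer is recycled after transmission.
 */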
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}
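
/*
 * Attach a context to a buffer's ctx_list and take a reference on it, so
 * that the context stays alive until the buffer has been processed.
 */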
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}
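
/*
 * Copy the prepared elements of a context into the QDIO buffers of the
 * output queue, starting at the given index. Buffers that fill up are
 * marked PRIMED so they will be flushed; returns the number of buffers
 * to flush, or -EBUSY if the first buffer was not empty.
 */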
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen, since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                     buf->next_element_to_fill)
                    < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
#ifdef CONFIG_QETH_PERF_STATS
                queue->card->perf_stats.skbs_sent_pack++;
#endif
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                    QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}
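
/*
 * Write the qeth header, the MAC and VLAN headers (layer 2 only) and the
 * network and transport headers of the next segment into the context
 * pages, and start a new buffer element describing them. If the complete
 * packet would cross a page boundary, the headers are placed at the
 * start of the next page instead.
 */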
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does complete packet fit in current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header (?) */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}
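
/*
 * Copy len bytes of TCP payload from the skb (linear data first, then
 * page fragments) to dst, updating the running checksum in *hcsum.
 * Note: using page_to_pfn() << PAGE_SHIFT as a virtual address relies on
 * the 1:1 mapping of physical memory on s390.
 */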
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        u32 *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len -
                                                eddp->skb->data_len) -
                                                eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}
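
/*
 * Copy the payload of one segment into the context pages, spreading it
 * over as many buffer elements as page boundaries require and setting
 * the first/middle/last fragment flags accordingly. Finally, fold the
 * accumulated checksum into the TCP header that was placed into the
 * context by qeth_eddp_create_segment_hdrs().
 */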
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  u32 hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
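
/*
 * Start the TCP checksum for an IPv4 segment: clear the check field,
 * compute the pseudo header checksum and fold in the TCP header itself.
 */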
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
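
/*
 * The IPv6 variant builds the pseudo header checksum from the source and
 * destination addresses and the protocol number. Unlike the IPv4 case,
 * the TCP header itself is not folded in here.
 */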
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 proto;
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}
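
/*
 * Allocate a scratch qeth_eddp_data and seed it with copies of the qeth,
 * network and transport headers that are replicated for every segment.
 */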
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                memset(eddp, 0, sizeof(struct qeth_eddp_data));
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}
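
/*
 * The actual segmentation loop: chop the payload into tso_size'd chunks
 * and, for each chunk, adjust the qeth, IP and TCP header templates,
 * compute the checksum and append headers plus data to the context.
 * FIN/PSH are only carried over into the last segment; the IPv4 id and
 * the TCP sequence number are advanced between rounds.
 */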
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        u32 hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        tcph = eddp->skb->h.th;
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == ETH_P_IP) {
                        eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
                                                 eddp->thl;
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                             eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == ETH_P_IP)
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == ETH_P_IP)
                        eddp->nh.ip4.h.id++;
                eddp->th.tcp.h.seq += data_len;
        }
}
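
/*
 * Set up the per-skb eddp data (header templates plus MAC and VLAN
 * information for layer 2) and run the segmentation over it. Returns 0
 * on success or -ENOMEM if the scratch data could not be allocated.
 */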
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == ETH_P_IP)
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
                                                  skb->nh.iph->ihl*4,
                                                  (u8 *)skb->h.th,
                                                  skb->h.th->doff*4);
        else
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
                                                  sizeof(struct ipv6hdr),
                                                  (u8 *)skb->h.th,
                                                  skb->h.th->doff*4);
        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = __constant_htons(skb->protocol);
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the next flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}
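
/*
 * Determine how many pages and buffer elements the context needs: if
 * several segments (payload plus headers) fit into one page, each
 * segment occupies a single element; otherwise one segment spans several
 * elements. The calculation allows for one segment more than tso_segs,
 * apparently as a safety margin.
 */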
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
                                         PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->tso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->tso_segs + 1);
}
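
/*
 * Allocate and initialize an EDDP context for the given skb: compute the
 * page and element counts, then allocate the pages that will hold the
 * segmented copy as well as the element array describing them. Returns
 * NULL if a segment would need more elements than a buffer can hold or
 * if any allocation fails.
 */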
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        memset(ctx, 0, sizeof(struct qeth_eddp_context));
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kmalloc(ctx->num_elements *
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        memset(ctx->elements, 0,
               ctx->num_elements * sizeof(struct qeth_eddp_element));
        /* reset num_elements; will be incremented again in fill_buffer to
         * reflect number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}
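
/*
 * Create a context for a TCP skb; the per-segment header length depends
 * on whether the packet is IPv4 or IPv6. The returned context has a
 * reference count of one, owned by the caller.
 */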
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == ETH_P_IP)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
                        skb->h.th->doff*4);
        else if (skb->protocol == ETH_P_IPV6)
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        skb->h.th->doff*4);
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}
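
/*
 * Entry point used by the qeth transmit path. Only TCP is supported;
 * for any other socket protocol no context is created.
 */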
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (skb->sk->sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}