cxgbit_target.c

/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>

#include <asm/unaligned.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "cxgbit.h"

struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};

#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
		    sizeof(struct fw_ofld_tx_data_wr))
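
/*
 * TX skb layout: TX_HDR_LEN bytes of headroom are reserved for the
 * fw_ofld_tx_data_wr (plus the cpl_tx_data_iso in the ISO case) that is
 * pushed just before transmit; the iSCSI BHS lives in the linear area and
 * payload is carried as page frags, either preallocated here or attached
 * later by cxgbit_map_skb().
 */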
static struct sk_buff *
__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
{
	struct sk_buff *skb = NULL;
	u8 submode = 0;
	int errcode;
	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;

	if (len) {
		skb = alloc_skb_with_frags(hdr_len, len,
					   0, &errcode,
					   GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
		skb->data_len = len;
		skb->len += len;
		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);

	} else {
		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;

		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN + iso_len);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
	}

	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
	cxgbit_skcb_submode(skb) = submode;
	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
	return skb;
}

static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
{
	return __cxgbit_alloc_skb(csk, len, false);
}

/*
 * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
{
	int length = skb->len;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
		length += sizeof(struct fw_ofld_tx_data_wr);

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
		length += sizeof(struct cpl_tx_data_iso);

#define MAX_IMM_TX_PKT_LEN 256
	return length <= MAX_IMM_TX_PKT_LEN;
}

/*
 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int cxgbit_sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (cxgbit_is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + cxgbit_sgl_len(cnt);
}

#define CXGBIT_ISO_FSLICE 0x1
#define CXGBIT_ISO_LSLICE 0x2
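
/*
 * Build the CPL_TX_DATA_ISO control message in the skb headroom.  The
 * FSLICE/LSLICE flags mark the first and last slices of the burst, and the
 * HDRCRC/PLDCRC bits are taken from the skb's ULP submode, matching the
 * digests negotiated for this connection.
 */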
static void
cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
{
	struct cpl_tx_data_iso *cpl;
	unsigned int submode = cxgbit_skcb_submode(skb);
	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);

	cpl = __skb_push(skb, sizeof(*cpl));

	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
			CPL_TX_DATA_ISO_FIRST_V(fslice) |
			CPL_TX_DATA_ISO_LAST_V(lslice) |
			CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
			CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
			CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
			CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
			CPL_TX_DATA_ISO_SCSI_V(2));

	cpl->ahs_len = 0;
	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
	cpl->len = htonl(iso_info->len);
	cpl->reserved2_seglen_offset = htonl(0);
	cpl->datasn_offset = htonl(0);
	cpl->buffer_offset = htonl(0);
	cpl->reserved3 = 0;

	__skb_pull(skb, sizeof(*cpl));
}

static void
cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
		  u32 len, u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	u32 submode = cxgbit_skcb_submode(skb);
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
		    T6_TX_FORCE_F;

	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		hdr_size += sizeof(struct cpl_tx_data_iso);
		submode |= 8;
	}

	if (cxgbit_is_ofld_imm(skb))
		immlen += dlen;

	req = __skb_push(skb, hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					 FW_WR_COMPL_V(compl) |
					 FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = htonl(len);
	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
		      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
}

static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
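
/*
 * Drain csk->txq while transmit WR credits are available.  The first send
 * on a connection also issues the FW_FLOWC_WR (CSK_TX_DATA_SENT).  Each skb
 * is charged the credits it consumes, gets its fw_ofld_tx_data_wr built if
 * SKCBF_TX_NEED_HDR is set, and is handed to the L2T for transmission.
 */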
void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 credits_needed;
		u32 compl = 0;
		u32 flowclen16 = 0;
		u32 iso_cpl_len = 0;

		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		if (cxgbit_is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed = DIV_ROUND_UP((8 *
					cxgbit_calc_tx_flits_ofld(skb)) +
					iso_cpl_len, 16);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
			credits_needed += DIV_ROUND_UP(
				sizeof(struct fw_ofld_tx_data_wr), 16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
		}

		if (csk->wr_cred < credits_needed) {
			pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
				 csk, skb->len, skb->data_len,
				 credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->txq);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
		skb->csum = (__force __wsum)(credits_needed + flowclen16);
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;

		pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			 csk, skb->len, skb->data_len, credits_needed,
			 csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
			len += cxgbit_skcb_tx_extralen(skb);

			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
			    (!before(csk->write_seq,
				     csk->snd_una + csk->snd_win))) {
				compl = 1;
				csk->wr_una_cred = 0;
			}

			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
					  compl);
			csk->snd_nxt += len;

		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
			csk->wr_una_cred = 0;
		}

		cxgbit_sock_enqueue_wr(csk, skb);
		t4_set_arp_err_handler(skb, csk,
				       cxgbit_arp_failure_skb_discard);

		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
			 csk, csk->tid, skb, len);

		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
	}
}
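
/*
 * TX path locking: cxgbit_lock_sock() only takes ownership once the send
 * window has room (write_seq is still inside snd_una + snd_win), so
 * cxgbit_queue_skb() naturally blocks while the peer is slow to ack.
 * While the owner runs, RX-side handlers queue their work on
 * csk->backlogq, which cxgbit_unlock_sock() replays before releasing
 * ownership.
 */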
static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
{
	spin_lock_bh(&csk->lock);

	if (before(csk->write_seq, csk->snd_una + csk->snd_win))
		csk->lock_owner = true;

	spin_unlock_bh(&csk->lock);

	return csk->lock_owner;
}

static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
	struct sk_buff_head backlogq;
	struct sk_buff *skb;
	void (*fn)(struct cxgbit_sock *, struct sk_buff *);

	skb_queue_head_init(&backlogq);

	spin_lock_bh(&csk->lock);
	while (skb_queue_len(&csk->backlogq)) {
		skb_queue_splice_init(&csk->backlogq, &backlogq);
		spin_unlock_bh(&csk->lock);

		while ((skb = __skb_dequeue(&backlogq))) {
			fn = cxgbit_skcb_rx_backlog_fn(skb);
			fn(csk, skb);
		}

		spin_lock_bh(&csk->lock);
	}

	csk->lock_owner = false;
	spin_unlock_bh(&csk->lock);
}

static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	int ret = 0;

	wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));

	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
		     signal_pending(current))) {
		__kfree_skb(skb);
		__skb_queue_purge(&csk->ppodq);
		ret = -1;

		spin_lock_bh(&csk->lock);
		if (csk->lock_owner) {
			spin_unlock_bh(&csk->lock);
			goto unlock;
		}
		spin_unlock_bh(&csk->lock);
		return ret;
	}

	csk->write_seq += skb->len +
			  cxgbit_skcb_tx_extralen(skb);

	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);

unlock:
	cxgbit_unlock_sock(csk);
	return ret;
}
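
/*
 * Attach up to MAX_SKB_FRAGS pages from the command's t_data_sg to the skb
 * as page frags (zero copy), reserving the last frag slot for a freshly
 * allocated zero page when the payload needs padding to a 4-byte boundary.
 */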
static int
cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
	       u32 data_length)
{
	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
	u32 padding = ((-data_length) & 3);
	struct scatterlist *sg;
	struct page *page;
	unsigned int page_off;

	if (padding)
		nr_frags--;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
	page_off = (data_offset % PAGE_SIZE);

	while (data_length && (i < nr_frags)) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		page = sg_page(sg);

		get_page(page);
		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
				   cur_len);
		skb->data_len += cur_len;
		skb->len += cur_len;
		skb->truesize += cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	if (data_length)
		return -1;

	if (padding) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -1;
		skb_fill_page_desc(skb, i, page, 0, padding);
		skb->data_len += padding;
		skb->len += padding;
		skb->truesize += padding;
	}

	return 0;
}
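
/*
 * Transmit a Data-In sequence using iSCSI segmentation offload (ISO): each
 * skb carries one Data-In header template plus up to max_iso_npdu PDUs
 * worth of payload, and the CPL_TX_DATA_ISO built above describes how the
 * adapter should slice the burst into individual PDUs.
 */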
static int
cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		     struct iscsi_datain_req *dr)
{
	struct iscsi_conn *conn = csk->conn;
	struct sk_buff *skb;
	struct iscsi_datain datain;
	struct cxgbit_iso_info iso_info;
	u32 data_length = cmd->se_cmd.data_length;
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
	u32 num_pdu, plen, tx_data = 0;
	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
		SCF_TRANSPORT_TASK_SENSE);
	bool set_statsn = false;
	int ret = -1;

	while (data_length) {
		num_pdu = (data_length + mrdsl - 1) / mrdsl;
		if (num_pdu > csk->max_iso_npdu)
			num_pdu = csk->max_iso_npdu;

		plen = num_pdu * mrdsl;
		if (plen > data_length)
			plen = data_length;

		skb = __cxgbit_alloc_skb(csk, 0, true);
		if (unlikely(!skb))
			return -ENOMEM;

		memset(skb->data, 0, ISCSI_HDR_LEN);
		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
				((num_pdu - 1) * ISCSI_HDR_LEN);

		memset(&datain, 0, sizeof(struct iscsi_datain));
		memset(&iso_info, 0, sizeof(iso_info));

		if (!tx_data)
			iso_info.flags |= CXGBIT_ISO_FSLICE;

		if (!(data_length - plen)) {
			iso_info.flags |= CXGBIT_ISO_LSLICE;
			if (!task_sense) {
				datain.flags = ISCSI_FLAG_DATA_STATUS;
				iscsit_increment_maxcmdsn(cmd, conn->sess);
				cmd->stat_sn = conn->stat_sn++;
				set_statsn = true;
			}
		}

		iso_info.burst_len = num_pdu * mrdsl;
		iso_info.mpdu = mrdsl;
		iso_info.len = ISCSI_HDR_LEN + plen;

		cxgbit_cpl_tx_data_iso(skb, &iso_info);

		datain.offset = tx_data;
		datain.data_sn = cmd->data_sn - 1;

		iscsit_build_datain_pdu(cmd, conn, &datain,
					(struct iscsi_data_rsp *)skb->data,
					set_statsn);

		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
		if (unlikely(ret)) {
			__kfree_skb(skb);
			goto out;
		}

		ret = cxgbit_queue_skb(csk, skb);
		if (unlikely(ret))
			goto out;

		tx_data += plen;
		data_length -= plen;

		cmd->read_data_done += plen;
		cmd->data_sn += num_pdu;
	}

	dr->dr_complete = DATAIN_COMPLETE_NORMAL;

	return 0;

out:
	return ret;
}

static int
cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		 const struct iscsi_datain *datain)
{
	struct sk_buff *skb;
	int ret = 0;

	skb = cxgbit_alloc_skb(csk, 0);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (datain->length) {
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) =
				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
	}

	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
	if (ret < 0) {
		__kfree_skb(skb);
		return ret;
	}

	return cxgbit_queue_skb(csk, skb);
}
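
/*
 * ISO is only used for a full, in-order transfer: the data must exceed
 * MRDSL, need no padding, start at offset 0, not be a recovery
 * retransmission, and the connection must have a non-zero max_iso_npdu.
 * Everything else falls back to plain one-PDU-per-skb Data-In.
 */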
static int
cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       struct iscsi_datain_req *dr,
		       const struct iscsi_datain *datain)
{
	struct cxgbit_sock *csk = conn->context;
	u32 data_length = cmd->se_cmd.data_length;
	u32 padding = ((-data_length) & 3);
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;

	if ((data_length > mrdsl) && (!dr->recovery) &&
	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
		atomic_long_add(data_length - datain->length,
				&conn->sess->tx_data_octets);
		return cxgbit_tx_datain_iso(csk, cmd, dr);
	}

	return cxgbit_tx_datain(csk, cmd, datain);
}

static int
cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding = ((-data_buf_len) & 3);

	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (data_buf_len) {
		u32 pad_bytes = 0;

		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);

		if (padding)
			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
				       &pad_bytes, padding);
	}

	cxgbit_skcb_tx_extralen(skb) =
			cxgbit_digest_len[cxgbit_skcb_submode(skb)];

	return cxgbit_queue_skb(csk, skb);
}

int
cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
{
	if (dr)
		return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
	else
		return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}
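
/*
 * Clamp MaxXmitDataSegmentLength during negotiation so the initiator never
 * ends up with a value larger than the adapter's maximum data segment
 * length (cdev->mdsl).
 */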
int cxgbit_validate_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct iscsi_param *param;
	u32 max_xmitdsl;

	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
					  conn->param_list);
	if (!param)
		return -1;

	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
		return -1;

	if (max_xmitdsl > cdev->mdsl) {
		if (iscsi_change_param_sprintf(
			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
			return -1;
	}

	return 0;
}

static int cxgbit_set_digest(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
	if (!param) {
		pr_err("param not found key %s\n", HEADERDIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_HCRC;

	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
	if (!param) {
		csk->submode = 0;
		pr_err("param not found key %s\n", DATADIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_DCRC;

	if (cxgbit_setup_conn_digest(csk)) {
		csk->submode = 0;
		return -1;
	}

	return 0;
}
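
/*
 * Size the ISO burst: the number of PDUs per ISO skb is bounded both by
 * MaxBurstLength / MaxRecvDataSegmentLength and by how many full PDUs
 * (header + MRDSL + digests) fit in CXGBIT_MAX_ISO_PAYLOAD rounded down to
 * the connection's emss.  A result of one PDU or less disables ISO.
 */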
static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u32 mrdsl, mbl;
	u32 max_npdu, max_iso_npdu;
	u32 max_iso_payload;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", MAXBURSTLENGTH);
			return -1;
		}

		if (kstrtou32(param->value, 0, &mbl) < 0)
			return -1;
	} else {
		mbl = conn->sess->sess_ops->MaxBurstLength;
	}

	mrdsl = conn_ops->MaxRecvDataSegmentLength;
	max_npdu = mbl / mrdsl;

	max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);

	max_iso_npdu = max_iso_payload /
		       (ISCSI_HDR_LEN + mrdsl +
			cxgbit_digest_len[csk->submode]);

	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);

	if (csk->max_iso_npdu <= 1)
		csk->max_iso_npdu = 0;

	return 0;
}

/*
 * cxgbit_seq_pdu_inorder()
 * @csk: pointer to cxgbit socket structure
 *
 * This function checks whether data sequence and data
 * pdu are in order.
 *
 * Return: returns -1 on error, 0 if data sequence and
 * data pdu are in order, 1 if data sequence or data pdu
 * is not in order.
 */
static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

		param = iscsi_find_param_from_key(DATAPDUINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATAPDUINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

	} else {
		if (!conn->sess->sess_ops->DataSequenceInOrder)
			return 1;
		if (!conn->sess->sess_ops->DataPDUInOrder)
			return 1;
	}

	return 0;
}
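
/*
 * Apply the negotiated parameters to the offloaded connection: clamp MRDSL
 * to the adapter limit, program header/data digest offload, and, when
 * ERL=0 with in-order data, enable ISO and DDP if the device supports
 * them.
 */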
static int cxgbit_set_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u8 erl;

	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;

	if (cxgbit_set_digest(csk))
		return -1;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
			return -1;
		}
		if (kstrtou8(param->value, 0, &erl) < 0)
			return -1;
	} else {
		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
	}

	if (!erl) {
		int ret;

		ret = cxgbit_seq_pdu_inorder(csk);
		if (ret < 0) {
			return -1;
		} else if (ret > 0) {
			if (is_t5(cdev->lldi.adapter_type))
				goto enable_ddp;
			else
				return 0;
		}

		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
			if (cxgbit_set_iso_npdu(csk))
				return -1;
		}

enable_ddp:
		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
			if (cxgbit_setup_conn_pgidx(csk,
						    ppm->tformat.pgsz_idx_dflt))
				return -1;
			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
		}
	}

	return 0;
}

int
cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		    u32 length)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding_buf = 0;
	u8 padding = ((-length) & 3);

	skb = cxgbit_alloc_skb(csk, length + padding);
	if (!skb)
		return -ENOMEM;
	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);

	if (padding)
		skb_store_bits(skb, ISCSI_HDR_LEN + length,
			       &padding_buf, padding);

	if (login->login_complete) {
		if (cxgbit_set_params(conn)) {
			kfree_skb(skb);
			return -1;
		}

		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
	}

	if (cxgbit_queue_skb(csk, skb))
		return -1;

	if ((!login->login_complete) && (!login->login_failed))
		schedule_delayed_work(&conn->login_work, 0);

	return 0;
}
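
/*
 * Copy the PDU data segment out of the receive skb (dlen bytes starting at
 * doffset) into the command's scatterlist, skipping 'skip' bytes of SG
 * space.  Used on the non-DDP receive paths.
 */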
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
		      unsigned int nents, u32 skip)
{
	struct skb_seq_state st;
	const u8 *buf;
	unsigned int consumed = 0, buf_len;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);

	skb_prepare_seq_read(skb, pdu_cb->doffset,
			     pdu_cb->doffset + pdu_cb->dlen,
			     &st);

	while (true) {
		buf_len = skb_seq_read(consumed, &buf, &st);
		if (!buf_len) {
			skb_abort_seq_read(&st);
			break;
		}

		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
						 buf_len, skip + consumed);
	}
}

static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
	struct cxgbit_cmd *ccmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
		return NULL;
	}

	ccmd = iscsit_priv_cmd(cmd);
	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
	ccmd->setup_ddp = true;

	return cmd;
}
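
/*
 * Consume immediate data carried with a SCSI command.  When the payload is
 * a single skb frag covering the whole transfer, the frag page is handed
 * to the target core directly (zero copy); otherwise the data is copied
 * into the command's scatterlist.  Data digest errors are handled per the
 * negotiated ErrorRecoveryLevel.
 */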
static int
cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			     u32 length)
{
	struct iscsi_conn *conn = cmd->conn;
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ImmediateData CRC32C DataDigest error\n");
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Immediate Data digest failure while"
			       " in ERL=0.\n");
			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
					  (unsigned char *)hdr);
			return IMMEDIATE_DATA_CANNOT_RECOVER;
		}

		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
				  (unsigned char *)hdr);
		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
	}

	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];

		sg_init_table(&ccmd->sg, 1);
		sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
			    dfrag->page_offset);
		get_page(dfrag->page.p);

		cmd->se_cmd.t_data_sg = &ccmd->sg;
		cmd->se_cmd.t_data_nents = 1;

		ccmd->release = true;
	} else {
		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
	}

	cmd->write_data_done += pdu_cb->dlen;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}

static int
cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	struct iscsi_conn *conn = cmd->conn;
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
	if (dump_payload)
		goto after_immediate_data;

	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
						 cmd->first_burst_len);
after_immediate_data:
	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
		 */
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
						(unsigned char *)hdr,
						hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		} else if (cmd->unsolicited_data) {
			iscsit_set_unsoliticed_dataout(cmd);
		}

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}

static int
cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
	int rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
	    (pdu_cb->nr_dfrags == 1))
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0)
		return 0;
	else if (rc > 0)
		dump_payload = true;

	if (!pdu_cb->dlen)
		return 0;

	return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
}
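
/*
 * Handle a Data-Out PDU.  If the adapter already placed the whole burst
 * directly into the command buffers (PDUCBF_RX_DDP_CMP), only the
 * bookkeeping (write_data_done, next_burst_len, DataSN) is updated;
 * otherwise the data segment is copied into the scatterlist unless this
 * PDU was individually DDP'd (PDUCBF_RX_DATA_DDPD).
 */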
static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
	u32 data_offset = be32_to_cpu(hdr->offset);
	u32 data_len = pdu_cb->dlen;
	int rc, sg_nents, sg_off;
	bool dcrc_err = false;

	if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
		u32 offset = be32_to_cpu(hdr->offset);
		u32 ddp_data_len;
		u32 payload_length = ntoh24(hdr->dlength);
		bool success = false;

		cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
		if (!cmd)
			return 0;

		ddp_data_len = offset - cmd->write_data_done;
		atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);

		cmd->write_data_done = offset;
		cmd->next_burst_len = ddp_data_len;
		cmd->data_sn = be32_to_cpu(hdr->datasn);

		rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
						cmd, payload_length, &success);
		if (rc < 0)
			return rc;
		else if (!success)
			return 0;
	} else {
		rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
		if (rc < 0)
			return rc;
		else if (!cmd)
			return 0;
	}

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
		       " DataSN: 0x%08x\n",
		       hdr->itt, hdr->offset, data_len,
		       hdr->datasn);

		dcrc_err = true;
		goto check_payload;
	}

	pr_debug("DataOut data_len: %u, "
		 "write_data_done: %u, data_length: %u\n",
		 data_len, cmd->write_data_done,
		 cmd->se_cmd.data_length);

	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
		u32 skip = data_offset % PAGE_SIZE;

		sg_off = data_offset / PAGE_SIZE;
		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
	}

check_payload:

	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
	if (rc < 0)
		return rc;

	return 0;
}

static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
	unsigned char *ping_data = NULL;
	u32 payload_length = pdu_cb->dlen;
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " NOPOUT Ping DataCRC failure while in"
			       " ERL=0.\n");
			ret = -1;
			goto out;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping NOPOUT"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			ret = 0;
			goto out;
		}
	}

	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			pr_err("Unable to allocate memory for"
			       " NOPOUT ping data.\n");
			ret = -1;
			goto out;
		}

		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      ping_data, payload_length);

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsi_cmd->buf_ptr.
		 */
		cmd->buf_ptr = ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			 " data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	return iscsit_process_nop_out(conn, cmd, hdr);
out:
	if (cmd)
		iscsit_free_cmd(cmd, false);
	return ret;
}

static int
cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
	u32 payload_length = pdu_cb->dlen;
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Text Data digest failure while in"
			       " ERL=0.\n");
			goto reject;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping Text"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			return 0;
		}
	}

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate text_in of payload_length: %u\n",
			       payload_length);
			return -ENOMEM;
		}

		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      text_in, payload_length);

		text_in[payload_length - 1] = '\0';

		cmd->text_in_ptr = text_in;
	}

	return iscsit_process_text_cmd(conn, cmd, hdr);

reject:
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
				 pdu_cb->hdr);
}
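
/*
 * Dispatch a received PDU by opcode.  Opcodes that need a new command
 * context allocate one via cxgbit_allocate_cmd(); allocation failure is
 * reported back to the initiator as a BOOKMARK_NO_RESOURCES reject.
 */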
static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
	int ret = -EINVAL;

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = cxgbit_handle_scsi_cmd(csk, cmd);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = cxgbit_handle_iscsi_dataout(csk);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_nop_out(csk, cmd);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				goto reject;
		} else {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_text_cmd(csk, cmd);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP * HZ);
		break;
	case ISCSI_OP_SNACK:
		ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;

reject:
	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				 (unsigned char *)hdr);
}

static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_hdr *hdr = pdu_cb->hdr;
	u8 opcode;

	if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
		atomic_long_inc(&conn->sess->conn_digest_errors);
		goto transport_err;
	}

	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
		goto transport_err;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->sess->sess_ops->SessionType &&
	    ((!(opcode & ISCSI_OP_TEXT)) ||
	     (!(opcode & ISCSI_OP_LOGOUT)))) {
		pr_err("Received illegal iSCSI Opcode: 0x%02x"
		       " while in Discovery Session, rejecting.\n", opcode);
		iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
				  (unsigned char *)hdr);
		goto transport_err;
	}

	if (cxgbit_target_rx_opcode(csk) < 0)
		goto transport_err;

	return 0;

transport_err:
	return -1;
}

static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_login *login = conn->login;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_login_req *login_req;

	login_req = (struct iscsi_login_req *)login->req;
	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
		 login_req->flags, login_req->itt, login_req->cmdsn,
		 login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
	/*
	 * Setup the initial iscsi_login values from the leading
	 * login request PDU.
	 */
	if (login->first_request) {
		login_req = (struct iscsi_login_req *)login->req;
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
				login_req->flags);
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);

	return 0;
}

static int
cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
	int ret;

	cxgbit_rx_pdu_cb(skb) = pdu_cb;

	csk->skb = skb;

	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
		ret = cxgbit_rx_login_pdu(csk);
		set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
	} else {
		ret = cxgbit_rx_opcode(csk);
	}

	return ret;
}

static void cxgbit_lro_skb_dump(struct sk_buff *skb)
{
	struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 i;

	pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
		skb, skb->head, skb->data, skb->len, skb->data_len,
		ssi->nr_frags);
	pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);

	for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
		pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
			"frags %u.\n",
			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
			pdu_cb->ddigest, pdu_cb->frags);
	for (i = 0; i < ssi->nr_frags; i++)
		pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
			skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
}

static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->lro_hskb;
	struct skb_shared_info *ssi = skb_shinfo(skb);
	u8 i;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);

	for (i = 0; i < ssi->nr_frags; i++)
		put_page(skb_frag_page(&ssi->frags[i]));

	ssi->nr_frags = 0;
	skb->data_len = 0;
	skb->truesize -= skb->len;
	skb->len = 0;
}
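
/*
 * A PDU can arrive split across LRO skbs (header in one, data or status in
 * the next).  Merge the pieces for pdu_idx into the per-connection holding
 * skb (csk->lro_hskb) by taking references on the source frags, so the
 * merged PDU can be processed once its status arrives.
 */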
static void
cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
{
	struct sk_buff *hskb = csk->lro_hskb;
	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
	struct skb_shared_info *hssi = skb_shinfo(hskb);
	struct skb_shared_info *ssi = skb_shinfo(skb);
	unsigned int len = 0;

	if (pdu_cb->flags & PDUCBF_RX_HDR) {
		u8 hfrag_idx = hssi->nr_frags;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->seq = pdu_cb->seq;
		hpdu_cb->hdr = pdu_cb->hdr;
		hpdu_cb->hlen = pdu_cb->hlen;

		memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
		       sizeof(skb_frag_t));

		get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
		hssi->nr_frags++;
		hpdu_cb->frags++;
		hpdu_cb->hfrag_idx = hfrag_idx;

		len = hssi->frags[hfrag_idx].size;
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_DATA) {
		u8 dfrag_idx = hssi->nr_frags, i;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->dfrag_idx = dfrag_idx;

		len = 0;
		for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
			memcpy(&hssi->frags[dfrag_idx],
			       &ssi->frags[pdu_cb->dfrag_idx + i],
			       sizeof(skb_frag_t));

			get_page(skb_frag_page(&hssi->frags[dfrag_idx]));

			len += hssi->frags[dfrag_idx].size;

			hssi->nr_frags++;
			hpdu_cb->frags++;
		}

		hpdu_cb->dlen = pdu_cb->dlen;
		hpdu_cb->doffset = hpdu_cb->hlen;
		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
		hpdu_cb->flags |= pdu_cb->flags;

		if (hpdu_cb->flags & PDUCBF_RX_DATA)
			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;

		hpdu_cb->ddigest = pdu_cb->ddigest;
		hpdu_cb->pdulen = pdu_cb->pdulen;
	}
}
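
/*
 * Walk every PDU described by the LRO control block.  An incomplete first
 * PDU is merged with the holding skb (and processed once its status flag
 * is present); complete PDUs are processed in place; a trailing partial
 * PDU is merged into the holding skb for the next LRO skb to complete.
 */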
static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 pdu_idx = 0, last_idx = 0;
	int ret = 0;

	if (!pdu_cb->complete) {
		cxgbit_lro_skb_merge(csk, skb, 0);

		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
			struct sk_buff *hskb = csk->lro_hskb;

			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);

			cxgbit_lro_hskb_reset(csk);

			if (ret < 0)
				goto out;
		}

		pdu_idx = 1;
	}

	if (lro_cb->pdu_idx)
		last_idx = lro_cb->pdu_idx - 1;

	for (; pdu_idx <= last_idx; pdu_idx++) {
		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
		if (ret < 0)
			goto out;
	}

	if ((!lro_cb->complete) && lro_cb->pdu_idx)
		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);

out:
	return ret;
}

static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	int ret = -1;

	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
	    (pdu_cb->seq != csk->rcv_nxt)) {
		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
		cxgbit_lro_skb_dump(skb);
		return ret;
	}

	csk->rcv_nxt += lro_cb->pdu_totallen;

	ret = cxgbit_process_lro_skb(csk, skb);

	csk->rx_credits += lro_cb->pdu_totallen;

	if (csk->rx_credits >= (csk->rcv_win / 4))
		cxgbit_rx_data_ack(csk);

	return ret;
}

static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	int ret = -1;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
		if (is_t5(lldi->adapter_type))
			ret = cxgbit_rx_lro_skb(csk, skb);
		else
			ret = cxgbit_process_lro_skb(csk, skb);
	}

	__kfree_skb(skb);
	return ret;
}

static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
{
	spin_lock_bh(&csk->rxq.lock);
	if (skb_queue_len(&csk->rxq)) {
		skb_queue_splice_init(&csk->rxq, rxq);
		spin_unlock_bh(&csk->rxq.lock);
		return true;
	}
	spin_unlock_bh(&csk->rxq.lock);
	return false;
}
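
/*
 * Sleep until skbs appear on csk->rxq (or a signal arrives), splice them to
 * a local list, and process each one.  Returns 0 when the batch was
 * consumed cleanly, -1 on signal or processing error (the remainder of the
 * batch is dropped).
 */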
static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct sk_buff_head rxq;

	skb_queue_head_init(&rxq);

	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));

	if (signal_pending(current))
		goto out;

	while ((skb = __skb_dequeue(&rxq))) {
		if (cxgbit_rx_skb(csk, skb))
			goto out;
	}

	return 0;
out:
	__skb_queue_purge(&rxq);
	return -1;
}

int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct cxgbit_sock *csk = conn->context;
	int ret = -1;

	while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
		ret = cxgbit_wait_rxq(csk);
		if (ret) {
			clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
			break;
		}
	}

	return ret;
}

void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;

	while (!kthread_should_stop()) {
		iscsit_thread_check_cpumask(conn, current, 0);
		if (cxgbit_wait_rxq(csk))
			return;
	}
}