qed_ll2.c

  1. /* QLogic qed NIC Driver
  2. *
  3. * Copyright (c) 2015 QLogic Corporation
  4. *
  5. * This software is available under the terms of the GNU General Public License
  6. * (GPL) Version 2, available from the file COPYING in the main directory of
  7. * this source tree.
  8. */
  9. #include <linux/types.h>
  10. #include <asm/byteorder.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/if_vlan.h>
  13. #include <linux/kernel.h>
  14. #include <linux/pci.h>
  15. #include <linux/slab.h>
  16. #include <linux/stddef.h>
  17. #include <linux/version.h>
  18. #include <linux/workqueue.h>
  19. #include <net/ipv6.h>
  20. #include <linux/bitops.h>
  21. #include <linux/delay.h>
  22. #include <linux/errno.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/io.h>
  25. #include <linux/list.h>
  26. #include <linux/mutex.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/string.h>
  29. #include <linux/qed/qed_ll2_if.h>
  30. #include "qed.h"
  31. #include "qed_cxt.h"
  32. #include "qed_dev_api.h"
  33. #include "qed_hsi.h"
  34. #include "qed_hw.h"
  35. #include "qed_int.h"
  36. #include "qed_ll2.h"
  37. #include "qed_mcp.h"
  38. #include "qed_ooo.h"
  39. #include "qed_reg_addr.h"
  40. #include "qed_sp.h"
  41. #include "qed_roce.h"
  42. #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
  43. #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
  44. #define QED_LL2_TX_SIZE (256)
  45. #define QED_LL2_RX_SIZE (4096)
  46. struct qed_cb_ll2_info {
  47. int rx_cnt;
  48. u32 rx_size;
  49. u8 handle;
  50. bool frags_mapped;
  51. /* Lock protecting LL2 buffer lists in sleepless context */
  52. spinlock_t lock;
  53. struct list_head list;
  54. const struct qed_ll2_cb_ops *cbs;
  55. void *cb_cookie;
  56. };
  57. struct qed_ll2_buffer {
  58. struct list_head list;
  59. void *data;
  60. dma_addr_t phys_addr;
  61. };
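/* Tx-completion callback for the qed_dev-level LL2 interface: unmap the
 * first fragment, invoke the registered tx_cb (if any) and free the skb.
 */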
  62. static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
  63. u8 connection_handle,
  64. void *cookie,
  65. dma_addr_t first_frag_addr,
  66. bool b_last_fragment,
  67. bool b_last_packet)
  68. {
  69. struct qed_dev *cdev = p_hwfn->cdev;
  70. struct sk_buff *skb = cookie;
  71. /* All we need to do is release the mapping */
  72. dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
  73. skb_headlen(skb), DMA_TO_DEVICE);
  74. if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
  75. cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
  76. b_last_fragment);
  77. if (cdev->ll2->frags_mapped)
  78. /* Case where mapped frags were received, need to
  79. * free skb with nr_frags marked as 0
  80. */
  81. skb_shinfo(skb)->nr_frags = 0;
  82. dev_kfree_skb_any(skb);
  83. }
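/* Allocate an rx_size-byte buffer (GFP_ATOMIC) and DMA-map it, past the
 * NET_SKB_PAD headroom, for device writes.
 */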
  84. static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
  85. u8 **data, dma_addr_t *phys_addr)
  86. {
  87. *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
  88. if (!(*data)) {
  89. DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
  90. return -ENOMEM;
  91. }
  92. *phys_addr = dma_map_single(&cdev->pdev->dev,
  93. ((*data) + NET_SKB_PAD),
  94. cdev->ll2->rx_size, DMA_FROM_DEVICE);
  95. if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
  96. DP_INFO(cdev, "Failed to map LL2 buffer data\n");
  97. kfree((*data));
  98. return -ENOMEM;
  99. }
  100. return 0;
  101. }
  102. static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
  103. struct qed_ll2_buffer *buffer)
  104. {
  105. spin_lock_bh(&cdev->ll2->lock);
  106. dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
  107. cdev->ll2->rx_size, DMA_FROM_DEVICE);
  108. kfree(buffer->data);
  109. list_del(&buffer->list);
  110. cdev->ll2->rx_cnt--;
  111. if (!cdev->ll2->rx_cnt)
  112. DP_INFO(cdev, "All LL2 entries were removed\n");
  113. spin_unlock_bh(&cdev->ll2->lock);
  114. return 0;
  115. }
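/* Unmap and free every buffer still tracked on the cdev's LL2 buffer list. */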
  116. static void qed_ll2_kill_buffers(struct qed_dev *cdev)
  117. {
  118. struct qed_ll2_buffer *buffer, *tmp_buffer;
  119. list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
  120. qed_ll2_dealloc_buffer(cdev, buffer);
  121. }
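/* Rx-completion callback for the qed_dev-level LL2 interface: build an skb
 * around the completed buffer, pass it to the registered rx_cb and repost a
 * (new or reused) buffer to the firmware.
 */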
  122. static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
  123. u8 connection_handle,
  124. struct qed_ll2_rx_packet *p_pkt,
  125. struct core_rx_fast_path_cqe *p_cqe,
  126. bool b_last_packet)
  127. {
  128. u16 packet_length = le16_to_cpu(p_cqe->packet_length);
  129. struct qed_ll2_buffer *buffer = p_pkt->cookie;
  130. struct qed_dev *cdev = p_hwfn->cdev;
  131. u16 vlan = le16_to_cpu(p_cqe->vlan);
  132. u32 opaque_data_0, opaque_data_1;
  133. u8 pad = p_cqe->placement_offset;
  134. dma_addr_t new_phys_addr;
  135. struct sk_buff *skb;
  136. bool reuse = false;
  137. int rc = -EINVAL;
  138. u8 *new_data;
  139. opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
  140. opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
  141. DP_VERBOSE(p_hwfn,
  142. (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
  143. "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
  144. (u64)p_pkt->rx_buf_addr, pad, packet_length,
  145. le16_to_cpu(p_cqe->parse_flags.flags), vlan,
  146. opaque_data_0, opaque_data_1);
  147. if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
  148. print_hex_dump(KERN_INFO, "",
  149. DUMP_PREFIX_OFFSET, 16, 1,
  150. buffer->data, packet_length, false);
  151. }
  152. /* Determine if data is valid */
  153. if (packet_length < ETH_HLEN)
  154. reuse = true;
  155. /* Allocate a replacement for buffer; Reuse upon failure */
  156. if (!reuse)
  157. rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
  158. &new_phys_addr);
  159. /* If need to reuse or there's no replacement buffer, repost this */
  160. if (rc)
  161. goto out_post;
  162. skb = build_skb(buffer->data, 0);
  163. if (!skb) {
  164. rc = -ENOMEM;
  165. goto out_post;
  166. }
  167. pad += NET_SKB_PAD;
  168. skb_reserve(skb, pad);
  169. skb_put(skb, packet_length);
  170. skb_checksum_none_assert(skb);
  171. /* Get partial Ethernet information instead of eth_type_trans(),
  172. * since we don't have an associated net_device.
  173. */
  174. skb_reset_mac_header(skb);
  175. skb->protocol = eth_hdr(skb)->h_proto;
  176. /* Pass SKB onward */
  177. if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
  178. if (vlan)
  179. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
  180. cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
  181. opaque_data_0, opaque_data_1);
  182. }
  183. /* Update Buffer information and update FW producer */
  184. buffer->data = new_data;
  185. buffer->phys_addr = new_phys_addr;
  186. out_post:
  187. rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
  188. buffer->phys_addr, 0, buffer, 1);
  189. if (rc)
  190. qed_ll2_dealloc_buffer(cdev, buffer);
  191. }
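/* Validate a connection handle and return the matching qed_ll2_info; when
 * b_only_active is set only an active connection is returned, optionally
 * checked under the connection mutex (b_lock).
 */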
  192. static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
  193. u8 connection_handle,
  194. bool b_lock,
  195. bool b_only_active)
  196. {
  197. struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
  198. if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
  199. return NULL;
  200. if (!p_hwfn->p_ll2_info)
  201. return NULL;
  202. p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
  203. if (b_only_active) {
  204. if (b_lock)
  205. mutex_lock(&p_ll2_conn->mutex);
  206. if (p_ll2_conn->b_active)
  207. p_ret = p_ll2_conn;
  208. if (b_lock)
  209. mutex_unlock(&p_ll2_conn->mutex);
  210. } else {
  211. p_ret = p_ll2_conn;
  212. }
  213. return p_ret;
  214. }
  215. static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
  216. u8 connection_handle)
  217. {
  218. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
  219. }
  220. static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
  221. u8 connection_handle)
  222. {
  223. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
  224. }
  225. static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
  226. *p_hwfn,
  227. u8 connection_handle)
  228. {
  229. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
  230. }
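/* Drain the Tx active descriptor queue of a connection being torn down,
 * completing each packet toward its owner (or returning the buffer to the
 * free pool for the iSCSI OOO connection).
 */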
  231. static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
  232. {
  233. bool b_last_packet = false, b_last_frag = false;
  234. struct qed_ll2_tx_packet *p_pkt = NULL;
  235. struct qed_ll2_info *p_ll2_conn;
  236. struct qed_ll2_tx_queue *p_tx;
  237. dma_addr_t tx_frag;
  238. p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
  239. if (!p_ll2_conn)
  240. return;
  241. p_tx = &p_ll2_conn->tx_queue;
  242. while (!list_empty(&p_tx->active_descq)) {
  243. p_pkt = list_first_entry(&p_tx->active_descq,
  244. struct qed_ll2_tx_packet, list_entry);
  245. if (!p_pkt)
  246. break;
  247. list_del(&p_pkt->list_entry);
  248. b_last_packet = list_empty(&p_tx->active_descq);
  249. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  250. if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
  251. struct qed_ooo_buffer *p_buffer;
  252. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  253. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  254. p_buffer);
  255. } else {
  256. p_tx->cur_completing_packet = *p_pkt;
  257. p_tx->cur_completing_bd_idx = 1;
  258. b_last_frag =
  259. p_tx->cur_completing_bd_idx == p_pkt->bd_used;
  260. tx_frag = p_pkt->bds_set[0].tx_frag;
  261. if (p_ll2_conn->gsi_enable)
  262. qed_ll2b_release_tx_gsi_packet(p_hwfn,
  263. p_ll2_conn->
  264. my_id,
  265. p_pkt->cookie,
  266. tx_frag,
  267. b_last_frag,
  268. b_last_packet);
  269. else
  270. qed_ll2b_complete_tx_packet(p_hwfn,
  271. p_ll2_conn->my_id,
  272. p_pkt->cookie,
  273. tx_frag,
  274. b_last_frag,
  275. b_last_packet);
  276. }
  277. }
  278. }
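/* Tx completion callback: consume BDs up to the firmware consumer index and
 * complete each packet (GSI or regular), dropping the queue lock around the
 * completion call.
 */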
  279. static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  280. {
  281. struct qed_ll2_info *p_ll2_conn = p_cookie;
  282. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  283. u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
  284. struct qed_ll2_tx_packet *p_pkt;
  285. bool b_last_frag = false;
  286. unsigned long flags;
  287. dma_addr_t tx_frag;
  288. int rc = -EINVAL;
  289. spin_lock_irqsave(&p_tx->lock, flags);
  290. if (p_tx->b_completing_packet) {
  291. rc = -EBUSY;
  292. goto out;
  293. }
  294. new_idx = le16_to_cpu(*p_tx->p_fw_cons);
  295. num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
  296. while (num_bds) {
  297. if (list_empty(&p_tx->active_descq))
  298. goto out;
  299. p_pkt = list_first_entry(&p_tx->active_descq,
  300. struct qed_ll2_tx_packet, list_entry);
  301. if (!p_pkt)
  302. goto out;
  303. p_tx->b_completing_packet = true;
  304. p_tx->cur_completing_packet = *p_pkt;
  305. num_bds_in_packet = p_pkt->bd_used;
  306. list_del(&p_pkt->list_entry);
  307. if (num_bds < num_bds_in_packet) {
  308. DP_NOTICE(p_hwfn,
  309. "Rest of BDs does not cover whole packet\n");
  310. goto out;
  311. }
  312. num_bds -= num_bds_in_packet;
  313. p_tx->bds_idx += num_bds_in_packet;
  314. while (num_bds_in_packet--)
  315. qed_chain_consume(&p_tx->txq_chain);
  316. p_tx->cur_completing_bd_idx = 1;
  317. b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
  318. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  319. spin_unlock_irqrestore(&p_tx->lock, flags);
  320. tx_frag = p_pkt->bds_set[0].tx_frag;
  321. if (p_ll2_conn->gsi_enable)
  322. qed_ll2b_complete_tx_gsi_packet(p_hwfn,
  323. p_ll2_conn->my_id,
  324. p_pkt->cookie,
  325. tx_frag,
  326. b_last_frag, !num_bds);
  327. else
  328. qed_ll2b_complete_tx_packet(p_hwfn,
  329. p_ll2_conn->my_id,
  330. p_pkt->cookie,
  331. tx_frag,
  332. b_last_frag, !num_bds);
  333. spin_lock_irqsave(&p_tx->lock, flags);
  334. }
  335. p_tx->b_completing_packet = false;
  336. rc = 0;
  337. out:
  338. spin_unlock_irqrestore(&p_tx->lock, flags);
  339. return rc;
  340. }
  341. static int
  342. qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
  343. struct qed_ll2_info *p_ll2_info,
  344. union core_rx_cqe_union *p_cqe,
  345. unsigned long lock_flags, bool b_last_cqe)
  346. {
  347. struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
  348. struct qed_ll2_rx_packet *p_pkt = NULL;
  349. u16 packet_length, parse_flags, vlan;
  350. u32 src_mac_addrhi;
  351. u16 src_mac_addrlo;
  352. if (!list_empty(&p_rx->active_descq))
  353. p_pkt = list_first_entry(&p_rx->active_descq,
  354. struct qed_ll2_rx_packet, list_entry);
  355. if (!p_pkt) {
  356. DP_NOTICE(p_hwfn,
  357. "GSI Rx completion but active_descq is empty\n");
  358. return -EIO;
  359. }
  360. list_del(&p_pkt->list_entry);
  361. parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
  362. packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
  363. vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
  364. src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
  365. src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
  366. if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
  367. DP_NOTICE(p_hwfn,
  368. "Mismatch between active_descq and the LL2 Rx chain\n");
  369. list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  370. spin_unlock_irqrestore(&p_rx->lock, lock_flags);
  371. qed_ll2b_complete_rx_gsi_packet(p_hwfn,
  372. p_ll2_info->my_id,
  373. p_pkt->cookie,
  374. p_pkt->rx_buf_addr,
  375. packet_length,
  376. p_cqe->rx_cqe_gsi.data_length_error,
  377. parse_flags,
  378. vlan,
  379. src_mac_addrhi,
  380. src_mac_addrlo, b_last_cqe);
  381. spin_lock_irqsave(&p_rx->lock, lock_flags);
  382. return 0;
  383. }
  384. static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
  385. struct qed_ll2_info *p_ll2_conn,
  386. union core_rx_cqe_union *p_cqe,
  387. unsigned long lock_flags,
  388. bool b_last_cqe)
  389. {
  390. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  391. struct qed_ll2_rx_packet *p_pkt = NULL;
  392. if (!list_empty(&p_rx->active_descq))
  393. p_pkt = list_first_entry(&p_rx->active_descq,
  394. struct qed_ll2_rx_packet, list_entry);
  395. if (!p_pkt) {
  396. DP_NOTICE(p_hwfn,
  397. "LL2 Rx completion but active_descq is empty\n");
  398. return -EIO;
  399. }
  400. list_del(&p_pkt->list_entry);
  401. if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
  402. DP_NOTICE(p_hwfn,
  403. "Mismatch between active_descq and the LL2 Rx chain\n");
  404. list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  405. spin_unlock_irqrestore(&p_rx->lock, lock_flags);
  406. qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
  407. p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
  408. spin_lock_irqsave(&p_rx->lock, lock_flags);
  409. return 0;
  410. }
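/* Rx completion callback: walk the Rx completion (RCQ) chain and dispatch
 * each CQE by type (slow path, GSI offload or regular).
 */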
  411. static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
  412. {
  413. struct qed_ll2_info *p_ll2_conn = cookie;
  414. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  415. union core_rx_cqe_union *cqe = NULL;
  416. u16 cq_new_idx = 0, cq_old_idx = 0;
  417. unsigned long flags = 0;
  418. int rc = 0;
  419. spin_lock_irqsave(&p_rx->lock, flags);
  420. cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
  421. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  422. while (cq_new_idx != cq_old_idx) {
  423. bool b_last_cqe = (cq_new_idx == cq_old_idx);
  424. cqe = qed_chain_consume(&p_rx->rcq_chain);
  425. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  426. DP_VERBOSE(p_hwfn,
  427. QED_MSG_LL2,
  428. "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
  429. cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
  430. switch (cqe->rx_cqe_sp.type) {
  431. case CORE_RX_CQE_TYPE_SLOW_PATH:
  432. DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
  433. rc = -EINVAL;
  434. break;
  435. case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
  436. rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
  437. cqe, flags, b_last_cqe);
  438. break;
  439. case CORE_RX_CQE_TYPE_REGULAR:
  440. rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
  441. cqe, flags, b_last_cqe);
  442. break;
  443. default:
  444. rc = -EIO;
  445. }
  446. }
  447. spin_unlock_irqrestore(&p_rx->lock, flags);
  448. return rc;
  449. }
  450. static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
  451. {
  452. struct qed_ll2_info *p_ll2_conn = NULL;
  453. struct qed_ll2_rx_packet *p_pkt = NULL;
  454. struct qed_ll2_rx_queue *p_rx;
  455. p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
  456. if (!p_ll2_conn)
  457. return;
  458. p_rx = &p_ll2_conn->rx_queue;
  459. while (!list_empty(&p_rx->active_descq)) {
  460. dma_addr_t rx_buf_addr;
  461. void *cookie;
  462. bool b_last;
  463. p_pkt = list_first_entry(&p_rx->active_descq,
  464. struct qed_ll2_rx_packet, list_entry);
  465. if (!p_pkt)
  466. break;
  467. list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
  468. if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
  469. struct qed_ooo_buffer *p_buffer;
  470. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  471. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  472. p_buffer);
  473. } else {
  474. rx_buf_addr = p_pkt->rx_buf_addr;
  475. cookie = p_pkt->cookie;
  476. b_last = list_empty(&p_rx->active_descq);
  477. }
  478. }
  479. }
  480. #if IS_ENABLED(CONFIG_QED_ISCSI)
  481. static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
  482. {
  483. u8 bd_flags = 0;
  484. if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
  485. SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
  486. return bd_flags;
  487. }
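/* Loopback Rx handler for the iSCSI out-of-order connection: feed each CQE's
 * opaque OOO data into the isle bookkeeping (delete/add/join/peninsula
 * events) and recycle the buffers accordingly.
 */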
  488. static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
  489. struct qed_ll2_info *p_ll2_conn)
  490. {
  491. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  492. u16 packet_length = 0, parse_flags = 0, vlan = 0;
  493. struct qed_ll2_rx_packet *p_pkt = NULL;
  494. u32 num_ooo_add_to_peninsula = 0, cid;
  495. union core_rx_cqe_union *cqe = NULL;
  496. u16 cq_new_idx = 0, cq_old_idx = 0;
  497. struct qed_ooo_buffer *p_buffer;
  498. struct ooo_opaque *iscsi_ooo;
  499. u8 placement_offset = 0;
  500. u8 cqe_type;
  501. cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
  502. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  503. if (cq_new_idx == cq_old_idx)
  504. return 0;
  505. while (cq_new_idx != cq_old_idx) {
  506. struct core_rx_fast_path_cqe *p_cqe_fp;
  507. cqe = qed_chain_consume(&p_rx->rcq_chain);
  508. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  509. cqe_type = cqe->rx_cqe_sp.type;
  510. if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
  511. DP_NOTICE(p_hwfn,
  512. "Got a non-regular LB LL2 completion [type 0x%02x]\n",
  513. cqe_type);
  514. return -EINVAL;
  515. }
  516. p_cqe_fp = &cqe->rx_cqe_fp;
  517. placement_offset = p_cqe_fp->placement_offset;
  518. parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
  519. packet_length = le16_to_cpu(p_cqe_fp->packet_length);
  520. vlan = le16_to_cpu(p_cqe_fp->vlan);
  521. iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
  522. qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
  523. iscsi_ooo);
  524. cid = le32_to_cpu(iscsi_ooo->cid);
  525. /* Process delete isle first */
  526. if (iscsi_ooo->drop_size)
  527. qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
  528. iscsi_ooo->drop_isle,
  529. iscsi_ooo->drop_size);
  530. if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
  531. continue;
  532. /* Now process create/add/join isles */
  533. if (list_empty(&p_rx->active_descq)) {
  534. DP_NOTICE(p_hwfn,
  535. "LL2 OOO RX chain has no submitted buffers\n"
  536. );
  537. return -EIO;
  538. }
  539. p_pkt = list_first_entry(&p_rx->active_descq,
  540. struct qed_ll2_rx_packet, list_entry);
  541. if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
  542. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
  543. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
  544. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
  545. (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
  546. if (!p_pkt) {
  547. DP_NOTICE(p_hwfn,
  548. "LL2 OOO RX packet is not valid\n");
  549. return -EIO;
  550. }
  551. list_del(&p_pkt->list_entry);
  552. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  553. p_buffer->packet_length = packet_length;
  554. p_buffer->parse_flags = parse_flags;
  555. p_buffer->vlan = vlan;
  556. p_buffer->placement_offset = placement_offset;
  557. qed_chain_consume(&p_rx->rxq_chain);
  558. list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  559. switch (iscsi_ooo->ooo_opcode) {
  560. case TCP_EVENT_ADD_NEW_ISLE:
  561. qed_ooo_add_new_isle(p_hwfn,
  562. p_hwfn->p_ooo_info,
  563. cid,
  564. iscsi_ooo->ooo_isle,
  565. p_buffer);
  566. break;
  567. case TCP_EVENT_ADD_ISLE_RIGHT:
  568. qed_ooo_add_new_buffer(p_hwfn,
  569. p_hwfn->p_ooo_info,
  570. cid,
  571. iscsi_ooo->ooo_isle,
  572. p_buffer,
  573. QED_OOO_RIGHT_BUF);
  574. break;
  575. case TCP_EVENT_ADD_ISLE_LEFT:
  576. qed_ooo_add_new_buffer(p_hwfn,
  577. p_hwfn->p_ooo_info,
  578. cid,
  579. iscsi_ooo->ooo_isle,
  580. p_buffer,
  581. QED_OOO_LEFT_BUF);
  582. break;
  583. case TCP_EVENT_JOIN:
  584. qed_ooo_add_new_buffer(p_hwfn,
  585. p_hwfn->p_ooo_info,
  586. cid,
  587. iscsi_ooo->ooo_isle +
  588. 1,
  589. p_buffer,
  590. QED_OOO_LEFT_BUF);
  591. qed_ooo_join_isles(p_hwfn,
  592. p_hwfn->p_ooo_info,
  593. cid, iscsi_ooo->ooo_isle);
  594. break;
  595. case TCP_EVENT_ADD_PEN:
  596. num_ooo_add_to_peninsula++;
  597. qed_ooo_put_ready_buffer(p_hwfn,
  598. p_hwfn->p_ooo_info,
  599. p_buffer, true);
  600. break;
  601. }
  602. } else {
  603. DP_NOTICE(p_hwfn,
  604. "Unexpected event (%d) TX OOO completion\n",
  605. iscsi_ooo->ooo_opcode);
  606. }
  607. }
  608. return 0;
  609. }
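/* Submit every ready OOO buffer for transmission; a buffer whose Tx packet
 * cannot be prepared is put back on the ready list.
 */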
  610. static void
  611. qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
  612. struct qed_ll2_info *p_ll2_conn)
  613. {
  614. struct qed_ooo_buffer *p_buffer;
  615. int rc;
  616. u16 l4_hdr_offset_w;
  617. dma_addr_t first_frag;
  618. u16 parse_flags;
  619. u8 bd_flags;
  620. /* Submit Tx buffers here */
  621. while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
  622. p_hwfn->p_ooo_info))) {
  623. l4_hdr_offset_w = 0;
  624. bd_flags = 0;
  625. first_frag = p_buffer->rx_buffer_phys_addr +
  626. p_buffer->placement_offset;
  627. parse_flags = p_buffer->parse_flags;
  628. bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
  629. SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
  630. SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
  631. rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
  632. p_buffer->vlan, bd_flags,
  633. l4_hdr_offset_w,
  634. p_ll2_conn->tx_dest, 0,
  635. first_frag,
  636. p_buffer->packet_length,
  637. p_buffer, true);
  638. if (rc) {
  639. qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
  640. p_buffer, false);
  641. break;
  642. }
  643. }
  644. }
  645. static void
  646. qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
  647. struct qed_ll2_info *p_ll2_conn)
  648. {
  649. struct qed_ooo_buffer *p_buffer;
  650. int rc;
  651. while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
  652. p_hwfn->p_ooo_info))) {
  653. rc = qed_ll2_post_rx_buffer(p_hwfn,
  654. p_ll2_conn->my_id,
  655. p_buffer->rx_buffer_phys_addr,
  656. 0, p_buffer, true);
  657. if (rc) {
  658. qed_ooo_put_free_buffer(p_hwfn,
  659. p_hwfn->p_ooo_info, p_buffer);
  660. break;
  661. }
  662. }
  663. }
  664. static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  665. {
  666. struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
  667. int rc;
  668. rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
  669. if (rc)
  670. return rc;
  671. qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
  672. qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
  673. return 0;
  674. }
  675. static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  676. {
  677. struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
  678. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  679. struct qed_ll2_tx_packet *p_pkt = NULL;
  680. struct qed_ooo_buffer *p_buffer;
  681. bool b_dont_submit_rx = false;
  682. u16 new_idx = 0, num_bds = 0;
  683. int rc;
  684. new_idx = le16_to_cpu(*p_tx->p_fw_cons);
  685. num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
  686. if (!num_bds)
  687. return 0;
  688. while (num_bds) {
  689. if (list_empty(&p_tx->active_descq))
  690. return -EINVAL;
  691. p_pkt = list_first_entry(&p_tx->active_descq,
  692. struct qed_ll2_tx_packet, list_entry);
  693. if (!p_pkt)
  694. return -EINVAL;
  695. if (p_pkt->bd_used != 1) {
  696. DP_NOTICE(p_hwfn,
  697. "Unexpectedly many BDs(%d) in TX OOO completion\n",
  698. p_pkt->bd_used);
  699. return -EINVAL;
  700. }
  701. list_del(&p_pkt->list_entry);
  702. num_bds--;
  703. p_tx->bds_idx++;
  704. qed_chain_consume(&p_tx->txq_chain);
  705. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  706. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  707. if (b_dont_submit_rx) {
  708. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  709. p_buffer);
  710. continue;
  711. }
  712. rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
  713. p_buffer->rx_buffer_phys_addr, 0,
  714. p_buffer, true);
  715. if (rc != 0) {
  716. qed_ooo_put_free_buffer(p_hwfn,
  717. p_hwfn->p_ooo_info, p_buffer);
  718. b_dont_submit_rx = true;
  719. }
  720. }
  721. qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
  722. return 0;
  723. }
  724. static int
  725. qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
  726. struct qed_ll2_info *p_ll2_info,
  727. u16 rx_num_ooo_buffers, u16 mtu)
  728. {
  729. struct qed_ooo_buffer *p_buf = NULL;
  730. void *p_virt;
  731. u16 buf_idx;
  732. int rc = 0;
  733. if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
  734. return rc;
  735. if (!rx_num_ooo_buffers)
  736. return -EINVAL;
  737. for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
  738. p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
  739. if (!p_buf) {
  740. rc = -ENOMEM;
  741. goto out;
  742. }
  743. p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
  744. p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
  745. ETH_CACHE_LINE_SIZE - 1) &
  746. ~(ETH_CACHE_LINE_SIZE - 1);
  747. p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  748. p_buf->rx_buffer_size,
  749. &p_buf->rx_buffer_phys_addr,
  750. GFP_KERNEL);
  751. if (!p_virt) {
  752. kfree(p_buf);
  753. rc = -ENOMEM;
  754. goto out;
  755. }
  756. p_buf->rx_buffer_virt_addr = p_virt;
  757. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
  758. }
  759. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  760. "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
  761. rx_num_ooo_buffers, p_buf->rx_buffer_size);
  762. out:
  763. return rc;
  764. }
  765. static void
  766. qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
  767. struct qed_ll2_info *p_ll2_conn)
  768. {
  769. if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
  770. return;
  771. qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
  772. qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
  773. }
  774. static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
  775. struct qed_ll2_info *p_ll2_conn)
  776. {
  777. struct qed_ooo_buffer *p_buffer;
  778. if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
  779. return;
  780. qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
  781. while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
  782. p_hwfn->p_ooo_info))) {
  783. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  784. p_buffer->rx_buffer_size,
  785. p_buffer->rx_buffer_virt_addr,
  786. p_buffer->rx_buffer_phys_addr);
  787. kfree(p_buffer);
  788. }
  789. }
  790. static void qed_ll2_stop_ooo(struct qed_dev *cdev)
  791. {
  792. struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
  793. u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
  794. DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
  795. *handle);
  796. qed_ll2_terminate_connection(hwfn, *handle);
  797. qed_ll2_release_connection(hwfn, *handle);
  798. *handle = QED_LL2_UNUSED_HANDLE;
  799. }
  800. static int qed_ll2_start_ooo(struct qed_dev *cdev,
  801. struct qed_ll2_params *params)
  802. {
  803. struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
  804. u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
  805. struct qed_ll2_info *ll2_info;
  806. int rc;
  807. ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
  808. if (!ll2_info)
  809. return -ENOMEM;
  810. ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
  811. ll2_info->mtu = params->mtu;
  812. ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
  813. ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
  814. ll2_info->tx_tc = OOO_LB_TC;
  815. ll2_info->tx_dest = CORE_TX_DEST_LB;
  816. rc = qed_ll2_acquire_connection(hwfn, ll2_info,
  817. QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
  818. handle);
  819. kfree(ll2_info);
  820. if (rc) {
  821. DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
  822. goto out;
  823. }
  824. rc = qed_ll2_establish_connection(hwfn, *handle);
  825. if (rc) {
  826. DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
  827. goto fail;
  828. }
  829. return 0;
  830. fail:
  831. qed_ll2_release_connection(hwfn, *handle);
  832. out:
  833. *handle = QED_LL2_UNUSED_HANDLE;
  834. return rc;
  835. }
  836. #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
  837. static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
  838. void *p_cookie) { return -EINVAL; }
  839. static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
  840. void *p_cookie) { return -EINVAL; }
  841. static inline int
  842. qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
  843. struct qed_ll2_info *p_ll2_info,
  844. u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
  845. static inline void
  846. qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
  847. struct qed_ll2_info *p_ll2_conn) { return; }
  848. static inline void
  849. qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
  850. struct qed_ll2_info *p_ll2_conn) { return; }
  851. static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
  852. static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
  853. struct qed_ll2_params *params)
  854. { return -EINVAL; }
  855. #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
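/* Post the CORE_RAMROD_RX_QUEUE_START ramrod describing the Rx BD chain,
 * CQE PBL, MTU and error policy for this LL2 connection.
 */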
  856. static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
  857. struct qed_ll2_info *p_ll2_conn,
  858. u8 action_on_error)
  859. {
  860. enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
  861. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  862. struct core_rx_start_ramrod_data *p_ramrod = NULL;
  863. struct qed_spq_entry *p_ent = NULL;
  864. struct qed_sp_init_data init_data;
  865. u16 cqe_pbl_size;
  866. int rc = 0;
  867. /* Get SPQ entry */
  868. memset(&init_data, 0, sizeof(init_data));
  869. init_data.cid = p_ll2_conn->cid;
  870. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  871. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  872. rc = qed_sp_init_request(p_hwfn, &p_ent,
  873. CORE_RAMROD_RX_QUEUE_START,
  874. PROTOCOLID_CORE, &init_data);
  875. if (rc)
  876. return rc;
  877. p_ramrod = &p_ent->ramrod.core_rx_queue_start;
  878. p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
  879. p_ramrod->sb_index = p_rx->rx_sb_index;
  880. p_ramrod->complete_event_flg = 1;
  881. p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
  882. DMA_REGPAIR_LE(p_ramrod->bd_base,
  883. p_rx->rxq_chain.p_phys_addr);
  884. cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
  885. p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
  886. DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
  887. qed_chain_get_pbl_phys(&p_rx->rcq_chain));
  888. p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
  889. p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
  890. p_ramrod->queue_id = p_ll2_conn->queue_id;
  891. p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
  892. : 1;
  893. if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
  894. p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
  895. p_ramrod->mf_si_bcast_accept_all = 1;
  896. p_ramrod->mf_si_mcast_accept_all = 1;
  897. } else {
  898. p_ramrod->mf_si_bcast_accept_all = 0;
  899. p_ramrod->mf_si_mcast_accept_all = 0;
  900. }
  901. p_ramrod->action_on_error.error_type = action_on_error;
  902. p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
  903. return qed_spq_post(p_hwfn, p_ent, NULL);
  904. }
  905. static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
  906. struct qed_ll2_info *p_ll2_conn)
  907. {
  908. enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
  909. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  910. struct core_tx_start_ramrod_data *p_ramrod = NULL;
  911. struct qed_spq_entry *p_ent = NULL;
  912. struct qed_sp_init_data init_data;
  913. union qed_qm_pq_params pq_params;
  914. u16 pq_id = 0, pbl_size;
  915. int rc = -EINVAL;
  916. if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
  917. return 0;
  918. if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
  919. p_ll2_conn->tx_stats_en = 0;
  920. else
  921. p_ll2_conn->tx_stats_en = 1;
  922. /* Get SPQ entry */
  923. memset(&init_data, 0, sizeof(init_data));
  924. init_data.cid = p_ll2_conn->cid;
  925. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  926. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  927. rc = qed_sp_init_request(p_hwfn, &p_ent,
  928. CORE_RAMROD_TX_QUEUE_START,
  929. PROTOCOLID_CORE, &init_data);
  930. if (rc)
  931. return rc;
  932. p_ramrod = &p_ent->ramrod.core_tx_queue_start;
  933. p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
  934. p_ramrod->sb_index = p_tx->tx_sb_index;
  935. p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
  936. p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
  937. p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
  938. DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
  939. qed_chain_get_pbl_phys(&p_tx->txq_chain));
  940. pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
  941. p_ramrod->pbl_size = cpu_to_le16(pbl_size);
  942. memset(&pq_params, 0, sizeof(pq_params));
  943. pq_params.core.tc = p_ll2_conn->tx_tc;
  944. pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
  945. p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
  946. switch (conn_type) {
  947. case QED_LL2_TYPE_ISCSI:
  948. case QED_LL2_TYPE_ISCSI_OOO:
  949. p_ramrod->conn_type = PROTOCOLID_ISCSI;
  950. break;
  951. case QED_LL2_TYPE_ROCE:
  952. p_ramrod->conn_type = PROTOCOLID_ROCE;
  953. break;
  954. default:
  955. p_ramrod->conn_type = PROTOCOLID_ETH;
  956. DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
  957. }
  958. p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
  959. return qed_spq_post(p_hwfn, p_ent, NULL);
  960. }
  961. static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
  962. struct qed_ll2_info *p_ll2_conn)
  963. {
  964. struct core_rx_stop_ramrod_data *p_ramrod = NULL;
  965. struct qed_spq_entry *p_ent = NULL;
  966. struct qed_sp_init_data init_data;
  967. int rc = -EINVAL;
  968. /* Get SPQ entry */
  969. memset(&init_data, 0, sizeof(init_data));
  970. init_data.cid = p_ll2_conn->cid;
  971. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  972. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  973. rc = qed_sp_init_request(p_hwfn, &p_ent,
  974. CORE_RAMROD_RX_QUEUE_STOP,
  975. PROTOCOLID_CORE, &init_data);
  976. if (rc)
  977. return rc;
  978. p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
  979. p_ramrod->complete_event_flg = 1;
  980. p_ramrod->queue_id = p_ll2_conn->queue_id;
  981. return qed_spq_post(p_hwfn, p_ent, NULL);
  982. }
  983. static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
  984. struct qed_ll2_info *p_ll2_conn)
  985. {
  986. struct qed_spq_entry *p_ent = NULL;
  987. struct qed_sp_init_data init_data;
  988. int rc = -EINVAL;
  989. /* Get SPQ entry */
  990. memset(&init_data, 0, sizeof(init_data));
  991. init_data.cid = p_ll2_conn->cid;
  992. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  993. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  994. rc = qed_sp_init_request(p_hwfn, &p_ent,
  995. CORE_RAMROD_TX_QUEUE_STOP,
  996. PROTOCOLID_CORE, &init_data);
  997. if (rc)
  998. return rc;
  999. return qed_spq_post(p_hwfn, p_ent, NULL);
  1000. }
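/* Allocate the Rx resources of a connection: the Rx BD chain, its shadow
 * descriptor array and the PBL-based Rx completion (RCQ) chain.
 */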
  1001. static int
  1002. qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
  1003. struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
  1004. {
  1005. struct qed_ll2_rx_packet *p_descq;
  1006. u32 capacity;
  1007. int rc = 0;
  1008. if (!rx_num_desc)
  1009. goto out;
  1010. rc = qed_chain_alloc(p_hwfn->cdev,
  1011. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1012. QED_CHAIN_MODE_NEXT_PTR,
  1013. QED_CHAIN_CNT_TYPE_U16,
  1014. rx_num_desc,
  1015. sizeof(struct core_rx_bd),
  1016. &p_ll2_info->rx_queue.rxq_chain);
  1017. if (rc) {
  1018. DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
  1019. goto out;
  1020. }
  1021. capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
  1022. p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
  1023. GFP_KERNEL);
  1024. if (!p_descq) {
  1025. rc = -ENOMEM;
  1026. DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
  1027. goto out;
  1028. }
  1029. p_ll2_info->rx_queue.descq_array = p_descq;
  1030. rc = qed_chain_alloc(p_hwfn->cdev,
  1031. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1032. QED_CHAIN_MODE_PBL,
  1033. QED_CHAIN_CNT_TYPE_U16,
  1034. rx_num_desc,
  1035. sizeof(struct core_rx_fast_path_cqe),
  1036. &p_ll2_info->rx_queue.rcq_chain);
  1037. if (rc) {
  1038. DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
  1039. goto out;
  1040. }
  1041. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  1042. "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
  1043. p_ll2_info->conn_type, rx_num_desc);
  1044. out:
  1045. return rc;
  1046. }
  1047. static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
  1048. struct qed_ll2_info *p_ll2_info,
  1049. u16 tx_num_desc)
  1050. {
  1051. struct qed_ll2_tx_packet *p_descq;
  1052. u32 capacity;
  1053. int rc = 0;
  1054. if (!tx_num_desc)
  1055. goto out;
  1056. rc = qed_chain_alloc(p_hwfn->cdev,
  1057. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  1058. QED_CHAIN_MODE_PBL,
  1059. QED_CHAIN_CNT_TYPE_U16,
  1060. tx_num_desc,
  1061. sizeof(struct core_tx_bd),
  1062. &p_ll2_info->tx_queue.txq_chain);
  1063. if (rc)
  1064. goto out;
  1065. capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
  1066. p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
  1067. GFP_KERNEL);
  1068. if (!p_descq) {
  1069. rc = -ENOMEM;
  1070. goto out;
  1071. }
  1072. p_ll2_info->tx_queue.descq_array = p_descq;
  1073. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  1074. "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
  1075. p_ll2_info->conn_type, tx_num_desc);
  1076. out:
  1077. if (rc)
  1078. DP_NOTICE(p_hwfn,
  1079. "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
  1080. tx_num_desc);
  1081. return rc;
  1082. }
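/* Reserve a free connection slot, copy the caller's parameters, allocate the
 * Rx/Tx (and, for iSCSI OOO, buffer) resources and register the completion
 * callbacks; the slot index is returned as the connection handle.
 */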
  1083. int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
  1084. struct qed_ll2_info *p_params,
  1085. u16 rx_num_desc,
  1086. u16 tx_num_desc,
  1087. u8 *p_connection_handle)
  1088. {
  1089. qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
  1090. struct qed_ll2_info *p_ll2_info = NULL;
  1091. int rc;
  1092. u8 i;
  1093. if (!p_connection_handle || !p_hwfn->p_ll2_info)
  1094. return -EINVAL;
  1095. /* Find a free connection to be used */
  1096. for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
  1097. mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
  1098. if (p_hwfn->p_ll2_info[i].b_active) {
  1099. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1100. continue;
  1101. }
  1102. p_hwfn->p_ll2_info[i].b_active = true;
  1103. p_ll2_info = &p_hwfn->p_ll2_info[i];
  1104. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1105. break;
  1106. }
  1107. if (!p_ll2_info)
  1108. return -EBUSY;
  1109. p_ll2_info->conn_type = p_params->conn_type;
  1110. p_ll2_info->mtu = p_params->mtu;
  1111. p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
  1112. p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
  1113. p_ll2_info->tx_tc = p_params->tx_tc;
  1114. p_ll2_info->tx_dest = p_params->tx_dest;
  1115. p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
  1116. p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
  1117. p_ll2_info->gsi_enable = p_params->gsi_enable;
  1118. rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
  1119. if (rc)
  1120. goto q_allocate_fail;
  1121. rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
  1122. if (rc)
  1123. goto q_allocate_fail;
  1124. rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
  1125. rx_num_desc * 2, p_params->mtu);
  1126. if (rc)
  1127. goto q_allocate_fail;
  1128. /* Register callbacks for the Rx/Tx queues */
  1129. if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
  1130. comp_rx_cb = qed_ll2_lb_rxq_completion;
  1131. comp_tx_cb = qed_ll2_lb_txq_completion;
  1132. } else {
  1133. comp_rx_cb = qed_ll2_rxq_completion;
  1134. comp_tx_cb = qed_ll2_txq_completion;
  1135. }
  1136. if (rx_num_desc) {
  1137. qed_int_register_cb(p_hwfn, comp_rx_cb,
  1138. &p_hwfn->p_ll2_info[i],
  1139. &p_ll2_info->rx_queue.rx_sb_index,
  1140. &p_ll2_info->rx_queue.p_fw_cons);
  1141. p_ll2_info->rx_queue.b_cb_registred = true;
  1142. }
  1143. if (tx_num_desc) {
  1144. qed_int_register_cb(p_hwfn,
  1145. comp_tx_cb,
  1146. &p_hwfn->p_ll2_info[i],
  1147. &p_ll2_info->tx_queue.tx_sb_index,
  1148. &p_ll2_info->tx_queue.p_fw_cons);
  1149. p_ll2_info->tx_queue.b_cb_registred = true;
  1150. }
  1151. *p_connection_handle = i;
  1152. return rc;
  1153. q_allocate_fail:
  1154. qed_ll2_release_connection(p_hwfn, i);
  1155. return -ENOMEM;
  1156. }
  1157. static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
  1158. struct qed_ll2_info *p_ll2_conn)
  1159. {
  1160. u8 action_on_error = 0;
  1161. if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
  1162. return 0;
  1163. DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
  1164. SET_FIELD(action_on_error,
  1165. CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
  1166. p_ll2_conn->ai_err_packet_too_big);
  1167. SET_FIELD(action_on_error,
  1168. CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
  1169. return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
  1170. }
  1171. int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
  1172. {
  1173. struct qed_ll2_info *p_ll2_conn;
  1174. struct qed_ll2_rx_queue *p_rx;
  1175. struct qed_ll2_tx_queue *p_tx;
  1176. int rc = -EINVAL;
  1177. u32 i, capacity;
  1178. u8 qid;
  1179. p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
  1180. if (!p_ll2_conn)
  1181. return -EINVAL;
  1182. p_rx = &p_ll2_conn->rx_queue;
  1183. p_tx = &p_ll2_conn->tx_queue;
  1184. qed_chain_reset(&p_rx->rxq_chain);
  1185. qed_chain_reset(&p_rx->rcq_chain);
  1186. INIT_LIST_HEAD(&p_rx->active_descq);
  1187. INIT_LIST_HEAD(&p_rx->free_descq);
  1188. INIT_LIST_HEAD(&p_rx->posting_descq);
  1189. spin_lock_init(&p_rx->lock);
  1190. capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
  1191. for (i = 0; i < capacity; i++)
  1192. list_add_tail(&p_rx->descq_array[i].list_entry,
  1193. &p_rx->free_descq);
  1194. *p_rx->p_fw_cons = 0;
  1195. qed_chain_reset(&p_tx->txq_chain);
  1196. INIT_LIST_HEAD(&p_tx->active_descq);
  1197. INIT_LIST_HEAD(&p_tx->free_descq);
  1198. INIT_LIST_HEAD(&p_tx->sending_descq);
  1199. spin_lock_init(&p_tx->lock);
  1200. capacity = qed_chain_get_capacity(&p_tx->txq_chain);
  1201. for (i = 0; i < capacity; i++)
  1202. list_add_tail(&p_tx->descq_array[i].list_entry,
  1203. &p_tx->free_descq);
  1204. p_tx->cur_completing_bd_idx = 0;
  1205. p_tx->bds_idx = 0;
  1206. p_tx->b_completing_packet = false;
  1207. p_tx->cur_send_packet = NULL;
  1208. p_tx->cur_send_frag_num = 0;
  1209. p_tx->cur_completing_frag_num = 0;
  1210. *p_tx->p_fw_cons = 0;
  1211. qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
  1212. qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
  1213. p_ll2_conn->queue_id = qid;
  1214. p_ll2_conn->tx_stats_id = qid;
  1215. p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
  1216. GTT_BAR0_MAP_REG_TSDM_RAM +
  1217. TSTORM_LL2_RX_PRODS_OFFSET(qid);
  1218. p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
  1219. qed_db_addr(p_ll2_conn->cid,
  1220. DQ_DEMS_LEGACY);
  1221. rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
  1222. if (rc)
  1223. return rc;
  1224. rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
  1225. if (rc)
  1226. return rc;
  1227. if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
  1228. qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
  1229. qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
  1230. return rc;
  1231. }
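/* Move any buffers staged on the posting list (and the supplied packet, if
 * any) to the active list, then write the updated BD/CQE producer values to
 * the firmware.
 */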
  1232. static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
  1233. struct qed_ll2_rx_queue *p_rx,
  1234. struct qed_ll2_rx_packet *p_curp)
  1235. {
  1236. struct qed_ll2_rx_packet *p_posting_packet = NULL;
  1237. struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
  1238. bool b_notify_fw = false;
  1239. u16 bd_prod, cq_prod;
  1240. /* This handles the flushing of already posted buffers */
  1241. while (!list_empty(&p_rx->posting_descq)) {
  1242. p_posting_packet = list_first_entry(&p_rx->posting_descq,
  1243. struct qed_ll2_rx_packet,
  1244. list_entry);
  1245. list_move_tail(&p_posting_packet->list_entry,
  1246. &p_rx->active_descq);
  1247. b_notify_fw = true;
  1248. }
  1249. /* This handles the supplied packet [if there is one] */
  1250. if (p_curp) {
  1251. list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
  1252. b_notify_fw = true;
  1253. }
  1254. if (!b_notify_fw)
  1255. return;
  1256. bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
  1257. cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
  1258. rx_prod.bd_prod = cpu_to_le16(bd_prod);
  1259. rx_prod.cqe_prod = cpu_to_le16(cq_prod);
  1260. DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
  1261. }
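/* Post a single Rx buffer on a connection's Rx chain; when notify_fw is
 * clear the buffer is only staged on the posting list and the firmware
 * producers are not updated yet.
 */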
  1262. int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
  1263. u8 connection_handle,
  1264. dma_addr_t addr,
  1265. u16 buf_len, void *cookie, u8 notify_fw)
  1266. {
  1267. struct core_rx_bd_with_buff_len *p_curb = NULL;
  1268. struct qed_ll2_rx_packet *p_curp = NULL;
  1269. struct qed_ll2_info *p_ll2_conn;
  1270. struct qed_ll2_rx_queue *p_rx;
  1271. unsigned long flags;
  1272. void *p_data;
  1273. int rc = 0;
  1274. p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
  1275. if (!p_ll2_conn)
  1276. return -EINVAL;
  1277. p_rx = &p_ll2_conn->rx_queue;
  1278. spin_lock_irqsave(&p_rx->lock, flags);
  1279. if (!list_empty(&p_rx->free_descq))
  1280. p_curp = list_first_entry(&p_rx->free_descq,
  1281. struct qed_ll2_rx_packet, list_entry);
  1282. if (p_curp) {
  1283. if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
  1284. qed_chain_get_elem_left(&p_rx->rcq_chain)) {
  1285. p_data = qed_chain_produce(&p_rx->rxq_chain);
  1286. p_curb = (struct core_rx_bd_with_buff_len *)p_data;
  1287. qed_chain_produce(&p_rx->rcq_chain);
  1288. }
  1289. }
  1290. /* If we're lacking entries, let's try to flush buffers to FW */
  1291. if (!p_curp || !p_curb) {
  1292. rc = -EBUSY;
  1293. p_curp = NULL;
  1294. goto out_notify;
  1295. }
  1296. /* We have an Rx packet we can fill */
  1297. DMA_REGPAIR_LE(p_curb->addr, addr);
  1298. p_curb->buff_length = cpu_to_le16(buf_len);
  1299. p_curp->rx_buf_addr = addr;
  1300. p_curp->cookie = cookie;
  1301. p_curp->rxq_bd = p_curb;
  1302. p_curp->buf_length = buf_len;
  1303. list_del(&p_curp->list_entry);
  1304. /* Check if we only want to enqueue this packet without informing FW */
  1305. if (!notify_fw) {
  1306. list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
  1307. goto out;
  1308. }
  1309. out_notify:
  1310. qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
  1311. out:
  1312. spin_unlock_irqrestore(&p_rx->lock, flags);
  1313. return rc;
  1314. }
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
                                          struct qed_ll2_tx_queue *p_tx,
                                          struct qed_ll2_tx_packet *p_curp,
                                          u8 num_of_bds,
                                          dma_addr_t first_frag,
                                          u16 first_frag_len, void *p_cookie,
                                          u8 notify_fw)
{
        list_del(&p_curp->list_entry);
        p_curp->cookie = p_cookie;
        p_curp->bd_used = num_of_bds;
        p_curp->notify_fw = notify_fw;
        p_tx->cur_send_packet = p_curp;
        p_tx->cur_send_frag_num = 0;

        p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
        p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
        p_tx->cur_send_frag_num++;
}

static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                                             struct qed_ll2_info *p_ll2,
                                             struct qed_ll2_tx_packet *p_curp,
                                             u8 num_of_bds,
                                             enum core_tx_dest tx_dest,
                                             u16 vlan,
                                             u8 bd_flags,
                                             u16 l4_hdr_offset_w,
                                             enum core_roce_flavor_type type,
                                             dma_addr_t first_frag,
                                             u16 first_frag_len)
{
        struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
        u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
        struct core_tx_bd *start_bd = NULL;
        u16 frag_idx;

        start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
        start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
                  cpu_to_le16(l4_hdr_offset_w));
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
        start_bd->bd_flags.as_bitfield = bd_flags;
        start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
            CORE_TX_BD_FLAGS_START_BD_SHIFT;
        SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
        SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
        DMA_REGPAIR_LE(start_bd->addr, first_frag);
        start_bd->nbytes = cpu_to_le16(first_frag_len);

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
                   p_ll2->queue_id,
                   p_ll2->cid,
                   p_ll2->conn_type,
                   prod_idx,
                   first_frag_len,
                   num_of_bds,
                   le32_to_cpu(start_bd->addr.hi),
                   le32_to_cpu(start_bd->addr.lo));

        if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
                return;

        /* Need to provide the packet with additional BDs for frags */
        for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
             frag_idx < num_of_bds; frag_idx++) {
                struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

                *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
                (*p_bd)->bd_flags.as_bitfield = 0;
                (*p_bd)->bitfield1 = 0;
                (*p_bd)->bitfield0 = 0;
                p_curp->bds_set[frag_idx].tx_frag = 0;
                p_curp->bds_set[frag_idx].frag_len = 0;
        }
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn)
{
        bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct qed_ll2_tx_packet *p_pkt = NULL;
        struct core_db_data db_msg = { 0, 0, 0 };
        u16 bd_prod;

        /* If there are missing BDs, don't do anything now */
        if (p_ll2_conn->tx_queue.cur_send_frag_num !=
            p_ll2_conn->tx_queue.cur_send_packet->bd_used)
                return;

        /* Push the current packet to the list and clean after it */
        list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
                      &p_ll2_conn->tx_queue.sending_descq);
        p_ll2_conn->tx_queue.cur_send_packet = NULL;
        p_ll2_conn->tx_queue.cur_send_frag_num = 0;

        /* Notify FW of packet only if requested to */
        if (!b_notify)
                return;

        bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

        while (!list_empty(&p_tx->sending_descq)) {
                p_pkt = list_first_entry(&p_tx->sending_descq,
                                         struct qed_ll2_tx_packet, list_entry);
                if (!p_pkt)
                        break;

                list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
        }

        SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_TX_BD_PROD_CMD);
        db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db_msg.spq_prod = cpu_to_le16(bd_prod);

        /* Make sure the BDs data is updated before ringing the doorbell */
        wmb();

        DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
                   p_ll2_conn->queue_id,
                   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
}

int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
                              u8 connection_handle,
                              u8 num_of_bds,
                              u16 vlan,
                              u8 bd_flags,
                              u16 l4_hdr_offset_w,
                              enum qed_ll2_tx_dest e_tx_dest,
                              enum qed_ll2_roce_flavor_type qed_roce_flavor,
                              dma_addr_t first_frag,
                              u16 first_frag_len, void *cookie, u8 notify_fw)
{
        struct qed_ll2_tx_packet *p_curp = NULL;
        struct qed_ll2_info *p_ll2_conn = NULL;
        enum core_roce_flavor_type roce_flavor;
        struct qed_ll2_tx_queue *p_tx;
        struct qed_chain *p_tx_chain;
        enum core_tx_dest tx_dest;
        unsigned long flags;
        int rc = 0;

        p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return -EINVAL;
        p_tx = &p_ll2_conn->tx_queue;
        p_tx_chain = &p_tx->txq_chain;

        if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
                return -EIO;

        spin_lock_irqsave(&p_tx->lock, flags);
        if (p_tx->cur_send_packet) {
                rc = -EEXIST;
                goto out;
        }

        /* Get entry, but only if we have tx elements for it */
        if (!list_empty(&p_tx->free_descq))
                p_curp = list_first_entry(&p_tx->free_descq,
                                          struct qed_ll2_tx_packet, list_entry);
        if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
                p_curp = NULL;

        if (!p_curp) {
                rc = -EBUSY;
                goto out;
        }

        tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
                                                    CORE_TX_DEST_LB;
        if (qed_roce_flavor == QED_LL2_ROCE) {
                roce_flavor = CORE_ROCE;
        } else if (qed_roce_flavor == QED_LL2_RROCE) {
                roce_flavor = CORE_RROCE;
        } else {
                rc = -EINVAL;
                goto out;
        }

        /* Prepare packet and BD, and perhaps send a doorbell to FW */
        qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
                                      num_of_bds, first_frag,
                                      first_frag_len, cookie, notify_fw);
        qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
                                         num_of_bds, tx_dest,
                                         vlan, bd_flags, l4_hdr_offset_w,
                                         roce_flavor,
                                         first_frag, first_frag_len);

        qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
        spin_unlock_irqrestore(&p_tx->lock, flags);
        return rc;
}

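/* Illustrative sketch (not part of the driver): transmitting a packet that
 * fits in a single buffer. With num_of_bds == 1 all BDs are supplied here,
 * so the doorbell is rung from within this call when notify_fw is set. The
 * names hwfn, handle, frag_phys, frag_len and cookie are hypothetical; the
 * flavor must be one of the two values the function recognizes.
 *
 *      rc = qed_ll2_prepare_tx_packet(hwfn, handle, 1, 0, 0, 0,
 *                                     QED_LL2_TX_DEST_NW, QED_LL2_ROCE,
 *                                     frag_phys, frag_len, cookie, 1);
 */
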
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
                                      u8 connection_handle,
                                      dma_addr_t addr, u16 nbytes)
{
        struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
        struct qed_ll2_info *p_ll2_conn = NULL;
        u16 cur_send_frag_num = 0;
        struct core_tx_bd *p_bd;
        unsigned long flags;

        p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return -EINVAL;

        if (!p_ll2_conn->tx_queue.cur_send_packet)
                return -EINVAL;

        p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
        cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

        if (cur_send_frag_num >= p_cur_send_packet->bd_used)
                return -EINVAL;

        /* Fill the BD information, and possibly notify FW */
        p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
        DMA_REGPAIR_LE(p_bd->addr, addr);
        p_bd->nbytes = cpu_to_le16(nbytes);
        p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
        p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
        p_ll2_conn->tx_queue.cur_send_frag_num++;

        spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
        qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
        spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

        return 0;
}

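/* Illustrative sketch (not part of the driver): a scatter-gather transmit.
 * The first fragment goes through qed_ll2_prepare_tx_packet() with
 * num_of_bds covering all fragments; the remaining fragments are appended
 * one at a time, and the doorbell only rings once cur_send_frag_num reaches
 * bd_used, i.e. from the last qed_ll2_set_fragment_of_tx_packet() call.
 * The names hwfn, handle, frags[], nr_frags and cookie are hypothetical.
 *
 *      rc = qed_ll2_prepare_tx_packet(hwfn, handle, nr_frags, 0, 0, 0,
 *                                     QED_LL2_TX_DEST_NW, QED_LL2_ROCE,
 *                                     frags[0].phys, frags[0].len,
 *                                     cookie, 1);
 *      for (i = 1; !rc && i < nr_frags; i++)
 *              rc = qed_ll2_set_fragment_of_tx_packet(hwfn, handle,
 *                                                     frags[i].phys,
 *                                                     frags[i].len);
 */
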
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
        struct qed_ll2_info *p_ll2_conn = NULL;
        int rc = -EINVAL;

        p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return -EINVAL;

        /* Stop Tx & Rx of connection, if needed */
        if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
                rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
                        return rc;
                qed_ll2_txq_flush(p_hwfn, connection_handle);
        }

        if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
                rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
                        return rc;
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
        }

        if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
                qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

        return rc;
}

void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
        struct qed_ll2_info *p_ll2_conn = NULL;

        p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
        if (!p_ll2_conn)
                return;

        if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
                p_ll2_conn->rx_queue.b_cb_registred = false;
                qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
        }

        if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
                p_ll2_conn->tx_queue.b_cb_registred = false;
                qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
        }

        kfree(p_ll2_conn->tx_queue.descq_array);
        qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

        kfree(p_ll2_conn->rx_queue.descq_array);
        qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
        qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

        qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

        qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

        mutex_lock(&p_ll2_conn->mutex);
        p_ll2_conn->b_active = false;
        mutex_unlock(&p_ll2_conn->mutex);
}

struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ll2_info *p_ll2_connections;
        u8 i;

        /* Allocate LL2's set struct */
        p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
                                    sizeof(struct qed_ll2_info), GFP_KERNEL);
        if (!p_ll2_connections) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
                return NULL;
        }

        for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
                p_ll2_connections[i].my_id = i;

        return p_ll2_connections;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn,
                   struct qed_ll2_info *p_ll2_connections)
{
        int i;

        for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
                mutex_init(&p_ll2_connections[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn,
                  struct qed_ll2_info *p_ll2_connections)
{
        kfree(p_ll2_connections);
}

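/* Illustrative sketch (not part of the driver): the per-hwfn lifecycle of
 * the LL2 connection array as implied by the three helpers above; the exact
 * call sites live elsewhere in qed, so the error handling here is a minimal
 * assumption.
 *
 *      p_hwfn->p_ll2_info = qed_ll2_alloc(p_hwfn);
 *      if (!p_hwfn->p_ll2_info)
 *              return -ENOMEM;
 *      qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
 *      ...
 *      qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
 *      p_hwfn->p_ll2_info = NULL;
 */
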
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                struct qed_ll2_info *p_ll2_conn,
                                struct qed_ll2_stats *p_stats)
{
        struct core_ll2_tstorm_per_queue_stat tstats;
        u8 qid = p_ll2_conn->queue_id;
        u32 tstats_addr;

        memset(&tstats, 0, sizeof(tstats));
        tstats_addr = BAR0_MAP_REG_TSDM_RAM +
                      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
        qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

        p_stats->packet_too_big_discard =
                        HILO_64_REGPAIR(tstats.packet_too_big_discard);
        p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                struct qed_ll2_info *p_ll2_conn,
                                struct qed_ll2_stats *p_stats)
{
        struct core_ll2_ustorm_per_queue_stat ustats;
        u8 qid = p_ll2_conn->queue_id;
        u32 ustats_addr;

        memset(&ustats, 0, sizeof(ustats));
        ustats_addr = BAR0_MAP_REG_USDM_RAM +
                      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
        qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

        p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
        p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
        p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
        p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
        p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
        p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                struct qed_ll2_info *p_ll2_conn,
                                struct qed_ll2_stats *p_stats)
{
        struct core_ll2_pstorm_per_queue_stat pstats;
        u8 stats_id = p_ll2_conn->tx_stats_id;
        u32 pstats_addr;

        memset(&pstats, 0, sizeof(pstats));
        pstats_addr = BAR0_MAP_REG_PSDM_RAM +
                      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
        qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

        p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
        p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
        p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
        p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
        p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
        p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
                      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
        struct qed_ll2_info *p_ll2_conn = NULL;
        struct qed_ptt *p_ptt;

        memset(p_stats, 0, sizeof(*p_stats));

        if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
            !p_hwfn->p_ll2_info)
                return -EINVAL;

        p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                return -EINVAL;
        }

        _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
        _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
        if (p_ll2_conn->tx_stats_en)
                _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

        qed_ptt_release(p_hwfn, p_ptt);
        return 0;
}

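/* Illustrative sketch (not part of the driver): reading the counters of an
 * established connection. qed_ll2_get_stats() zeroes the caller's struct
 * before filling it, so a stack variable is fine. The handle variable is
 * hypothetical.
 *
 *      struct qed_ll2_stats stats;
 *
 *      rc = qed_ll2_get_stats(p_hwfn, handle, &stats);
 *      if (!rc)
 *              DP_VERBOSE(p_hwfn, QED_MSG_LL2, "rx ucast pkts %llu\n",
 *                         stats.rcv_ucast_pkts);
 */
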
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
                                    const struct qed_ll2_cb_ops *ops,
                                    void *cookie)
{
        cdev->ll2->cbs = ops;
        cdev->ll2->cb_cookie = cookie;
}

static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
        struct qed_ll2_info ll2_info;
        struct qed_ll2_buffer *buffer, *tmp_buffer;
        enum qed_ll2_conn_type conn_type;
        struct qed_ptt *p_ptt;
        int rc, i;
        u8 gsi_enable = 1;

        /* Initialize LL2 locks & lists */
        INIT_LIST_HEAD(&cdev->ll2->list);
        spin_lock_init(&cdev->ll2->lock);
        cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
                             L1_CACHE_BYTES + params->mtu;
        cdev->ll2->frags_mapped = params->frags_mapped;

        /* Allocate memory for LL2 */
        DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
                cdev->ll2->rx_size);
        for (i = 0; i < QED_LL2_RX_SIZE; i++) {
                buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!buffer) {
                        DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
                        goto fail;
                }

                rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
                                          &buffer->phys_addr);
                if (rc) {
                        kfree(buffer);
                        goto fail;
                }

                list_add_tail(&buffer->list, &cdev->ll2->list);
        }

        switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
        case QED_PCI_ISCSI:
                conn_type = QED_LL2_TYPE_ISCSI;
                gsi_enable = 0;
                break;
        case QED_PCI_ETH_ROCE:
                conn_type = QED_LL2_TYPE_ROCE;
                break;
        default:
                conn_type = QED_LL2_TYPE_TEST;
        }

        /* Prepare the temporary ll2 information */
        memset(&ll2_info, 0, sizeof(ll2_info));
        ll2_info.conn_type = conn_type;
        ll2_info.mtu = params->mtu;
        ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
        ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
        ll2_info.tx_tc = 0;
        ll2_info.tx_dest = CORE_TX_DEST_NW;
        ll2_info.gsi_enable = gsi_enable;

        rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
                                        QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
                                        &cdev->ll2->handle);
        if (rc) {
                DP_INFO(cdev, "Failed to acquire LL2 connection\n");
                goto fail;
        }

        rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
                                          cdev->ll2->handle);
        if (rc) {
                DP_INFO(cdev, "Failed to establish LL2 connection\n");
                goto release_fail;
        }

        /* Post all Rx buffers to FW */
        spin_lock_bh(&cdev->ll2->lock);
        list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
                rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
                                            cdev->ll2->handle,
                                            buffer->phys_addr, 0, buffer, 1);
                if (rc) {
                        DP_INFO(cdev,
                                "Failed to post an Rx buffer; Deleting it\n");
                        dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
                                         cdev->ll2->rx_size, DMA_FROM_DEVICE);
                        kfree(buffer->data);
                        list_del(&buffer->list);
                        kfree(buffer);
                } else {
                        cdev->ll2->rx_cnt++;
                }
        }
        spin_unlock_bh(&cdev->ll2->lock);

        if (!cdev->ll2->rx_cnt) {
                DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
                goto release_terminate;
        }

        if (!is_valid_ether_addr(params->ll2_mac_address)) {
                DP_INFO(cdev, "Invalid Ethernet address\n");
                goto release_terminate;
        }

        if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
            cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
                rc = qed_ll2_start_ooo(cdev, params);
                if (rc) {
                        DP_INFO(cdev,
                                "Failed to initialize the OOO LL2 queue\n");
                        goto release_terminate;
                }
        }

        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
        if (!p_ptt) {
                DP_INFO(cdev, "Failed to acquire PTT\n");
                goto release_terminate;
        }

        rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
                                    params->ll2_mac_address);
        qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
        if (rc) {
                DP_ERR(cdev, "Failed to allocate LLH filter\n");
                goto release_terminate_all;
        }

        ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
        return 0;

release_terminate_all:

release_terminate:
        qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
        qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
        qed_ll2_kill_buffers(cdev);
        cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
        return -EINVAL;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
        struct qed_ptt *p_ptt;
        int rc;

        if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
                return 0;

        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
        if (!p_ptt) {
                DP_INFO(cdev, "Failed to acquire PTT\n");
                goto fail;
        }

        qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
                                  cdev->ll2_mac_address);
        qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
        eth_zero_addr(cdev->ll2_mac_address);

        if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
            cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
                qed_ll2_stop_ooo(cdev);

        rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
                                          cdev->ll2->handle);
        if (rc)
                DP_INFO(cdev, "Failed to terminate LL2 connection\n");

        qed_ll2_kill_buffers(cdev);

        qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
        cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

        return rc;
fail:
        return -EINVAL;
}

static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
        const skb_frag_t *frag;
        int rc = -EINVAL, i;
        dma_addr_t mapping;
        u16 vlan = 0;
        u8 flags = 0;

        if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
                DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
                return -EINVAL;
        }

        if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
                DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
                       1 + skb_shinfo(skb)->nr_frags);
                return -EINVAL;
        }

        mapping = dma_map_single(&cdev->pdev->dev, skb->data,
                                 skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
                DP_NOTICE(cdev, "SKB mapping failed\n");
                return -EINVAL;
        }

        /* Request HW to calculate IP csum */
        if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
              ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

        if (skb_vlan_tag_present(skb)) {
                vlan = skb_vlan_tag_get(skb);
                flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
        }

        rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
                                       cdev->ll2->handle,
                                       1 + skb_shinfo(skb)->nr_frags,
                                       vlan, flags, 0, QED_LL2_TX_DEST_NW,
                                       0 /* RoCE FLAVOR */,
                                       mapping, skb->len, skb, 1);
        if (rc)
                goto err;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                if (!cdev->ll2->frags_mapped) {
                        mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
                                                   skb_frag_size(frag),
                                                   DMA_TO_DEVICE);

                        if (unlikely(dma_mapping_error(&cdev->pdev->dev,
                                                       mapping))) {
                                DP_NOTICE(cdev,
                                          "Unable to map frag - dropping packet\n");
                                rc = -ENOMEM;
                                goto err;
                        }
                } else {
                        mapping = page_to_phys(skb_frag_page(frag)) |
                            frag->page_offset;
                }

                rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
                                                       cdev->ll2->handle,
                                                       mapping,
                                                       skb_frag_size(frag));

                /* If this fails there's not much to do here; a partial packet
                 * has been posted and we can't free the memory, so we will
                 * need to wait for completion.
                 */
                if (rc)
                        goto err2;
        }

        return 0;

err:
        dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
        return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
        if (!cdev->ll2)
                return -EINVAL;

        return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
                                 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
        .start = &qed_ll2_start,
        .stop = &qed_ll2_stop,
        .start_xmit = &qed_ll2_start_xmit,
        .register_cb_ops = &qed_ll2_register_cb_ops,
        .get_stats = &qed_ll2_stats,
};

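/* Illustrative sketch (not part of the driver): how an upper-layer module
 * that received this ops table through the qed probe interface might drive
 * it. The ll2_ops, cdev, params, my_cbs, my_cookie and skb names are
 * hypothetical; only the five callbacks come from qed_ll2_ops_pass above.
 *
 *      ll2_ops->register_cb_ops(cdev, &my_cbs, my_cookie);
 *      rc = ll2_ops->start(cdev, &params);
 *      if (!rc) {
 *              rc = ll2_ops->start_xmit(cdev, skb);
 *              ...
 *              ll2_ops->stop(cdev);
 *      }
 */
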
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
        cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);

        return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
        kfree(cdev->ll2);
        cdev->ll2 = NULL;
}