/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_rdma.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}
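
/* Allocate an Rx buffer for the common LL2 layer and map it for DMA.
 * The returned data pointer is the raw kmalloc() allocation; the DMA
 * mapping starts NET_SKB_PAD bytes in so a later build_skb() on the
 * completed buffer has headroom.
 */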
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
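
/* Rx completion handler used by the common LL2 layer: allocate a
 * replacement buffer (or reuse the current one on runt packets or
 * allocation failure), wrap the completed data in an skb and hand it
 * to the registered rx_cb, then repost a buffer to the firmware.
 */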
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		DP_INFO(cdev, "Failed to build SKB\n");
		kfree(buffer->data);
		goto out_post1;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	} else {
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");
		kfree(buffer->data);
	}

out_post1:
	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
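
/* Flush any Tx packets still sitting on the active descriptor list.
 * OOO buffers are returned to the free pool; for all other connection
 * types the owner is notified through the tx_release_cb callback.
 */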
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
		spin_lock_irqsave(&p_tx->lock, flags);
	}
	spin_unlock_irqrestore(&p_tx->lock, flags);
}

static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
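
/* Translate a GSI (RDMA) Rx CQE into the common completion descriptor;
 * for this CQE type the opaque data fields carry the source MAC address.
 */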
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
		le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
		DP_NOTICE(p_hwfn,
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);
		return -EINVAL;
	}

	if (!p_ll2_conn->cbs.slowpath_cb) {
		DP_NOTICE(p_hwfn,
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
		return -EINVAL;
	}

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    p_ll2_conn->my_id,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
						     cqe, &flags);
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;
		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
		spin_lock_irqsave(&p_rx->lock, flags);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
}

static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
{
	struct ooo_opaque *iscsi_ooo;
	u32 cid;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
		return false;

	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
		return false;

	/* Need to make a flush */
	cid = le32_to_cpu(iscsi_ooo->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);

	return true;
}

static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
							    &cqe->rx_cqe_sp))
				continue;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}
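
/* Re-transmit buffers that the OOO engine has marked ready: each buffer
 * is sent as a single-BD packet on the connection's Tx destination. On
 * failure the buffer is returned to the ready list for a later retry.
 */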
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 desc_size;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_descq) +
		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_descq->bds_set));

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}
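
/* For OOO connections, pre-allocate coherent DMA buffers sized for the
 * connection MTU (rounded up to the Ethernet cache-line size) and hand
 * them to the OOO free-buffer pool. When the caller did not specify a
 * count, default to twice rx_num_desc buffers.
 */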
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}
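
/* Reserve a free LL2 connection slot, copy the caller's acquire data,
 * allocate the Rx/Tx/OOO resources and register the completion callbacks
 * with the status block.
 *
 * Illustrative caller sketch (hedged: field values and the my_ll2_cbs
 * callback table are hypothetical, not taken from this file; see
 * include/linux/qed/qed_ll2_if.h for the real structures):
 *
 *	struct qed_ll2_acquire_data data = { };
 *	u8 handle;
 *
 *	data.input.conn_type = QED_LL2_TYPE_TEST;
 *	data.input.mtu = 1500;
 *	data.input.rx_num_desc = 32;
 *	data.input.tx_num_desc = 32;
 *	data.p_connection_handle = &handle;
 *	data.cbs = &my_ll2_cbs;
 *	if (!qed_ll2_acquire_connection(p_hwfn, &data))
 *		qed_ll2_establish_connection(p_hwfn, handle);
 */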
  1113. int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
  1114. {
  1115. struct qed_hwfn *p_hwfn = cxt;
  1116. qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
  1117. struct qed_ll2_info *p_ll2_info = NULL;
  1118. u8 i, *p_tx_max;
  1119. int rc;
  1120. if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
  1121. return -EINVAL;
  1122. /* Find a free connection to be used */
  1123. for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
  1124. mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
  1125. if (p_hwfn->p_ll2_info[i].b_active) {
  1126. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1127. continue;
  1128. }
  1129. p_hwfn->p_ll2_info[i].b_active = true;
  1130. p_ll2_info = &p_hwfn->p_ll2_info[i];
  1131. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1132. break;
  1133. }
  1134. if (!p_ll2_info)
  1135. return -EBUSY;
  1136. memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
  1137. switch (data->input.tx_dest) {
  1138. case QED_LL2_TX_DEST_NW:
  1139. p_ll2_info->tx_dest = CORE_TX_DEST_NW;
  1140. break;
  1141. case QED_LL2_TX_DEST_LB:
  1142. p_ll2_info->tx_dest = CORE_TX_DEST_LB;
  1143. break;
  1144. case QED_LL2_TX_DEST_DROP:
  1145. p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
  1146. break;
  1147. default:
  1148. return -EINVAL;
  1149. }
  1150. if (data->input.conn_type == QED_LL2_TYPE_OOO ||
  1151. data->input.secondary_queue)
  1152. p_ll2_info->main_func_queue = false;
  1153. else
  1154. p_ll2_info->main_func_queue = true;
  1155. /* Correct maximum number of Tx BDs */
  1156. p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
  1157. if (*p_tx_max == 0)
  1158. *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
  1159. else
  1160. *p_tx_max = min_t(u8, *p_tx_max,
  1161. CORE_LL2_TX_MAX_BDS_PER_PACKET);
  1162. rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
  1163. if (rc) {
  1164. DP_NOTICE(p_hwfn, "Invalid callback functions\n");
  1165. goto q_allocate_fail;
  1166. }
  1167. rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
  1168. if (rc)
  1169. goto q_allocate_fail;
  1170. rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
  1171. if (rc)
  1172. goto q_allocate_fail;
  1173. rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
  1174. data->input.mtu);
  1175. if (rc)
  1176. goto q_allocate_fail;
  1177. /* Register callbacks for the Rx/Tx queues */
  1178. if (data->input.conn_type == QED_LL2_TYPE_OOO) {
  1179. comp_rx_cb = qed_ll2_lb_rxq_completion;
  1180. comp_tx_cb = qed_ll2_lb_txq_completion;
  1181. } else {
  1182. comp_rx_cb = qed_ll2_rxq_completion;
  1183. comp_tx_cb = qed_ll2_txq_completion;
  1184. }
  1185. if (data->input.rx_num_desc) {
  1186. qed_int_register_cb(p_hwfn, comp_rx_cb,
  1187. &p_hwfn->p_ll2_info[i],
  1188. &p_ll2_info->rx_queue.rx_sb_index,
  1189. &p_ll2_info->rx_queue.p_fw_cons);
  1190. p_ll2_info->rx_queue.b_cb_registred = true;
  1191. }
  1192. if (data->input.tx_num_desc) {
  1193. qed_int_register_cb(p_hwfn,
  1194. comp_tx_cb,
  1195. &p_hwfn->p_ll2_info[i],
  1196. &p_ll2_info->tx_queue.tx_sb_index,
  1197. &p_ll2_info->tx_queue.p_fw_cons);
  1198. p_ll2_info->tx_queue.b_cb_registred = true;
  1199. }
  1200. *data->p_connection_handle = i;
  1201. return rc;
  1202. q_allocate_fail:
  1203. qed_ll2_release_connection(p_hwfn, i);
  1204. return -ENOMEM;
  1205. }
  1206. static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
  1207. struct qed_ll2_info *p_ll2_conn)
  1208. {
  1209. enum qed_ll2_error_handle error_input;
  1210. enum core_error_handle error_mode;
  1211. u8 action_on_error = 0;
  1212. if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
  1213. return 0;
  1214. DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
  1215. error_input = p_ll2_conn->input.ai_err_packet_too_big;
  1216. error_mode = qed_ll2_get_error_choice(error_input);
  1217. SET_FIELD(action_on_error,
  1218. CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
  1219. error_input = p_ll2_conn->input.ai_err_no_buf;
  1220. error_mode = qed_ll2_get_error_choice(error_input);
  1221. SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
  1222. return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
  1223. }
  1224. static void
  1225. qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
  1226. struct qed_ll2_info *p_ll2_conn)
  1227. {
  1228. if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
  1229. return;
  1230. qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
  1231. qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
  1232. }
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u32 desc_size;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_pkt) +
		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_pkt->bds_set));

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	}
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
			      GTT_BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid,
					  DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn, p_ptt,
						    ETH_P_FCOE, 0,
						    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    ETH_P_FIP, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

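/* Move everything pending on posting_descq (plus the optional packet supplied
 * by the caller) onto active_descq and, if anything was moved, update the
 * firmware's Rx BD/CQE producers in a single register write.
 */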
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

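/* Hand a single Rx buffer to the connection. With notify_fw == 0 the buffer
 * is only queued on posting_descq; a later call with notify_fw set (or a
 * flush) pushes the producers to the firmware. Typical usage mirrors
 * qed_ll2_start() below, e.g.:
 *
 *	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
 *				    cdev->ll2->handle,
 *				    buffer->phys_addr, 0, buffer, 1);
 */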
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

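/* Claim the Tx descriptor for the current packet and record its first
 * fragment; remaining fragments are filled in by
 * qed_ll2_prepare_tx_packet_set_bd() or later by
 * qed_ll2_set_fragment_of_tx_packet().
 */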
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

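/* Build the start Tx BD (destination, VLAN or loopback echo, checksum and
 * RoCE flags, number of BDs) and produce cleared BDs for any additional
 * fragments the packet declared.
 */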
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		tx_dest = CORE_TX_DEST_LB;
		break;
	}

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
	} else {
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;
	}

	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}

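/* Queue a Tx packet on the connection. The pkt descriptor carries the DMA
 * address and length of the first fragment only; each additional fragment
 * must be supplied afterwards via qed_ll2_set_fragment_of_tx_packet()
 * (see qed_ll2_start_xmit() below for the full sequence).
 */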
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

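/* Tear down an established connection: stop and flush the Tx/Rx queues,
 * unregister their status-block callbacks and, for FCoE, remove the
 * ethertype filters added at establish time. The handle itself stays
 * acquired until qed_ll2_release_connection().
 */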
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_txq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_rxq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
						       ETH_P_FCOE, 0,
						       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       ETH_P_FIP, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

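/* Allocation, setup and teardown of the per-hwfn array of LL2 connection
 * descriptors; individual connections are claimed later through
 * qed_ll2_acquire_connection().
 */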
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length =
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ =
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error =
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

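/* Translate the caller-supplied qed_ll2_params into the acquire-time input
 * used by the qed_ll2_ops path, wiring in the shared ll2_cbs callbacks and
 * the QED_LL2_RX_SIZE / QED_LL2_TX_SIZE ring sizes; loopback connections are
 * steered to the LB Tx destination and traffic class.
 */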
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

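/* Bring up the dedicated OOO (out-of-order handling) LL2 connection whose
 * handle lives in the iSCSI PF parameters; on failure the handle is reset to
 * QED_LL2_UNUSED_HANDLE.
 */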
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

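/* qed_ll2_ops.start: allocate and post the Rx buffer pool, acquire and
 * establish the protocol-appropriate LL2 connection, optionally start the
 * iSCSI OOO queue and install the LL2 MAC filter.
 */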
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

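/* qed_ll2_ops.start_xmit: map the skb head and its fragments and feed them to
 * qed_ll2_prepare_tx_packet() and qed_ll2_set_fragment_of_tx_packet().
 * Checksum-offloaded skbs, and skbs needing more than
 * CORE_LL2_TX_MAX_BDS_PER_PACKET BDs, are rejected up front.
 */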
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much we can do: part of the
		 * packet has already been posted, so we cannot free the
		 * mappings and must wait for completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}