qed_ll2.c
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <asm/byteorder.h>
  34. #include <linux/dma-mapping.h>
  35. #include <linux/if_vlan.h>
  36. #include <linux/kernel.h>
  37. #include <linux/pci.h>
  38. #include <linux/slab.h>
  39. #include <linux/stddef.h>
  40. #include <linux/workqueue.h>
  41. #include <net/ipv6.h>
  42. #include <linux/bitops.h>
  43. #include <linux/delay.h>
  44. #include <linux/errno.h>
  45. #include <linux/etherdevice.h>
  46. #include <linux/io.h>
  47. #include <linux/list.h>
  48. #include <linux/mutex.h>
  49. #include <linux/spinlock.h>
  50. #include <linux/string.h>
  51. #include <linux/qed/qed_ll2_if.h>
  52. #include "qed.h"
  53. #include "qed_cxt.h"
  54. #include "qed_dev_api.h"
  55. #include "qed_hsi.h"
  56. #include "qed_hw.h"
  57. #include "qed_int.h"
  58. #include "qed_ll2.h"
  59. #include "qed_mcp.h"
  60. #include "qed_ooo.h"
  61. #include "qed_reg_addr.h"
  62. #include "qed_sp.h"
  63. #include "qed_rdma.h"
  64. #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
  65. #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
  66. #define QED_LL2_TX_SIZE (256)
  67. #define QED_LL2_RX_SIZE (4096)
  68. struct qed_cb_ll2_info {
  69. int rx_cnt;
  70. u32 rx_size;
  71. u8 handle;
  72. /* Lock protecting LL2 buffer lists in non-sleeping (atomic) context */
  73. spinlock_t lock;
  74. struct list_head list;
  75. const struct qed_ll2_cb_ops *cbs;
  76. void *cb_cookie;
  77. };
  78. struct qed_ll2_buffer {
  79. struct list_head list;
  80. void *data;
  81. dma_addr_t phys_addr;
  82. };
  83. static void qed_ll2b_complete_tx_packet(void *cxt,
  84. u8 connection_handle,
  85. void *cookie,
  86. dma_addr_t first_frag_addr,
  87. bool b_last_fragment,
  88. bool b_last_packet)
  89. {
  90. struct qed_hwfn *p_hwfn = cxt;
  91. struct qed_dev *cdev = p_hwfn->cdev;
  92. struct sk_buff *skb = cookie;
  93. /* All we need to do is release the mapping */
  94. dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
  95. skb_headlen(skb), DMA_TO_DEVICE);
  96. if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
  97. cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
  98. b_last_fragment);
  99. dev_kfree_skb_any(skb);
  100. }
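/* Allocate a data buffer for the LL2 Rx path and DMA-map it for device
 * writes (the mapping starts past the NET_SKB_PAD headroom). Returns 0
 * on success or -ENOMEM.
 */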
  101. static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
  102. u8 **data, dma_addr_t *phys_addr)
  103. {
  104. *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
  105. if (!(*data)) {
  106. DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
  107. return -ENOMEM;
  108. }
  109. *phys_addr = dma_map_single(&cdev->pdev->dev,
  110. ((*data) + NET_SKB_PAD),
  111. cdev->ll2->rx_size, DMA_FROM_DEVICE);
  112. if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
  113. DP_INFO(cdev, "Failed to map LL2 buffer data\n");
  114. kfree((*data));
  115. return -ENOMEM;
  116. }
  117. return 0;
  118. }
  119. static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
  120. struct qed_ll2_buffer *buffer)
  121. {
  122. spin_lock_bh(&cdev->ll2->lock);
  123. dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
  124. cdev->ll2->rx_size, DMA_FROM_DEVICE);
  125. kfree(buffer->data);
  126. list_del(&buffer->list);
  127. cdev->ll2->rx_cnt--;
  128. if (!cdev->ll2->rx_cnt)
  129. DP_INFO(cdev, "All LL2 entries were removed\n");
  130. spin_unlock_bh(&cdev->ll2->lock);
  131. return 0;
  132. }
  133. static void qed_ll2_kill_buffers(struct qed_dev *cdev)
  134. {
  135. struct qed_ll2_buffer *buffer, *tmp_buffer;
  136. list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
  137. qed_ll2_dealloc_buffer(cdev, buffer);
  138. }
  139. void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
  140. {
  141. struct qed_hwfn *p_hwfn = cxt;
  142. struct qed_ll2_buffer *buffer = data->cookie;
  143. struct qed_dev *cdev = p_hwfn->cdev;
  144. dma_addr_t new_phys_addr;
  145. struct sk_buff *skb;
  146. bool reuse = false;
  147. int rc = -EINVAL;
  148. u8 *new_data;
  149. DP_VERBOSE(p_hwfn,
  150. (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
  151. "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
  152. (u64)data->rx_buf_addr,
  153. data->u.placement_offset,
  154. data->length.packet_length,
  155. data->parse_flags,
  156. data->vlan, data->opaque_data_0, data->opaque_data_1);
  157. if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
  158. print_hex_dump(KERN_INFO, "",
  159. DUMP_PREFIX_OFFSET, 16, 1,
  160. buffer->data, data->length.packet_length, false);
  161. }
  162. /* Determine if data is valid */
  163. if (data->length.packet_length < ETH_HLEN)
  164. reuse = true;
  165. /* Allocate a replacement for buffer; Reuse upon failure */
  166. if (!reuse)
  167. rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
  168. &new_phys_addr);
  169. /* If we need to reuse, or there's no replacement buffer, repost this */
  170. if (rc)
  171. goto out_post;
  172. dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
  173. cdev->ll2->rx_size, DMA_FROM_DEVICE);
  174. skb = build_skb(buffer->data, 0);
  175. if (!skb) {
  176. rc = -ENOMEM;
  177. goto out_post;
  178. }
  179. data->u.placement_offset += NET_SKB_PAD;
  180. skb_reserve(skb, data->u.placement_offset);
  181. skb_put(skb, data->length.packet_length);
  182. skb_checksum_none_assert(skb);
  183. /* Get partial Ethernet information instead of eth_type_trans(),
  184. * since we don't have an associated net_device.
  185. */
  186. skb_reset_mac_header(skb);
  187. skb->protocol = eth_hdr(skb)->h_proto;
  188. /* Pass SKB onward */
  189. if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
  190. if (data->vlan)
  191. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  192. data->vlan);
  193. cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
  194. data->opaque_data_0,
  195. data->opaque_data_1);
  196. }
  197. /* Update Buffer information and update FW producer */
  198. buffer->data = new_data;
  199. buffer->phys_addr = new_phys_addr;
  200. out_post:
  201. rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
  202. buffer->phys_addr, 0, buffer, 1);
  203. if (rc)
  204. qed_ll2_dealloc_buffer(cdev, buffer);
  205. }
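/* Translate a connection handle into its qed_ll2_info entry. With
 * b_only_active set, only connections marked active are returned;
 * b_lock takes the per-connection mutex around that check.
 */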
  206. static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
  207. u8 connection_handle,
  208. bool b_lock,
  209. bool b_only_active)
  210. {
  211. struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
  212. if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
  213. return NULL;
  214. if (!p_hwfn->p_ll2_info)
  215. return NULL;
  216. p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
  217. if (b_only_active) {
  218. if (b_lock)
  219. mutex_lock(&p_ll2_conn->mutex);
  220. if (p_ll2_conn->b_active)
  221. p_ret = p_ll2_conn;
  222. if (b_lock)
  223. mutex_unlock(&p_ll2_conn->mutex);
  224. } else {
  225. p_ret = p_ll2_conn;
  226. }
  227. return p_ret;
  228. }
  229. static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
  230. u8 connection_handle)
  231. {
  232. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
  233. }
  234. static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
  235. u8 connection_handle)
  236. {
  237. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
  238. }
  239. static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
  240. *p_hwfn,
  241. u8 connection_handle)
  242. {
  243. return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
  244. }
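/* Return every packet still on the Tx active list to the free list.
 * OOO buffers go back to the OOO free pool; other packets are handed
 * to the connection's tx_release_cb.
 */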
  245. static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
  246. {
  247. bool b_last_packet = false, b_last_frag = false;
  248. struct qed_ll2_tx_packet *p_pkt = NULL;
  249. struct qed_ll2_info *p_ll2_conn;
  250. struct qed_ll2_tx_queue *p_tx;
  251. dma_addr_t tx_frag;
  252. p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
  253. if (!p_ll2_conn)
  254. return;
  255. p_tx = &p_ll2_conn->tx_queue;
  256. while (!list_empty(&p_tx->active_descq)) {
  257. p_pkt = list_first_entry(&p_tx->active_descq,
  258. struct qed_ll2_tx_packet, list_entry);
  259. if (!p_pkt)
  260. break;
  261. list_del(&p_pkt->list_entry);
  262. b_last_packet = list_empty(&p_tx->active_descq);
  263. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  264. if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
  265. struct qed_ooo_buffer *p_buffer;
  266. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  267. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  268. p_buffer);
  269. } else {
  270. p_tx->cur_completing_packet = *p_pkt;
  271. p_tx->cur_completing_bd_idx = 1;
  272. b_last_frag =
  273. p_tx->cur_completing_bd_idx == p_pkt->bd_used;
  274. tx_frag = p_pkt->bds_set[0].tx_frag;
  275. p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
  276. p_ll2_conn->my_id,
  277. p_pkt->cookie,
  278. tx_frag,
  279. b_last_frag,
  280. b_last_packet);
  281. }
  282. }
  283. }
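/* Tx completion handler: walk from the driver's BD index up to the
 * firmware consumer, consume the completed BDs from the Tx chain and
 * invoke tx_comp_cb for each finished packet (with the queue lock
 * dropped around the callback).
 */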
  284. static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  285. {
  286. struct qed_ll2_info *p_ll2_conn = p_cookie;
  287. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  288. u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
  289. struct qed_ll2_tx_packet *p_pkt;
  290. bool b_last_frag = false;
  291. unsigned long flags;
  292. int rc = -EINVAL;
  293. spin_lock_irqsave(&p_tx->lock, flags);
  294. if (p_tx->b_completing_packet) {
  295. rc = -EBUSY;
  296. goto out;
  297. }
  298. new_idx = le16_to_cpu(*p_tx->p_fw_cons);
  299. num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
  300. while (num_bds) {
  301. if (list_empty(&p_tx->active_descq))
  302. goto out;
  303. p_pkt = list_first_entry(&p_tx->active_descq,
  304. struct qed_ll2_tx_packet, list_entry);
  305. if (!p_pkt)
  306. goto out;
  307. p_tx->b_completing_packet = true;
  308. p_tx->cur_completing_packet = *p_pkt;
  309. num_bds_in_packet = p_pkt->bd_used;
  310. list_del(&p_pkt->list_entry);
  311. if (num_bds < num_bds_in_packet) {
  312. DP_NOTICE(p_hwfn,
  313. "Rest of BDs does not cover whole packet\n");
  314. goto out;
  315. }
  316. num_bds -= num_bds_in_packet;
  317. p_tx->bds_idx += num_bds_in_packet;
  318. while (num_bds_in_packet--)
  319. qed_chain_consume(&p_tx->txq_chain);
  320. p_tx->cur_completing_bd_idx = 1;
  321. b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
  322. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  323. spin_unlock_irqrestore(&p_tx->lock, flags);
  324. p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
  325. p_ll2_conn->my_id,
  326. p_pkt->cookie,
  327. p_pkt->bds_set[0].tx_frag,
  328. b_last_frag, !num_bds);
  329. spin_lock_irqsave(&p_tx->lock, flags);
  330. }
  331. p_tx->b_completing_packet = false;
  332. rc = 0;
  333. out:
  334. spin_unlock_irqrestore(&p_tx->lock, flags);
  335. return rc;
  336. }
  337. static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
  338. union core_rx_cqe_union *p_cqe,
  339. struct qed_ll2_comp_rx_data *data)
  340. {
  341. data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
  342. data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
  343. data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
  344. data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
  345. data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
  346. data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
  347. }
  348. static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
  349. union core_rx_cqe_union *p_cqe,
  350. struct qed_ll2_comp_rx_data *data)
  351. {
  352. data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
  353. data->length.packet_length =
  354. le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
  355. data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
  356. data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
  357. data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
  358. data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
  359. }
  360. static int
  361. qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
  362. struct qed_ll2_info *p_ll2_conn,
  363. union core_rx_cqe_union *p_cqe,
  364. unsigned long *p_lock_flags, bool b_last_cqe)
  365. {
  366. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  367. struct qed_ll2_rx_packet *p_pkt = NULL;
  368. struct qed_ll2_comp_rx_data data;
  369. if (!list_empty(&p_rx->active_descq))
  370. p_pkt = list_first_entry(&p_rx->active_descq,
  371. struct qed_ll2_rx_packet, list_entry);
  372. if (!p_pkt) {
  373. DP_NOTICE(p_hwfn,
  374. "[%d] LL2 Rx completion but active_descq is empty\n",
  375. p_ll2_conn->input.conn_type);
  376. return -EIO;
  377. }
  378. list_del(&p_pkt->list_entry);
  379. if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
  380. qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
  381. else
  382. qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
  383. if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
  384. DP_NOTICE(p_hwfn,
  385. "Mismatch between active_descq and the LL2 Rx chain\n");
  386. list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  387. data.connection_handle = p_ll2_conn->my_id;
  388. data.cookie = p_pkt->cookie;
  389. data.rx_buf_addr = p_pkt->rx_buf_addr;
  390. data.b_last_packet = b_last_cqe;
  391. spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
  392. p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
  393. spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
  394. return 0;
  395. }
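/* Rx completion handler: drain the Rx CQ chain up to the firmware
 * consumer index and dispatch regular/GSI CQEs to
 * qed_ll2_rxq_handle_completion(); slow-path CQEs are unexpected here.
 */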
  396. static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
  397. {
  398. struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
  399. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  400. union core_rx_cqe_union *cqe = NULL;
  401. u16 cq_new_idx = 0, cq_old_idx = 0;
  402. unsigned long flags = 0;
  403. int rc = 0;
  404. spin_lock_irqsave(&p_rx->lock, flags);
  405. cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
  406. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  407. while (cq_new_idx != cq_old_idx) {
  408. bool b_last_cqe = (cq_new_idx == cq_old_idx);
  409. cqe =
  410. (union core_rx_cqe_union *)
  411. qed_chain_consume(&p_rx->rcq_chain);
  412. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  413. DP_VERBOSE(p_hwfn,
  414. QED_MSG_LL2,
  415. "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
  416. cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
  417. switch (cqe->rx_cqe_sp.type) {
  418. case CORE_RX_CQE_TYPE_SLOW_PATH:
  419. DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
  420. rc = -EINVAL;
  421. break;
  422. case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
  423. case CORE_RX_CQE_TYPE_REGULAR:
  424. rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
  425. cqe, &flags,
  426. b_last_cqe);
  427. break;
  428. default:
  429. rc = -EIO;
  430. }
  431. }
  432. spin_unlock_irqrestore(&p_rx->lock, flags);
  433. return rc;
  434. }
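/* Move all pending Rx descriptors from the active to the free list;
 * OOO buffers return to the OOO free pool, others are reported through
 * rx_release_cb.
 */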
  435. static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
  436. {
  437. struct qed_ll2_info *p_ll2_conn = NULL;
  438. struct qed_ll2_rx_packet *p_pkt = NULL;
  439. struct qed_ll2_rx_queue *p_rx;
  440. p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
  441. if (!p_ll2_conn)
  442. return;
  443. p_rx = &p_ll2_conn->rx_queue;
  444. while (!list_empty(&p_rx->active_descq)) {
  445. p_pkt = list_first_entry(&p_rx->active_descq,
  446. struct qed_ll2_rx_packet, list_entry);
  447. if (!p_pkt)
  448. break;
  449. list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
  450. if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
  451. struct qed_ooo_buffer *p_buffer;
  452. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  453. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  454. p_buffer);
  455. } else {
  456. dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
  457. void *cookie = p_pkt->cookie;
  458. bool b_last;
  459. b_last = list_empty(&p_rx->active_descq);
  460. p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
  461. p_ll2_conn->my_id,
  462. cookie,
  463. rx_buf_addr, b_last);
  464. }
  465. }
  466. }
  467. static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
  468. {
  469. u8 bd_flags = 0;
  470. if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
  471. SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
  472. return bd_flags;
  473. }
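/* Loopback (OOO) Rx handler: interpret each CQE's opaque data as a
 * struct ooo_opaque and update the out-of-order isle bookkeeping
 * (delete/add/join) before recycling the Rx descriptor.
 */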
  474. static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
  475. struct qed_ll2_info *p_ll2_conn)
  476. {
  477. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  478. u16 packet_length = 0, parse_flags = 0, vlan = 0;
  479. struct qed_ll2_rx_packet *p_pkt = NULL;
  480. u32 num_ooo_add_to_peninsula = 0, cid;
  481. union core_rx_cqe_union *cqe = NULL;
  482. u16 cq_new_idx = 0, cq_old_idx = 0;
  483. struct qed_ooo_buffer *p_buffer;
  484. struct ooo_opaque *iscsi_ooo;
  485. u8 placement_offset = 0;
  486. u8 cqe_type;
  487. cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
  488. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  489. if (cq_new_idx == cq_old_idx)
  490. return 0;
  491. while (cq_new_idx != cq_old_idx) {
  492. struct core_rx_fast_path_cqe *p_cqe_fp;
  493. cqe = qed_chain_consume(&p_rx->rcq_chain);
  494. cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
  495. cqe_type = cqe->rx_cqe_sp.type;
  496. if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
  497. DP_NOTICE(p_hwfn,
  498. "Got a non-regular LB LL2 completion [type 0x%02x]\n",
  499. cqe_type);
  500. return -EINVAL;
  501. }
  502. p_cqe_fp = &cqe->rx_cqe_fp;
  503. placement_offset = p_cqe_fp->placement_offset;
  504. parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
  505. packet_length = le16_to_cpu(p_cqe_fp->packet_length);
  506. vlan = le16_to_cpu(p_cqe_fp->vlan);
  507. iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
  508. qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
  509. iscsi_ooo);
  510. cid = le32_to_cpu(iscsi_ooo->cid);
  511. /* Process delete isle first */
  512. if (iscsi_ooo->drop_size)
  513. qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
  514. iscsi_ooo->drop_isle,
  515. iscsi_ooo->drop_size);
  516. if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
  517. continue;
  518. /* Now process create/add/join isles */
  519. if (list_empty(&p_rx->active_descq)) {
  520. DP_NOTICE(p_hwfn,
  521. "LL2 OOO RX chain has no submitted buffers\n"
  522. );
  523. return -EIO;
  524. }
  525. p_pkt = list_first_entry(&p_rx->active_descq,
  526. struct qed_ll2_rx_packet, list_entry);
  527. if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
  528. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
  529. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
  530. (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
  531. (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
  532. if (!p_pkt) {
  533. DP_NOTICE(p_hwfn,
  534. "LL2 OOO RX packet is not valid\n");
  535. return -EIO;
  536. }
  537. list_del(&p_pkt->list_entry);
  538. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  539. p_buffer->packet_length = packet_length;
  540. p_buffer->parse_flags = parse_flags;
  541. p_buffer->vlan = vlan;
  542. p_buffer->placement_offset = placement_offset;
  543. qed_chain_consume(&p_rx->rxq_chain);
  544. list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  545. switch (iscsi_ooo->ooo_opcode) {
  546. case TCP_EVENT_ADD_NEW_ISLE:
  547. qed_ooo_add_new_isle(p_hwfn,
  548. p_hwfn->p_ooo_info,
  549. cid,
  550. iscsi_ooo->ooo_isle,
  551. p_buffer);
  552. break;
  553. case TCP_EVENT_ADD_ISLE_RIGHT:
  554. qed_ooo_add_new_buffer(p_hwfn,
  555. p_hwfn->p_ooo_info,
  556. cid,
  557. iscsi_ooo->ooo_isle,
  558. p_buffer,
  559. QED_OOO_RIGHT_BUF);
  560. break;
  561. case TCP_EVENT_ADD_ISLE_LEFT:
  562. qed_ooo_add_new_buffer(p_hwfn,
  563. p_hwfn->p_ooo_info,
  564. cid,
  565. iscsi_ooo->ooo_isle,
  566. p_buffer,
  567. QED_OOO_LEFT_BUF);
  568. break;
  569. case TCP_EVENT_JOIN:
  570. qed_ooo_add_new_buffer(p_hwfn,
  571. p_hwfn->p_ooo_info,
  572. cid,
  573. iscsi_ooo->ooo_isle +
  574. 1,
  575. p_buffer,
  576. QED_OOO_LEFT_BUF);
  577. qed_ooo_join_isles(p_hwfn,
  578. p_hwfn->p_ooo_info,
  579. cid, iscsi_ooo->ooo_isle);
  580. break;
  581. case TCP_EVENT_ADD_PEN:
  582. num_ooo_add_to_peninsula++;
  583. qed_ooo_put_ready_buffer(p_hwfn,
  584. p_hwfn->p_ooo_info,
  585. p_buffer, true);
  586. break;
  587. }
  588. } else {
  589. DP_NOTICE(p_hwfn,
  590. "Unexpected event (%d) TX OOO completion\n",
  591. iscsi_ooo->ooo_opcode);
  592. }
  593. }
  594. return 0;
  595. }
  596. static void
  597. qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
  598. struct qed_ll2_info *p_ll2_conn)
  599. {
  600. struct qed_ll2_tx_pkt_info tx_pkt;
  601. struct qed_ooo_buffer *p_buffer;
  602. u16 l4_hdr_offset_w;
  603. dma_addr_t first_frag;
  604. u16 parse_flags;
  605. u8 bd_flags;
  606. int rc;
  607. /* Submit Tx buffers here */
  608. while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
  609. p_hwfn->p_ooo_info))) {
  610. l4_hdr_offset_w = 0;
  611. bd_flags = 0;
  612. first_frag = p_buffer->rx_buffer_phys_addr +
  613. p_buffer->placement_offset;
  614. parse_flags = p_buffer->parse_flags;
  615. bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
  616. SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
  617. SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
  618. memset(&tx_pkt, 0, sizeof(tx_pkt));
  619. tx_pkt.num_of_bds = 1;
  620. tx_pkt.vlan = p_buffer->vlan;
  621. tx_pkt.bd_flags = bd_flags;
  622. tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
  623. tx_pkt.tx_dest = p_ll2_conn->tx_dest;
  624. tx_pkt.first_frag = first_frag;
  625. tx_pkt.first_frag_len = p_buffer->packet_length;
  626. tx_pkt.cookie = p_buffer;
  627. rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
  628. &tx_pkt, true);
  629. if (rc) {
  630. qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
  631. p_buffer, false);
  632. break;
  633. }
  634. }
  635. }
  636. static void
  637. qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
  638. struct qed_ll2_info *p_ll2_conn)
  639. {
  640. struct qed_ooo_buffer *p_buffer;
  641. int rc;
  642. while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
  643. p_hwfn->p_ooo_info))) {
  644. rc = qed_ll2_post_rx_buffer(p_hwfn,
  645. p_ll2_conn->my_id,
  646. p_buffer->rx_buffer_phys_addr,
  647. 0, p_buffer, true);
  648. if (rc) {
  649. qed_ooo_put_free_buffer(p_hwfn,
  650. p_hwfn->p_ooo_info, p_buffer);
  651. break;
  652. }
  653. }
  654. }
  655. static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  656. {
  657. struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
  658. int rc;
  659. rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
  660. if (rc)
  661. return rc;
  662. qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
  663. qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
  664. return 0;
  665. }
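/* Loopback (OOO) Tx completion: release completed single-BD packets
 * and repost their buffers to the Rx queue, falling back to the OOO
 * free pool if reposting fails.
 */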
  666. static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
  667. {
  668. struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
  669. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  670. struct qed_ll2_tx_packet *p_pkt = NULL;
  671. struct qed_ooo_buffer *p_buffer;
  672. bool b_dont_submit_rx = false;
  673. u16 new_idx = 0, num_bds = 0;
  674. int rc;
  675. new_idx = le16_to_cpu(*p_tx->p_fw_cons);
  676. num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
  677. if (!num_bds)
  678. return 0;
  679. while (num_bds) {
  680. if (list_empty(&p_tx->active_descq))
  681. return -EINVAL;
  682. p_pkt = list_first_entry(&p_tx->active_descq,
  683. struct qed_ll2_tx_packet, list_entry);
  684. if (!p_pkt)
  685. return -EINVAL;
  686. if (p_pkt->bd_used != 1) {
  687. DP_NOTICE(p_hwfn,
  688. "Unexpectedly many BDs(%d) in TX OOO completion\n",
  689. p_pkt->bd_used);
  690. return -EINVAL;
  691. }
  692. list_del(&p_pkt->list_entry);
  693. num_bds--;
  694. p_tx->bds_idx++;
  695. qed_chain_consume(&p_tx->txq_chain);
  696. p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
  697. list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
  698. if (b_dont_submit_rx) {
  699. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
  700. p_buffer);
  701. continue;
  702. }
  703. rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
  704. p_buffer->rx_buffer_phys_addr, 0,
  705. p_buffer, true);
  706. if (rc != 0) {
  707. qed_ooo_put_free_buffer(p_hwfn,
  708. p_hwfn->p_ooo_info, p_buffer);
  709. b_dont_submit_rx = true;
  710. }
  711. }
  712. qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
  713. return 0;
  714. }
  715. static void qed_ll2_stop_ooo(struct qed_dev *cdev)
  716. {
  717. struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
  718. u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
  719. DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
  720. *handle);
  721. qed_ll2_terminate_connection(hwfn, *handle);
  722. qed_ll2_release_connection(hwfn, *handle);
  723. *handle = QED_LL2_UNUSED_HANDLE;
  724. }
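/* Build and post the CORE Rx queue start ramrod: SB id/index, MTU,
 * BD base and CQE PBL addresses, VLAN/TTL0 handling and the
 * action-on-error policy for this connection.
 */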
  725. static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
  726. struct qed_ll2_info *p_ll2_conn,
  727. u8 action_on_error)
  728. {
  729. enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
  730. struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
  731. struct core_rx_start_ramrod_data *p_ramrod = NULL;
  732. struct qed_spq_entry *p_ent = NULL;
  733. struct qed_sp_init_data init_data;
  734. u16 cqe_pbl_size;
  735. int rc = 0;
  736. /* Get SPQ entry */
  737. memset(&init_data, 0, sizeof(init_data));
  738. init_data.cid = p_ll2_conn->cid;
  739. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  740. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  741. rc = qed_sp_init_request(p_hwfn, &p_ent,
  742. CORE_RAMROD_RX_QUEUE_START,
  743. PROTOCOLID_CORE, &init_data);
  744. if (rc)
  745. return rc;
  746. p_ramrod = &p_ent->ramrod.core_rx_queue_start;
  747. p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
  748. p_ramrod->sb_index = p_rx->rx_sb_index;
  749. p_ramrod->complete_event_flg = 1;
  750. p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
  751. DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
  752. cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
  753. p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
  754. DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
  755. qed_chain_get_pbl_phys(&p_rx->rcq_chain));
  756. p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
  757. p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
  758. p_ramrod->queue_id = p_ll2_conn->queue_id;
  759. p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
  760. if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
  761. p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
  762. (conn_type != QED_LL2_TYPE_IWARP)) {
  763. p_ramrod->mf_si_bcast_accept_all = 1;
  764. p_ramrod->mf_si_mcast_accept_all = 1;
  765. } else {
  766. p_ramrod->mf_si_bcast_accept_all = 0;
  767. p_ramrod->mf_si_mcast_accept_all = 0;
  768. }
  769. p_ramrod->action_on_error.error_type = action_on_error;
  770. p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
  771. return qed_spq_post(p_hwfn, p_ent, NULL);
  772. }
  773. static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
  774. struct qed_ll2_info *p_ll2_conn)
  775. {
  776. enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
  777. struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
  778. struct core_tx_start_ramrod_data *p_ramrod = NULL;
  779. struct qed_spq_entry *p_ent = NULL;
  780. struct qed_sp_init_data init_data;
  781. u16 pq_id = 0, pbl_size;
  782. int rc = -EINVAL;
  783. if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
  784. return 0;
  785. if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
  786. p_ll2_conn->tx_stats_en = 0;
  787. else
  788. p_ll2_conn->tx_stats_en = 1;
  789. /* Get SPQ entry */
  790. memset(&init_data, 0, sizeof(init_data));
  791. init_data.cid = p_ll2_conn->cid;
  792. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  793. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  794. rc = qed_sp_init_request(p_hwfn, &p_ent,
  795. CORE_RAMROD_TX_QUEUE_START,
  796. PROTOCOLID_CORE, &init_data);
  797. if (rc)
  798. return rc;
  799. p_ramrod = &p_ent->ramrod.core_tx_queue_start;
  800. p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
  801. p_ramrod->sb_index = p_tx->tx_sb_index;
  802. p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
  803. p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
  804. p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
  805. DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
  806. qed_chain_get_pbl_phys(&p_tx->txq_chain));
  807. pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
  808. p_ramrod->pbl_size = cpu_to_le16(pbl_size);
  809. switch (p_ll2_conn->input.tx_tc) {
  810. case PURE_LB_TC:
  811. pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
  812. break;
  813. case PKT_LB_TC:
  814. pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
  815. break;
  816. default:
  817. pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
  818. break;
  819. }
  820. p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
  821. switch (conn_type) {
  822. case QED_LL2_TYPE_FCOE:
  823. p_ramrod->conn_type = PROTOCOLID_FCOE;
  824. break;
  825. case QED_LL2_TYPE_ISCSI:
  826. p_ramrod->conn_type = PROTOCOLID_ISCSI;
  827. break;
  828. case QED_LL2_TYPE_ROCE:
  829. p_ramrod->conn_type = PROTOCOLID_ROCE;
  830. break;
  831. case QED_LL2_TYPE_IWARP:
  832. p_ramrod->conn_type = PROTOCOLID_IWARP;
  833. break;
  834. case QED_LL2_TYPE_OOO:
  835. if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
  836. p_ramrod->conn_type = PROTOCOLID_ISCSI;
  837. else
  838. p_ramrod->conn_type = PROTOCOLID_IWARP;
  839. break;
  840. default:
  841. p_ramrod->conn_type = PROTOCOLID_ETH;
  842. DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
  843. }
  844. p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
  845. return qed_spq_post(p_hwfn, p_ent, NULL);
  846. }
  847. static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
  848. struct qed_ll2_info *p_ll2_conn)
  849. {
  850. struct core_rx_stop_ramrod_data *p_ramrod = NULL;
  851. struct qed_spq_entry *p_ent = NULL;
  852. struct qed_sp_init_data init_data;
  853. int rc = -EINVAL;
  854. /* Get SPQ entry */
  855. memset(&init_data, 0, sizeof(init_data));
  856. init_data.cid = p_ll2_conn->cid;
  857. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  858. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  859. rc = qed_sp_init_request(p_hwfn, &p_ent,
  860. CORE_RAMROD_RX_QUEUE_STOP,
  861. PROTOCOLID_CORE, &init_data);
  862. if (rc)
  863. return rc;
  864. p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
  865. p_ramrod->complete_event_flg = 1;
  866. p_ramrod->queue_id = p_ll2_conn->queue_id;
  867. return qed_spq_post(p_hwfn, p_ent, NULL);
  868. }
  869. static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
  870. struct qed_ll2_info *p_ll2_conn)
  871. {
  872. struct qed_spq_entry *p_ent = NULL;
  873. struct qed_sp_init_data init_data;
  874. int rc = -EINVAL;
  875. /* Get SPQ entry */
  876. memset(&init_data, 0, sizeof(init_data));
  877. init_data.cid = p_ll2_conn->cid;
  878. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  879. init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
  880. rc = qed_sp_init_request(p_hwfn, &p_ent,
  881. CORE_RAMROD_TX_QUEUE_STOP,
  882. PROTOCOLID_CORE, &init_data);
  883. if (rc)
  884. return rc;
  885. return qed_spq_post(p_hwfn, p_ent, NULL);
  886. }
  887. static int
  888. qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
  889. struct qed_ll2_info *p_ll2_info)
  890. {
  891. struct qed_ll2_rx_packet *p_descq;
  892. u32 capacity;
  893. int rc = 0;
  894. if (!p_ll2_info->input.rx_num_desc)
  895. goto out;
  896. rc = qed_chain_alloc(p_hwfn->cdev,
  897. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  898. QED_CHAIN_MODE_NEXT_PTR,
  899. QED_CHAIN_CNT_TYPE_U16,
  900. p_ll2_info->input.rx_num_desc,
  901. sizeof(struct core_rx_bd),
  902. &p_ll2_info->rx_queue.rxq_chain, NULL);
  903. if (rc) {
  904. DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
  905. goto out;
  906. }
  907. capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
  908. p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
  909. GFP_KERNEL);
  910. if (!p_descq) {
  911. rc = -ENOMEM;
  912. DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
  913. goto out;
  914. }
  915. p_ll2_info->rx_queue.descq_array = p_descq;
  916. rc = qed_chain_alloc(p_hwfn->cdev,
  917. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  918. QED_CHAIN_MODE_PBL,
  919. QED_CHAIN_CNT_TYPE_U16,
  920. p_ll2_info->input.rx_num_desc,
  921. sizeof(struct core_rx_fast_path_cqe),
  922. &p_ll2_info->rx_queue.rcq_chain, NULL);
  923. if (rc) {
  924. DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
  925. goto out;
  926. }
  927. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  928. "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
  929. p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
  930. out:
  931. return rc;
  932. }
  933. static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
  934. struct qed_ll2_info *p_ll2_info)
  935. {
  936. struct qed_ll2_tx_packet *p_descq;
  937. u32 capacity;
  938. int rc = 0;
  939. if (!p_ll2_info->input.tx_num_desc)
  940. goto out;
  941. rc = qed_chain_alloc(p_hwfn->cdev,
  942. QED_CHAIN_USE_TO_CONSUME_PRODUCE,
  943. QED_CHAIN_MODE_PBL,
  944. QED_CHAIN_CNT_TYPE_U16,
  945. p_ll2_info->input.tx_num_desc,
  946. sizeof(struct core_tx_bd),
  947. &p_ll2_info->tx_queue.txq_chain, NULL);
  948. if (rc)
  949. goto out;
  950. capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
  951. p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
  952. GFP_KERNEL);
  953. if (!p_descq) {
  954. rc = -ENOMEM;
  955. goto out;
  956. }
  957. p_ll2_info->tx_queue.descq_array = p_descq;
  958. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  959. "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
  960. p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
  961. out:
  962. if (rc)
  963. DP_NOTICE(p_hwfn,
  964. "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
  965. p_ll2_info->input.tx_num_desc);
  966. return rc;
  967. }
  968. static int
  969. qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
  970. struct qed_ll2_info *p_ll2_info, u16 mtu)
  971. {
  972. struct qed_ooo_buffer *p_buf = NULL;
  973. void *p_virt;
  974. u16 buf_idx;
  975. int rc = 0;
  976. if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
  977. return rc;
  978. /* Correct number of requested OOO buffers if needed */
  979. if (!p_ll2_info->input.rx_num_ooo_buffers) {
  980. u16 num_desc = p_ll2_info->input.rx_num_desc;
  981. if (!num_desc)
  982. return -EINVAL;
  983. p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
  984. }
  985. for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
  986. buf_idx++) {
  987. p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
  988. if (!p_buf) {
  989. rc = -ENOMEM;
  990. goto out;
  991. }
  992. p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
  993. p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
  994. ETH_CACHE_LINE_SIZE - 1) &
  995. ~(ETH_CACHE_LINE_SIZE - 1);
  996. p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  997. p_buf->rx_buffer_size,
  998. &p_buf->rx_buffer_phys_addr,
  999. GFP_KERNEL);
  1000. if (!p_virt) {
  1001. kfree(p_buf);
  1002. rc = -ENOMEM;
  1003. goto out;
  1004. }
  1005. p_buf->rx_buffer_virt_addr = p_virt;
  1006. qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
  1007. }
  1008. DP_VERBOSE(p_hwfn, QED_MSG_LL2,
  1009. "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
  1010. p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
  1011. out:
  1012. return rc;
  1013. }
  1014. static int
  1015. qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
  1016. {
  1017. if (!cbs || (!cbs->rx_comp_cb ||
  1018. !cbs->rx_release_cb ||
  1019. !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
  1020. return -EINVAL;
  1021. p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
  1022. p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
  1023. p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
  1024. p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
  1025. p_ll2_info->cbs.cookie = cbs->cookie;
  1026. return 0;
  1027. }
  1028. static enum core_error_handle
  1029. qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
  1030. {
  1031. switch (err) {
  1032. case QED_LL2_DROP_PACKET:
  1033. return LL2_DROP_PACKET;
  1034. case QED_LL2_DO_NOTHING:
  1035. return LL2_DO_NOTHING;
  1036. case QED_LL2_ASSERT:
  1037. return LL2_ASSERT;
  1038. default:
  1039. return LL2_DO_NOTHING;
  1040. }
  1041. }
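/* Reserve a free LL2 connection slot, copy the caller's input,
 * allocate the Rx/Tx (and OOO) resources and register the status-block
 * completion callbacks. The chosen handle is returned through
 * data->p_connection_handle.
 */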
  1042. int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
  1043. {
  1044. struct qed_hwfn *p_hwfn = cxt;
  1045. qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
  1046. struct qed_ll2_info *p_ll2_info = NULL;
  1047. u8 i, *p_tx_max;
  1048. int rc;
  1049. if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
  1050. return -EINVAL;
  1051. /* Find a free connection to be used */
  1052. for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
  1053. mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
  1054. if (p_hwfn->p_ll2_info[i].b_active) {
  1055. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1056. continue;
  1057. }
  1058. p_hwfn->p_ll2_info[i].b_active = true;
  1059. p_ll2_info = &p_hwfn->p_ll2_info[i];
  1060. mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
  1061. break;
  1062. }
  1063. if (!p_ll2_info)
  1064. return -EBUSY;
  1065. memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
  1066. p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
  1067. CORE_TX_DEST_NW : CORE_TX_DEST_LB;
  1068. /* Correct maximum number of Tx BDs */
  1069. p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
  1070. if (*p_tx_max == 0)
  1071. *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
  1072. else
  1073. *p_tx_max = min_t(u8, *p_tx_max,
  1074. CORE_LL2_TX_MAX_BDS_PER_PACKET);
  1075. rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
  1076. if (rc) {
  1077. DP_NOTICE(p_hwfn, "Invalid callback functions\n");
  1078. goto q_allocate_fail;
  1079. }
  1080. rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
  1081. if (rc)
  1082. goto q_allocate_fail;
  1083. rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
  1084. if (rc)
  1085. goto q_allocate_fail;
  1086. rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
  1087. data->input.mtu);
  1088. if (rc)
  1089. goto q_allocate_fail;
  1090. /* Register callbacks for the Rx/Tx queues */
  1091. if (data->input.conn_type == QED_LL2_TYPE_OOO) {
  1092. comp_rx_cb = qed_ll2_lb_rxq_completion;
  1093. comp_tx_cb = qed_ll2_lb_txq_completion;
  1094. } else {
  1095. comp_rx_cb = qed_ll2_rxq_completion;
  1096. comp_tx_cb = qed_ll2_txq_completion;
  1097. }
  1098. if (data->input.rx_num_desc) {
  1099. qed_int_register_cb(p_hwfn, comp_rx_cb,
  1100. &p_hwfn->p_ll2_info[i],
  1101. &p_ll2_info->rx_queue.rx_sb_index,
  1102. &p_ll2_info->rx_queue.p_fw_cons);
  1103. p_ll2_info->rx_queue.b_cb_registred = true;
  1104. }
  1105. if (data->input.tx_num_desc) {
  1106. qed_int_register_cb(p_hwfn,
  1107. comp_tx_cb,
  1108. &p_hwfn->p_ll2_info[i],
  1109. &p_ll2_info->tx_queue.tx_sb_index,
  1110. &p_ll2_info->tx_queue.p_fw_cons);
  1111. p_ll2_info->tx_queue.b_cb_registred = true;
  1112. }
  1113. *data->p_connection_handle = i;
  1114. return rc;
  1115. q_allocate_fail:
  1116. qed_ll2_release_connection(p_hwfn, i);
  1117. return -ENOMEM;
  1118. }
  1119. static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
  1120. struct qed_ll2_info *p_ll2_conn)
  1121. {
  1122. enum qed_ll2_error_handle error_input;
  1123. enum core_error_handle error_mode;
  1124. u8 action_on_error = 0;
  1125. if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
  1126. return 0;
  1127. DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
  1128. error_input = p_ll2_conn->input.ai_err_packet_too_big;
  1129. error_mode = qed_ll2_get_error_choice(error_input);
  1130. SET_FIELD(action_on_error,
  1131. CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
  1132. error_input = p_ll2_conn->input.ai_err_no_buf;
  1133. error_mode = qed_ll2_get_error_choice(error_input);
  1134. SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
  1135. return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
  1136. }
  1137. static void
  1138. qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
  1139. struct qed_ll2_info *p_ll2_conn)
  1140. {
  1141. if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
  1142. return;
  1143. qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
  1144. qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
  1145. }
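/* Bring an acquired connection to life: reset the Rx/Tx chains and
 * descriptor lists, acquire a CORE CID, derive the queue/stats ids and
 * the producer/doorbell addresses, then post the Rx/Tx start ramrods.
 */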
  1146. int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
  1147. {
  1148. struct qed_hwfn *p_hwfn = cxt;
  1149. struct qed_ll2_info *p_ll2_conn;
  1150. struct qed_ll2_rx_queue *p_rx;
  1151. struct qed_ll2_tx_queue *p_tx;
  1152. struct qed_ptt *p_ptt;
  1153. int rc = -EINVAL;
  1154. u32 i, capacity;
  1155. u8 qid;
  1156. p_ptt = qed_ptt_acquire(p_hwfn);
  1157. if (!p_ptt)
  1158. return -EAGAIN;
  1159. p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
  1160. if (!p_ll2_conn) {
  1161. rc = -EINVAL;
  1162. goto out;
  1163. }
  1164. p_rx = &p_ll2_conn->rx_queue;
  1165. p_tx = &p_ll2_conn->tx_queue;
  1166. qed_chain_reset(&p_rx->rxq_chain);
  1167. qed_chain_reset(&p_rx->rcq_chain);
  1168. INIT_LIST_HEAD(&p_rx->active_descq);
  1169. INIT_LIST_HEAD(&p_rx->free_descq);
  1170. INIT_LIST_HEAD(&p_rx->posting_descq);
  1171. spin_lock_init(&p_rx->lock);
  1172. capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
  1173. for (i = 0; i < capacity; i++)
  1174. list_add_tail(&p_rx->descq_array[i].list_entry,
  1175. &p_rx->free_descq);
  1176. *p_rx->p_fw_cons = 0;
  1177. qed_chain_reset(&p_tx->txq_chain);
  1178. INIT_LIST_HEAD(&p_tx->active_descq);
  1179. INIT_LIST_HEAD(&p_tx->free_descq);
  1180. INIT_LIST_HEAD(&p_tx->sending_descq);
  1181. spin_lock_init(&p_tx->lock);
  1182. capacity = qed_chain_get_capacity(&p_tx->txq_chain);
  1183. for (i = 0; i < capacity; i++)
  1184. list_add_tail(&p_tx->descq_array[i].list_entry,
  1185. &p_tx->free_descq);
  1186. p_tx->cur_completing_bd_idx = 0;
  1187. p_tx->bds_idx = 0;
  1188. p_tx->b_completing_packet = false;
  1189. p_tx->cur_send_packet = NULL;
  1190. p_tx->cur_send_frag_num = 0;
  1191. p_tx->cur_completing_frag_num = 0;
  1192. *p_tx->p_fw_cons = 0;
  1193. rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
  1194. if (rc)
  1195. goto out;
  1196. qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
  1197. p_ll2_conn->queue_id = qid;
  1198. p_ll2_conn->tx_stats_id = qid;
  1199. p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
  1200. GTT_BAR0_MAP_REG_TSDM_RAM +
  1201. TSTORM_LL2_RX_PRODS_OFFSET(qid);
  1202. p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
  1203. qed_db_addr(p_ll2_conn->cid,
  1204. DQ_DEMS_LEGACY);
  1205. rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
  1206. if (rc)
  1207. goto out;
  1208. rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
  1209. if (rc)
  1210. goto out;
  1211. if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
  1212. qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
  1213. qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
  1214. if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
  1215. qed_llh_add_protocol_filter(p_hwfn, p_ptt,
  1216. 0x8906, 0,
  1217. QED_LLH_FILTER_ETHERTYPE);
  1218. qed_llh_add_protocol_filter(p_hwfn, p_ptt,
  1219. 0x8914, 0,
  1220. QED_LLH_FILTER_ETHERTYPE);
  1221. }
  1222. out:
  1223. qed_ptt_release(p_hwfn, p_ptt);
  1224. return rc;
  1225. }
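/* Move buffers queued on posting_descq (plus an optional new one) onto
 * the active list and write the updated BD/CQE producer values to the
 * TSTORM Rx producers address.
 */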
  1226. static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
  1227. struct qed_ll2_rx_queue *p_rx,
  1228. struct qed_ll2_rx_packet *p_curp)
  1229. {
  1230. struct qed_ll2_rx_packet *p_posting_packet = NULL;
  1231. struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
  1232. bool b_notify_fw = false;
  1233. u16 bd_prod, cq_prod;
  1234. /* This handles the flushing of already posted buffers */
  1235. while (!list_empty(&p_rx->posting_descq)) {
  1236. p_posting_packet = list_first_entry(&p_rx->posting_descq,
  1237. struct qed_ll2_rx_packet,
  1238. list_entry);
  1239. list_move_tail(&p_posting_packet->list_entry,
  1240. &p_rx->active_descq);
  1241. b_notify_fw = true;
  1242. }
  1243. /* This handles the supplied packet [if there is one] */
  1244. if (p_curp) {
  1245. list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
  1246. b_notify_fw = true;
  1247. }
  1248. if (!b_notify_fw)
  1249. return;
  1250. bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
  1251. cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
  1252. rx_prod.bd_prod = cpu_to_le16(bd_prod);
  1253. rx_prod.cqe_prod = cpu_to_le16(cq_prod);
  1254. DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
  1255. }
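/* Post a single Rx buffer: grab a free descriptor, produce a BD and a
 * CQE slot, record the buffer address/length, and either defer the
 * doorbell (notify_fw == 0) or update the firmware producers now.
 * Returns -EBUSY when no descriptor or chain element is available.
 */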
  1256. int qed_ll2_post_rx_buffer(void *cxt,
  1257. u8 connection_handle,
  1258. dma_addr_t addr,
  1259. u16 buf_len, void *cookie, u8 notify_fw)
  1260. {
  1261. struct qed_hwfn *p_hwfn = cxt;
  1262. struct core_rx_bd_with_buff_len *p_curb = NULL;
  1263. struct qed_ll2_rx_packet *p_curp = NULL;
  1264. struct qed_ll2_info *p_ll2_conn;
  1265. struct qed_ll2_rx_queue *p_rx;
  1266. unsigned long flags;
  1267. void *p_data;
  1268. int rc = 0;
  1269. p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
  1270. if (!p_ll2_conn)
  1271. return -EINVAL;
  1272. p_rx = &p_ll2_conn->rx_queue;
  1273. spin_lock_irqsave(&p_rx->lock, flags);
  1274. if (!list_empty(&p_rx->free_descq))
  1275. p_curp = list_first_entry(&p_rx->free_descq,
  1276. struct qed_ll2_rx_packet, list_entry);
  1277. if (p_curp) {
  1278. if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
  1279. qed_chain_get_elem_left(&p_rx->rcq_chain)) {
  1280. p_data = qed_chain_produce(&p_rx->rxq_chain);
  1281. p_curb = (struct core_rx_bd_with_buff_len *)p_data;
  1282. qed_chain_produce(&p_rx->rcq_chain);
  1283. }
  1284. }
  1285. /* If we're lacking entries, let's try to flush buffers to FW */
  1286. if (!p_curp || !p_curb) {
  1287. rc = -EBUSY;
  1288. p_curp = NULL;
  1289. goto out_notify;
  1290. }
  1291. /* We have an Rx packet we can fill */
  1292. DMA_REGPAIR_LE(p_curb->addr, addr);
  1293. p_curb->buff_length = cpu_to_le16(buf_len);
  1294. p_curp->rx_buf_addr = addr;
  1295. p_curp->cookie = cookie;
  1296. p_curp->rxq_bd = p_curb;
  1297. p_curp->buf_length = buf_len;
  1298. list_del(&p_curp->list_entry);
  1299. /* Check if we only want to enqueue this packet without informing FW */
  1300. if (!notify_fw) {
  1301. list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
  1302. goto out;
  1303. }
  1304. out_notify:
  1305. qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
  1306. out:
  1307. spin_unlock_irqrestore(&p_rx->lock, flags);
  1308. return rc;
  1309. }
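
/* Bind a free Tx descriptor to the packet described by 'pkt': record the
 * cookie, the number of BDs the packet will consume and the first fragment,
 * and mark it as the queue's current packet in flight.
 */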
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
						       : CORE_TX_DEST_LB;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}
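
/* qed_ll2_prepare_tx_packet() - queue a Tx packet on a connection.
 *
 * Reserves a Tx descriptor, builds the first BD from pkt->first_frag and
 * produces empty BDs for the remaining fragments; those are filled in later
 * through qed_ll2_set_fragment_of_tx_packet(). The doorbell is rung only
 * after all pkt->num_of_bds fragments have been provided, and only if
 * notify_fw was set.
 *
 * Minimal usage sketch for a two-fragment packet (illustrative only; the
 * handles and DMA mappings are caller-provided, mirroring
 * qed_ll2_start_xmit() below):
 *
 *	struct qed_ll2_tx_pkt_info pkt = {};
 *
 *	pkt.num_of_bds = 2;
 *	pkt.first_frag = first_dma;
 *	pkt.first_frag_len = first_len;
 *	pkt.tx_dest = QED_LL2_TX_DEST_NW;
 *	pkt.cookie = ctx;
 *	rc = qed_ll2_prepare_tx_packet(hwfn, handle, &pkt, true);
 *	if (!rc)
 *		rc = qed_ll2_set_fragment_of_tx_packet(hwfn, handle,
 *						       frag_dma, frag_len);
 */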
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);

	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
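
/* Tear down an established connection: stop the Tx/Rx queues through
 * slowpath ramrods, flush any descriptors still queued on them, and for
 * FCoE connections remove the 0x8906/0x8914 (FCoE/FIP) ethertype filters
 * that were added when the connection was established.
 */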
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}
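
/* Lifecycle of the per-hwfn array of LL2 connection objects:
 * qed_ll2_alloc() reserves QED_MAX_NUM_OF_LL2_CONNECTIONS entries,
 * qed_ll2_setup() initializes their mutexes, and qed_ll2_free() releases
 * the array again.
 */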
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}
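
/* Statistics helpers: each one copies the relevant storm (TSDM/USDM/PSDM)
 * RAM block for this queue via qed_memcpy_from() and folds the 64-bit
 * HI/LO register pairs into the flat qed_ll2_stats structure.
 */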
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length =
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ =
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error =
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
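
/* Translate the driver-level qed_ll2_params into the acquire-time input of
 * an LL2 connection, wiring in the default ll2_cbs callbacks above and the
 * requested Tx destination (network or loopback).
 */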
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}
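
/* Bring up the dedicated loopback connection used for iSCSI out-of-order
 * (OOO) handling; its handle is stored in the iSCSI PF parameters for
 * later use.
 */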
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
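
/* Driver-level start of the main LL2 connection: allocate and post the Rx
 * buffer pool, acquire and establish the connection type matching the PF
 * personality, optionally start the OOO queue for iSCSI, and add an LLH MAC
 * filter for the requested LL2 MAC address.
 */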
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:
release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}
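
/* Driver-level stop: undo qed_ll2_start() by removing the MAC filter,
 * stopping the OOO queue if it was started, terminating and releasing the
 * connection, and freeing the Rx buffer pool.
 */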
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}
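
/* Transmit an skb over the LL2 connection: DMA-map the linear part and each
 * page fragment, then hand them to the firmware as one BD per fragment via
 * qed_ll2_prepare_tx_packet() / qed_ll2_set_fragment_of_tx_packet().
 */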
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;

	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much to do: part of the packet
		 * has already been posted and we can't free the memory, so
		 * wait for the completion instead.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}
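
/* Driver-level LL2 ops table; the callbacks below are wired to the
 * qed_ll2_start/stop/start_xmit helpers defined in this file.
 */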
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}