/* qed_fcoe.c */
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <asm/byteorder.h>
  34. #include <asm/param.h>
  35. #include <linux/delay.h>
  36. #include <linux/dma-mapping.h>
  37. #include <linux/interrupt.h>
  38. #include <linux/kernel.h>
  39. #include <linux/log2.h>
  40. #include <linux/module.h>
  41. #include <linux/pci.h>
  42. #include <linux/slab.h>
  43. #include <linux/stddef.h>
  44. #include <linux/string.h>
  45. #include <linux/workqueue.h>
  46. #include <linux/errno.h>
  47. #include <linux/list.h>
  48. #include <linux/spinlock.h>
  49. #define __PREVENT_DUMP_MEM_ARR__
  50. #define __PREVENT_PXP_GLOBAL_WIN__
  51. #include "qed.h"
  52. #include "qed_cxt.h"
  53. #include "qed_dev_api.h"
  54. #include "qed_fcoe.h"
  55. #include "qed_hsi.h"
  56. #include "qed_hw.h"
  57. #include "qed_int.h"
  58. #include "qed_ll2.h"
  59. #include "qed_mcp.h"
  60. #include "qed_reg_addr.h"
  61. #include "qed_sp.h"
  62. #include "qed_sriov.h"
  63. #include <linux/qed/qed_fcoe_if.h>
/* Per-connection FCoE state tracked by the qed core.
 * Idle entries are pooled on qed_fcoe_info::free_list (see list_entry).
 */
struct qed_fcoe_conn {
	struct list_head list_entry;	/* link in p_fcoe_info->free_list */
	bool free_on_delete;		/* kfree() this struct on teardown */
	u16 conn_id;
	u32 icid;			/* context id acquired from the CXT manager */
	u32 fw_cid;			/* (opaque_fid << 16) | icid */
	u8 layer_code;

	/* Send queue: PBL plus current/next chain pages (DMA addresses) */
	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;

	/* Transfer queue: PBL page and data pages, DMA + virtual views */
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];

	/* Confirmation queue: PBL page and data pages, DMA + virtual views */
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	/* MAC addresses split into 16-bit words as the ramrod data expects */
	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;		/* TX physical queue, host byte order */

	struct fc_addr_nw s_id;		/* source FC address */
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;		/* destination FC address */
	u8 flags;
	u8 def_q_idx;
};
  101. static int
  102. qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
  103. enum spq_mode comp_mode,
  104. struct qed_spq_comp_cb *p_comp_addr)
  105. {
  106. struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
  107. struct fcoe_init_ramrod_params *p_ramrod = NULL;
  108. struct fcoe_init_func_ramrod_data *p_data;
  109. struct e4_fcoe_conn_context *p_cxt = NULL;
  110. struct qed_spq_entry *p_ent = NULL;
  111. struct qed_sp_init_data init_data;
  112. struct qed_cxt_info cxt_info;
  113. u32 dummy_cid;
  114. int rc = 0;
  115. u16 tmp;
  116. u8 i;
  117. /* Get SPQ entry */
  118. memset(&init_data, 0, sizeof(init_data));
  119. init_data.cid = qed_spq_get_cid(p_hwfn);
  120. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  121. init_data.comp_mode = comp_mode;
  122. init_data.p_comp_data = p_comp_addr;
  123. rc = qed_sp_init_request(p_hwfn, &p_ent,
  124. FCOE_RAMROD_CMD_ID_INIT_FUNC,
  125. PROTOCOLID_FCOE, &init_data);
  126. if (rc)
  127. return rc;
  128. p_ramrod = &p_ent->ramrod.fcoe_init;
  129. p_data = &p_ramrod->init_ramrod_data;
  130. fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
  131. /* Sanity */
  132. if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
  133. DP_ERR(p_hwfn,
  134. "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
  135. fcoe_pf_params->num_cqs,
  136. p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
  137. return -EINVAL;
  138. }
  139. p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
  140. tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
  141. p_data->sq_num_pages_in_pbl = tmp;
  142. rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
  143. if (rc)
  144. return rc;
  145. cxt_info.iid = dummy_cid;
  146. rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
  147. if (rc) {
  148. DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
  149. dummy_cid);
  150. return rc;
  151. }
  152. p_cxt = cxt_info.p_cxt;
  153. SET_FIELD(p_cxt->tstorm_ag_context.flags3,
  154. E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
  155. fcoe_pf_params->dummy_icid = (u16)dummy_cid;
  156. tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
  157. p_data->func_params.num_tasks = tmp;
  158. p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
  159. p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
  160. DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
  161. fcoe_pf_params->glbl_q_params_addr);
  162. tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
  163. p_data->q_params.cq_num_entries = tmp;
  164. tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
  165. p_data->q_params.cmdq_num_entries = tmp;
  166. tmp = fcoe_pf_params->num_cqs;
  167. p_data->q_params.num_queues = (u8)tmp;
  168. tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
  169. p_data->q_params.queue_relative_offset = (u8)tmp;
  170. for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
  171. u16 igu_sb_id;
  172. igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
  173. tmp = cpu_to_le16(igu_sb_id);
  174. p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
  175. }
  176. p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
  177. p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
  178. p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
  179. DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
  180. fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
  181. p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
  182. fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
  183. tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
  184. p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
  185. tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
  186. p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
  187. DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
  188. fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
  189. p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
  190. fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
  191. tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
  192. p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
  193. tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
  194. p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
  195. tmp = fcoe_pf_params->rq_buffer_size;
  196. p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
  197. if (fcoe_pf_params->is_target) {
  198. SET_FIELD(p_data->q_params.q_validity,
  199. SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
  200. if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
  201. SET_FIELD(p_data->q_params.q_validity,
  202. SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
  203. SET_FIELD(p_data->q_params.q_validity,
  204. SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
  205. } else {
  206. SET_FIELD(p_data->q_params.q_validity,
  207. SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
  208. }
  209. rc = qed_spq_post(p_hwfn, p_ent, NULL);
  210. return rc;
  211. }
  212. static int
  213. qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
  214. struct qed_fcoe_conn *p_conn,
  215. enum spq_mode comp_mode,
  216. struct qed_spq_comp_cb *p_comp_addr)
  217. {
  218. struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
  219. struct fcoe_conn_offload_ramrod_data *p_data;
  220. struct qed_spq_entry *p_ent = NULL;
  221. struct qed_sp_init_data init_data;
  222. u16 physical_q0, tmp;
  223. int rc;
  224. /* Get SPQ entry */
  225. memset(&init_data, 0, sizeof(init_data));
  226. init_data.cid = p_conn->icid;
  227. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  228. init_data.comp_mode = comp_mode;
  229. init_data.p_comp_data = p_comp_addr;
  230. rc = qed_sp_init_request(p_hwfn, &p_ent,
  231. FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
  232. PROTOCOLID_FCOE, &init_data);
  233. if (rc)
  234. return rc;
  235. p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
  236. p_data = &p_ramrod->offload_ramrod_data;
  237. /* Transmission PQ is the first of the PF */
  238. physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
  239. p_conn->physical_q0 = cpu_to_le16(physical_q0);
  240. p_data->physical_q0 = cpu_to_le16(physical_q0);
  241. p_data->conn_id = cpu_to_le16(p_conn->conn_id);
  242. DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
  243. DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
  244. DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
  245. DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
  246. DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
  247. DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
  248. DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
  249. DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
  250. DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
  251. p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
  252. p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
  253. p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
  254. p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
  255. p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
  256. p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
  257. tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
  258. p_data->tx_max_fc_pay_len = tmp;
  259. tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
  260. p_data->e_d_tov_timer_val = tmp;
  261. tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
  262. p_data->rec_rr_tov_timer_val = tmp;
  263. tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
  264. p_data->rx_max_fc_pay_len = tmp;
  265. p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
  266. p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
  267. p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
  268. p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
  269. p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
  270. p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
  271. p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
  272. p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
  273. p_data->flags = p_conn->flags;
  274. if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
  275. SET_FIELD(p_data->flags,
  276. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
  277. p_data->def_q_idx = p_conn->def_q_idx;
  278. return qed_spq_post(p_hwfn, p_ent, NULL);
  279. }
  280. static int
  281. qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
  282. struct qed_fcoe_conn *p_conn,
  283. enum spq_mode comp_mode,
  284. struct qed_spq_comp_cb *p_comp_addr)
  285. {
  286. struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
  287. struct qed_spq_entry *p_ent = NULL;
  288. struct qed_sp_init_data init_data;
  289. int rc = 0;
  290. /* Get SPQ entry */
  291. memset(&init_data, 0, sizeof(init_data));
  292. init_data.cid = p_conn->icid;
  293. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  294. init_data.comp_mode = comp_mode;
  295. init_data.p_comp_data = p_comp_addr;
  296. rc = qed_sp_init_request(p_hwfn, &p_ent,
  297. FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
  298. PROTOCOLID_FCOE, &init_data);
  299. if (rc)
  300. return rc;
  301. p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
  302. DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
  303. p_conn->terminate_params);
  304. return qed_spq_post(p_hwfn, p_ent, NULL);
  305. }
  306. static int
  307. qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
  308. struct qed_ptt *p_ptt,
  309. enum spq_mode comp_mode,
  310. struct qed_spq_comp_cb *p_comp_addr)
  311. {
  312. struct qed_spq_entry *p_ent = NULL;
  313. struct qed_sp_init_data init_data;
  314. u32 active_segs = 0;
  315. int rc = 0;
  316. /* Get SPQ entry */
  317. memset(&init_data, 0, sizeof(init_data));
  318. init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
  319. init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
  320. init_data.comp_mode = comp_mode;
  321. init_data.p_comp_data = p_comp_addr;
  322. rc = qed_sp_init_request(p_hwfn, &p_ent,
  323. FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
  324. PROTOCOLID_FCOE, &init_data);
  325. if (rc)
  326. return rc;
  327. active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
  328. active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
  329. qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);
  330. return qed_spq_post(p_hwfn, p_ent, NULL);
  331. }
/* Get a connection object: reuse one from the free pool if available,
 * otherwise allocate a fresh one together with its XFERQ/CONFQ DMA pages.
 * Returns 0 and sets *p_out_conn on success, -ENOMEM otherwise.
 */
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	/* Fast path: pop a previously-freed connection off the pool */
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	/* Slow path: build a new connection and its DMA queues */
	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	/* XFERQ page-base-list (PBL) page */
	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	/* XFERQ data pages; each DMA address is also recorded in the PBL */
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	/* CONFQ PBL page */
	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	/* CONFQ data pages, mirrored into the CONFQ PBL */
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

	/* Unwind in reverse allocation order. p_conn is kzalloc'd, so page
	 * pointers never allocated are NULL and the loops skip them.
	 */
nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}
  415. static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
  416. struct qed_fcoe_conn *p_conn)
  417. {
  418. u32 i;
  419. if (!p_conn)
  420. return;
  421. if (p_conn->confq_pbl_addr_virt_addr)
  422. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  423. QED_CHAIN_PAGE_SIZE,
  424. p_conn->confq_pbl_addr_virt_addr,
  425. p_conn->confq_pbl_addr);
  426. for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
  427. if (!p_conn->confq_addr_virt_addr[i])
  428. continue;
  429. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  430. QED_CHAIN_PAGE_SIZE,
  431. p_conn->confq_addr_virt_addr[i],
  432. p_conn->confq_addr[i]);
  433. }
  434. if (p_conn->xferq_pbl_addr_virt_addr)
  435. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  436. QED_CHAIN_PAGE_SIZE,
  437. p_conn->xferq_pbl_addr_virt_addr,
  438. p_conn->xferq_pbl_addr);
  439. for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
  440. if (!p_conn->xferq_addr_virt_addr[i])
  441. continue;
  442. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  443. QED_CHAIN_PAGE_SIZE,
  444. p_conn->xferq_addr_virt_addr[i],
  445. p_conn->xferq_addr[i]);
  446. }
  447. kfree(p_conn);
  448. }
  449. static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
  450. {
  451. return (u8 __iomem *)p_hwfn->doorbells +
  452. qed_db_addr(cid, DQ_DEMS_LEGACY);
  453. }
  454. static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
  455. u8 bdq_id)
  456. {
  457. if (RESC_NUM(p_hwfn, QED_BDQ)) {
  458. return (u8 __iomem *)p_hwfn->regview +
  459. GTT_BAR0_MAP_REG_MSDM_RAM +
  460. MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
  461. QED_BDQ),
  462. bdq_id);
  463. } else {
  464. DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
  465. return NULL;
  466. }
  467. }
  468. static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
  469. u8 bdq_id)
  470. {
  471. if (RESC_NUM(p_hwfn, QED_BDQ)) {
  472. return (u8 __iomem *)p_hwfn->regview +
  473. GTT_BAR0_MAP_REG_TSDM_RAM +
  474. TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
  475. QED_BDQ),
  476. bdq_id);
  477. } else {
  478. DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
  479. return NULL;
  480. }
  481. }
  482. int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
  483. {
  484. struct qed_fcoe_info *p_fcoe_info;
  485. /* Allocate LL2's set struct */
  486. p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
  487. if (!p_fcoe_info) {
  488. DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
  489. return -ENOMEM;
  490. }
  491. INIT_LIST_HEAD(&p_fcoe_info->free_list);
  492. p_hwfn->p_fcoe_info = p_fcoe_info;
  493. return 0;
  494. }
  495. void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
  496. {
  497. struct e4_fcoe_task_context *p_task_ctx = NULL;
  498. int rc;
  499. u32 i;
  500. spin_lock_init(&p_hwfn->p_fcoe_info->lock);
  501. for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
  502. rc = qed_cxt_get_task_ctx(p_hwfn, i,
  503. QED_CTX_WORKING_MEM,
  504. (void **)&p_task_ctx);
  505. if (rc)
  506. continue;
  507. memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
  508. SET_FIELD(p_task_ctx->timer_context.logical_client_0,
  509. TIMERS_CONTEXT_VALIDLC0, 1);
  510. SET_FIELD(p_task_ctx->timer_context.logical_client_1,
  511. TIMERS_CONTEXT_VALIDLC1, 1);
  512. SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
  513. E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
  514. }
  515. }
  516. void qed_fcoe_free(struct qed_hwfn *p_hwfn)
  517. {
  518. struct qed_fcoe_conn *p_conn = NULL;
  519. if (!p_hwfn->p_fcoe_info)
  520. return;
  521. while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
  522. p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
  523. struct qed_fcoe_conn, list_entry);
  524. if (!p_conn)
  525. break;
  526. list_del(&p_conn->list_entry);
  527. qed_fcoe_free_connection(p_hwfn, p_conn);
  528. }
  529. kfree(p_hwfn->p_fcoe_info);
  530. p_hwfn->p_fcoe_info = NULL;
  531. }
  532. static int
  533. qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
  534. struct qed_fcoe_conn *p_in_conn,
  535. struct qed_fcoe_conn **p_out_conn)
  536. {
  537. struct qed_fcoe_conn *p_conn = NULL;
  538. int rc = 0;
  539. u32 icid;
  540. spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
  541. rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
  542. spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
  543. if (rc)
  544. return rc;
  545. /* Use input connection [if provided] or allocate a new one */
  546. if (p_in_conn) {
  547. p_conn = p_in_conn;
  548. } else {
  549. rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
  550. if (rc) {
  551. spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
  552. qed_cxt_release_cid(p_hwfn, icid);
  553. spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
  554. return rc;
  555. }
  556. }
  557. p_conn->icid = icid;
  558. p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
  559. *p_out_conn = p_conn;
  560. return rc;
  561. }
  562. static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
  563. struct qed_fcoe_conn *p_conn)
  564. {
  565. spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
  566. list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
  567. qed_cxt_release_cid(p_hwfn, p_conn->icid);
  568. spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
  569. }
  570. static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
  571. struct qed_ptt *p_ptt,
  572. struct qed_fcoe_stats *p_stats)
  573. {
  574. struct fcoe_rx_stat tstats;
  575. u32 tstats_addr;
  576. memset(&tstats, 0, sizeof(tstats));
  577. tstats_addr = BAR0_MAP_REG_TSDM_RAM +
  578. TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
  579. qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
  580. p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
  581. p_stats->fcoe_rx_data_pkt_cnt =
  582. HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
  583. p_stats->fcoe_rx_xfer_pkt_cnt =
  584. HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
  585. p_stats->fcoe_rx_other_pkt_cnt =
  586. HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);
  587. p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
  588. le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
  589. p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
  590. le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
  591. p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
  592. le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
  593. p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
  594. le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
  595. p_stats->fcoe_silent_drop_total_pkt_cnt =
  596. le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
  597. }
  598. static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
  599. struct qed_ptt *p_ptt,
  600. struct qed_fcoe_stats *p_stats)
  601. {
  602. struct fcoe_tx_stat pstats;
  603. u32 pstats_addr;
  604. memset(&pstats, 0, sizeof(pstats));
  605. pstats_addr = BAR0_MAP_REG_PSDM_RAM +
  606. PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
  607. qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
  608. p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
  609. p_stats->fcoe_tx_data_pkt_cnt =
  610. HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
  611. p_stats->fcoe_tx_xfer_pkt_cnt =
  612. HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
  613. p_stats->fcoe_tx_other_pkt_cnt =
  614. HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
  615. }
  616. static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
  617. struct qed_fcoe_stats *p_stats)
  618. {
  619. struct qed_ptt *p_ptt;
  620. memset(p_stats, 0, sizeof(*p_stats));
  621. p_ptt = qed_ptt_acquire(p_hwfn);
  622. if (!p_ptt) {
  623. DP_ERR(p_hwfn, "Failed to acquire ptt\n");
  624. return -EINVAL;
  625. }
  626. _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
  627. _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);
  628. qed_ptt_release(p_hwfn, p_ptt);
  629. return 0;
  630. }
/* Hash-table wrapper for an offloaded connection; hashed by its icid
 * in cdev->connections (see qed_fcoe_acquire_conn / qed_fcoe_get_hash).
 */
struct qed_hash_fcoe_con {
	struct hlist_node node;		/* link in cdev->connections */
	struct qed_fcoe_conn *con;	/* the underlying connection */
};
  635. static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
  636. struct qed_dev_fcoe_info *info)
  637. {
  638. struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
  639. int rc;
  640. memset(info, 0, sizeof(*info));
  641. rc = qed_fill_dev_info(cdev, &info->common);
  642. info->primary_dbq_rq_addr =
  643. qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
  644. info->secondary_bdq_rq_addr =
  645. qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
  646. info->wwpn = hwfn->mcp_info->func_info.wwn_port;
  647. info->wwnn = hwfn->mcp_info->func_info.wwn_node;
  648. info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);
  649. return rc;
  650. }
  651. static void qed_register_fcoe_ops(struct qed_dev *cdev,
  652. struct qed_fcoe_cb_ops *ops, void *cookie)
  653. {
  654. cdev->protocol_ops.fcoe = ops;
  655. cdev->ops_cookie = cookie;
  656. }
  657. static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
  658. u32 handle)
  659. {
  660. struct qed_hash_fcoe_con *hash_con = NULL;
  661. if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
  662. return NULL;
  663. hash_for_each_possible(cdev->connections, hash_con, node, handle) {
  664. if (hash_con->con->icid == handle)
  665. break;
  666. }
  667. if (!hash_con || (hash_con->con->icid != handle))
  668. return NULL;
  669. return hash_con;
  670. }
  671. static int qed_fcoe_stop(struct qed_dev *cdev)
  672. {
  673. struct qed_ptt *p_ptt;
  674. int rc;
  675. if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
  676. DP_NOTICE(cdev, "fcoe already stopped\n");
  677. return 0;
  678. }
  679. if (!hash_empty(cdev->connections)) {
  680. DP_NOTICE(cdev,
  681. "Can't stop fcoe - not all connections were returned\n");
  682. return -EINVAL;
  683. }
  684. p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
  685. if (!p_ptt)
  686. return -EAGAIN;
  687. /* Stop the fcoe */
  688. rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
  689. QED_SPQ_MODE_EBLOCK, NULL);
  690. cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
  691. qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
  692. return rc;
  693. }
/* Start the FCoE function: send the INIT_FUNC ramrod, mark storage as
 * started, and optionally report the task (TID) memory layout back to the
 * caller via @tasks. Any failure after the ramrod rolls back through
 * qed_fcoe_stop().
 */
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started;\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	/* Hand the TID layout back to the upper driver if requested */
	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}
  734. static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
  735. u32 *handle,
  736. u32 *fw_cid, void __iomem **p_doorbell)
  737. {
  738. struct qed_hash_fcoe_con *hash_con;
  739. int rc;
  740. /* Allocate a hashed connection */
  741. hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
  742. if (!hash_con) {
  743. DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
  744. return -ENOMEM;
  745. }
  746. /* Acquire the connection */
  747. rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
  748. &hash_con->con);
  749. if (rc) {
  750. DP_NOTICE(cdev, "Failed to acquire Connection\n");
  751. kfree(hash_con);
  752. return rc;
  753. }
  754. /* Added the connection to hash table */
  755. *handle = hash_con->con->icid;
  756. *fw_cid = hash_con->con->fw_cid;
  757. hash_add(cdev->connections, &hash_con->node, *handle);
  758. if (p_doorbell)
  759. *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
  760. *handle);
  761. return 0;
  762. }
  763. static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
  764. {
  765. struct qed_hash_fcoe_con *hash_con;
  766. hash_con = qed_fcoe_get_hash(cdev, handle);
  767. if (!hash_con) {
  768. DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
  769. handle);
  770. return -EINVAL;
  771. }
  772. hlist_del(&hash_con->node);
  773. qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
  774. kfree(hash_con);
  775. return 0;
  776. }
  777. static int qed_fcoe_offload_conn(struct qed_dev *cdev,
  778. u32 handle,
  779. struct qed_fcoe_params_offload *conn_info)
  780. {
  781. struct qed_hash_fcoe_con *hash_con;
  782. struct qed_fcoe_conn *con;
  783. hash_con = qed_fcoe_get_hash(cdev, handle);
  784. if (!hash_con) {
  785. DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
  786. handle);
  787. return -EINVAL;
  788. }
  789. /* Update the connection with information from the params */
  790. con = hash_con->con;
  791. con->sq_pbl_addr = conn_info->sq_pbl_addr;
  792. con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
  793. con->sq_next_page_addr = conn_info->sq_next_page_addr;
  794. con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
  795. con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
  796. con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
  797. con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
  798. con->vlan_tag = conn_info->vlan_tag;
  799. con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
  800. con->flags = conn_info->flags;
  801. con->def_q_idx = conn_info->def_q_idx;
  802. con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
  803. conn_info->src_mac[4];
  804. con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
  805. conn_info->src_mac[2];
  806. con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
  807. conn_info->src_mac[0];
  808. con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
  809. conn_info->dst_mac[4];
  810. con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
  811. conn_info->dst_mac[2];
  812. con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
  813. conn_info->dst_mac[0];
  814. con->s_id.addr_hi = conn_info->s_id.addr_hi;
  815. con->s_id.addr_mid = conn_info->s_id.addr_mid;
  816. con->s_id.addr_lo = conn_info->s_id.addr_lo;
  817. con->d_id.addr_hi = conn_info->d_id.addr_hi;
  818. con->d_id.addr_mid = conn_info->d_id.addr_mid;
  819. con->d_id.addr_lo = conn_info->d_id.addr_lo;
  820. return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
  821. QED_SPQ_MODE_EBLOCK, NULL);
  822. }
  823. static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
  824. u32 handle, dma_addr_t terminate_params)
  825. {
  826. struct qed_hash_fcoe_con *hash_con;
  827. struct qed_fcoe_conn *con;
  828. hash_con = qed_fcoe_get_hash(cdev, handle);
  829. if (!hash_con) {
  830. DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
  831. handle);
  832. return -EINVAL;
  833. }
  834. /* Update the connection with information from the params */
  835. con = hash_con->con;
  836. con->terminate_params = terminate_params;
  837. return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
  838. QED_SPQ_MODE_EBLOCK, NULL);
  839. }
  840. static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
  841. {
  842. return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
  843. }
  844. void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
  845. struct qed_mcp_fcoe_stats *stats)
  846. {
  847. struct qed_fcoe_stats proto_stats;
  848. /* Retrieve FW statistics */
  849. memset(&proto_stats, 0, sizeof(proto_stats));
  850. if (qed_fcoe_stats(cdev, &proto_stats)) {
  851. DP_VERBOSE(cdev, QED_MSG_STORAGE,
  852. "Failed to collect FCoE statistics\n");
  853. return;
  854. }
  855. /* Translate FW statistics into struct */
  856. stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
  857. proto_stats.fcoe_rx_xfer_pkt_cnt +
  858. proto_stats.fcoe_rx_other_pkt_cnt;
  859. stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
  860. proto_stats.fcoe_tx_xfer_pkt_cnt +
  861. proto_stats.fcoe_tx_other_pkt_cnt;
  862. stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;
  863. /* Request protocol driver to fill-in the rest */
  864. if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
  865. struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
  866. void *cookie = cdev->ops_cookie;
  867. if (ops->get_login_failures)
  868. stats->login_failure = ops->get_login_failures(cookie);
  869. }
  870. }
  871. static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
  872. .common = &qed_common_ops_pass,
  873. .ll2 = &qed_ll2_ops_pass,
  874. .fill_dev_info = &qed_fill_fcoe_dev_info,
  875. .start = &qed_fcoe_start,
  876. .stop = &qed_fcoe_stop,
  877. .register_ops = &qed_register_fcoe_ops,
  878. .acquire_conn = &qed_fcoe_acquire_conn,
  879. .release_conn = &qed_fcoe_release_conn,
  880. .offload_conn = &qed_fcoe_offload_conn,
  881. .destroy_conn = &qed_fcoe_destroy_conn,
  882. .get_stats = &qed_fcoe_stats,
  883. };
/* Hand the FCoE operations table to the protocol driver. */
const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);
/* Counterpart to qed_get_fcoe_ops(); nothing to release at present. */
void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);