qed_fcoe.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>
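/* Per-connection state kept by the qed core for an offloaded FCoE
 * connection: firmware IDs, SQ/XFERQ/CONFQ PBL and page addresses, and the
 * MAC/FC addressing and timer parameters passed to the offload ramrod.
 */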
struct qed_fcoe_conn {
        struct list_head list_entry;
        bool free_on_delete;

        u16 conn_id;
        u32 icid;
        u32 fw_cid;
        u8 layer_code;

        dma_addr_t sq_pbl_addr;
        dma_addr_t sq_curr_page_addr;
        dma_addr_t sq_next_page_addr;
        dma_addr_t xferq_pbl_addr;
        void *xferq_pbl_addr_virt_addr;
        dma_addr_t xferq_addr[4];
        void *xferq_addr_virt_addr[4];
        dma_addr_t confq_pbl_addr;
        void *confq_pbl_addr_virt_addr;
        dma_addr_t confq_addr[2];
        void *confq_addr_virt_addr[2];

        dma_addr_t terminate_params;

        u16 dst_mac_addr_lo;
        u16 dst_mac_addr_mid;
        u16 dst_mac_addr_hi;
        u16 src_mac_addr_lo;
        u16 src_mac_addr_mid;
        u16 src_mac_addr_hi;

        u16 tx_max_fc_pay_len;
        u16 e_d_tov_timer_val;
        u16 rec_tov_timer_val;
        u16 rx_max_fc_pay_len;
        u16 vlan_tag;
        u16 physical_q0;

        struct fc_addr_nw s_id;
        u8 max_conc_seqs_c3;
        struct fc_addr_nw d_id;
        u8 flags;
        u8 def_q_idx;
};
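/* Post the FCOE_RAMROD_CMD_ID_INIT_FUNC slowpath ramrod: acquires a dummy
 * CID for the timer context, then fills the function/queue parameters
 * (MTU, SQ PBL pages, CQ/CMDQ sizes, BDQ PBLs and XON/XOFF thresholds)
 * from the PF's fcoe_pf_params before posting to the SPQ.
 */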
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
                       enum spq_mode comp_mode,
                       struct qed_spq_comp_cb *p_comp_addr)
{
        struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
        struct fcoe_init_ramrod_params *p_ramrod = NULL;
        struct fcoe_init_func_ramrod_data *p_data;
        struct fcoe_conn_context *p_cxt = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        struct qed_cxt_info cxt_info;
        u32 dummy_cid;
        int rc = 0;
        u16 tmp;
        u8 i;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_addr;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 FCOE_RAMROD_CMD_ID_INIT_FUNC,
                                 PROTOCOLID_FCOE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.fcoe_init;
        p_data = &p_ramrod->init_ramrod_data;
        fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

        p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
        tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
        p_data->sq_num_pages_in_pbl = tmp;

        rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
        if (rc)
                return rc;

        cxt_info.iid = dummy_cid;
        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
        if (rc) {
                DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
                          dummy_cid);
                return rc;
        }
        p_cxt = cxt_info.p_cxt;
        SET_FIELD(p_cxt->tstorm_ag_context.flags3,
                  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

        fcoe_pf_params->dummy_icid = (u16)dummy_cid;

        tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
        p_data->func_params.num_tasks = tmp;
        p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
        p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

        DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
                       fcoe_pf_params->glbl_q_params_addr);

        tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
        p_data->q_params.cq_num_entries = tmp;

        tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
        p_data->q_params.cmdq_num_entries = tmp;

        tmp = fcoe_pf_params->num_cqs;
        p_data->q_params.num_queues = (u8)tmp;

        tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
        p_data->q_params.queue_relative_offset = (u8)tmp;

        for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
                tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
                p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
        }

        p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
        p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

        p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

        DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
                       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
        p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
            fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
        tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
        p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
        tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
        p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

        DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
                       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
        p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
            fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
        tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
        p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
        tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
        p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);

        tmp = fcoe_pf_params->rq_buffer_size;
        p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

        if (fcoe_pf_params->is_target) {
                SET_FIELD(p_data->q_params.q_validity,
                          SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
                if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
                        SET_FIELD(p_data->q_params.q_validity,
                                  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
                SET_FIELD(p_data->q_params.q_validity,
                          SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
        } else {
                SET_FIELD(p_data->q_params.q_validity,
                          SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
        }

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        return rc;
}
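/* Post the FCOE_RAMROD_CMD_ID_OFFLOAD_CONN ramrod for a single connection:
 * picks the PF's offload PQ as physical_q0 and copies the connection's
 * queue addresses, MAC/FC addresses, timers and VLAN into the ramrod data.
 */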
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
                         struct qed_fcoe_conn *p_conn,
                         enum spq_mode comp_mode,
                         struct qed_spq_comp_cb *p_comp_addr)
{
        struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
        struct fcoe_conn_offload_ramrod_data *p_data;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u16 physical_q0, tmp;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_conn->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_addr;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
                                 PROTOCOLID_FCOE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
        p_data = &p_ramrod->offload_ramrod_data;

        /* Transmission PQ is the first of the PF */
        physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_conn->physical_q0 = cpu_to_le16(physical_q0);
        p_data->physical_q0 = cpu_to_le16(physical_q0);

        p_data->conn_id = cpu_to_le16(p_conn->conn_id);
        DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
        DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
        DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
        DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
        DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
        DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
        DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
        DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
        DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

        p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
        p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
        p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
        p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
        p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
        p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

        tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
        p_data->tx_max_fc_pay_len = tmp;
        tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
        p_data->e_d_tov_timer_val = tmp;
        tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
        p_data->rec_rr_tov_timer_val = tmp;
        tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
        p_data->rx_max_fc_pay_len = tmp;

        p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
        p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
        p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
        p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
        p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
        p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
        p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
        p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
        p_data->flags = p_conn->flags;
        p_data->def_q_idx = p_conn->def_q_idx;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
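/* Post the FCOE_RAMROD_CMD_ID_TERMINATE_CONN ramrod, passing firmware the
 * DMA address of the caller-supplied terminate-parameters buffer.
 */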
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
                         struct qed_fcoe_conn *p_conn,
                         enum spq_mode comp_mode,
                         struct qed_spq_comp_cb *p_comp_addr)
{
        struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_conn->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_addr;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
                                 PROTOCOLID_FCOE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
        DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
                       p_conn->terminate_params);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
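/* Post the FCOE_RAMROD_CMD_ID_DESTROY_FUNC ramrod and disable the FCoE task
 * segment in the timers block (TM_REG_PF_ENABLE_TASK) before posting.
 */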
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      enum spq_mode comp_mode,
                      struct qed_spq_comp_cb *p_comp_addr)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u32 active_segs = 0;
        int rc = 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_addr;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
                                 PROTOCOLID_FCOE, &init_data);
        if (rc)
                return rc;

        active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
        active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
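/* Allocate a connection object: reuse one from the free list if available,
 * otherwise allocate a new one along with DMA-coherent pages for the XFERQ
 * and CONFQ rings and their PBLs, filling each PBL with the page addresses.
 */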
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
                             struct qed_fcoe_conn **p_out_conn)
{
        struct qed_fcoe_conn *p_conn = NULL;
        void *p_addr;
        u32 i;

        spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
        if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
                p_conn =
                    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
                                     struct qed_fcoe_conn, list_entry);
        if (p_conn) {
                list_del(&p_conn->list_entry);
                spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
                *p_out_conn = p_conn;
                return 0;
        }
        spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

        p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
        if (!p_conn)
                return -ENOMEM;

        p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    QED_CHAIN_PAGE_SIZE,
                                    &p_conn->xferq_pbl_addr, GFP_KERNEL);
        if (!p_addr)
                goto nomem_pbl_xferq;
        p_conn->xferq_pbl_addr_virt_addr = p_addr;

        for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
                p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_conn->xferq_addr[i], GFP_KERNEL);
                if (!p_addr)
                        goto nomem_xferq;
                p_conn->xferq_addr_virt_addr[i] = p_addr;

                p_addr = p_conn->xferq_pbl_addr_virt_addr;
                ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
        }

        p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    QED_CHAIN_PAGE_SIZE,
                                    &p_conn->confq_pbl_addr, GFP_KERNEL);
        if (!p_addr)
                goto nomem_xferq;
        p_conn->confq_pbl_addr_virt_addr = p_addr;

        for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
                p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_conn->confq_addr[i], GFP_KERNEL);
                if (!p_addr)
                        goto nomem_confq;
                p_conn->confq_addr_virt_addr[i] = p_addr;

                p_addr = p_conn->confq_pbl_addr_virt_addr;
                ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
        }

        p_conn->free_on_delete = true;
        *p_out_conn = p_conn;
        return 0;

nomem_confq:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          QED_CHAIN_PAGE_SIZE,
                          p_conn->confq_pbl_addr_virt_addr,
                          p_conn->confq_pbl_addr);
        for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
                if (p_conn->confq_addr_virt_addr[i])
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          QED_CHAIN_PAGE_SIZE,
                                          p_conn->confq_addr_virt_addr[i],
                                          p_conn->confq_addr[i]);
nomem_xferq:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          QED_CHAIN_PAGE_SIZE,
                          p_conn->xferq_pbl_addr_virt_addr,
                          p_conn->xferq_pbl_addr);
        for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
                if (p_conn->xferq_addr_virt_addr[i])
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          QED_CHAIN_PAGE_SIZE,
                                          p_conn->xferq_addr_virt_addr[i],
                                          p_conn->xferq_addr[i]);
nomem_pbl_xferq:
        kfree(p_conn);
        return -ENOMEM;
}
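/* Free a connection object and every DMA-coherent page and PBL that
 * qed_fcoe_allocate_connection() allocated for it.
 */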
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
                                     struct qed_fcoe_conn *p_conn)
{
        u32 i;

        if (!p_conn)
                return;

        if (p_conn->confq_pbl_addr_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE,
                                  p_conn->confq_pbl_addr_virt_addr,
                                  p_conn->confq_pbl_addr);

        for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
                if (!p_conn->confq_addr_virt_addr[i])
                        continue;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE,
                                  p_conn->confq_addr_virt_addr[i],
                                  p_conn->confq_addr[i]);
        }

        if (p_conn->xferq_pbl_addr_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE,
                                  p_conn->xferq_pbl_addr_virt_addr,
                                  p_conn->xferq_pbl_addr);

        for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
                if (!p_conn->xferq_addr_virt_addr[i])
                        continue;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  QED_CHAIN_PAGE_SIZE,
                                  p_conn->xferq_addr_virt_addr[i],
                                  p_conn->xferq_addr[i]);
        }
        kfree(p_conn);
}
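/* Helpers that translate a CID or BDQ id into the __iomem address the upper
 * layer rings: the per-connection doorbell in the doorbell BAR, and the
 * MSTORM/TSTORM BDQ external producer locations in the register BAR.
 */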
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
        return (u8 __iomem *)p_hwfn->doorbells +
               qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                   u8 bdq_id)
{
        if (RESC_NUM(p_hwfn, QED_BDQ)) {
                return (u8 __iomem *)p_hwfn->regview +
                       GTT_BAR0_MAP_REG_MSDM_RAM +
                       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
                                                                  QED_BDQ),
                                                       bdq_id);
        } else {
                DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
                return NULL;
        }
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                     u8 bdq_id)
{
        if (RESC_NUM(p_hwfn, QED_BDQ)) {
                return (u8 __iomem *)p_hwfn->regview +
                       GTT_BAR0_MAP_REG_TSDM_RAM +
                       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
                                                                  QED_BDQ),
                                                       bdq_id);
        } else {
                DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
                return NULL;
        }
}
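/* Allocate the per-hwfn FCoE bookkeeping structure and initialize its
 * free-connection list; returns NULL on allocation failure.
 */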
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_fcoe_info *p_fcoe_info;

        /* Allocate LL2's set struct */
        p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
        if (!p_fcoe_info) {
                DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
                return NULL;
        }
        INIT_LIST_HEAD(&p_fcoe_info->free_list);
        return p_fcoe_info;
}
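/* Initialize the FCoE info lock and pre-initialize every FCoE task context:
 * zero it, mark both timer logical clients valid and set the task AG
 * context connection type.
 */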
void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
        struct fcoe_task_context *p_task_ctx = NULL;
        int rc;
        u32 i;

        spin_lock_init(&p_fcoe_info->lock);
        for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
                rc = qed_cxt_get_task_ctx(p_hwfn, i,
                                          QED_CTX_WORKING_MEM,
                                          (void **)&p_task_ctx);
                if (rc)
                        continue;

                memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
                SET_FIELD(p_task_ctx->timer_context.logical_client_0,
                          TIMERS_CONTEXT_VALIDLC0, 1);
                SET_FIELD(p_task_ctx->timer_context.logical_client_1,
                          TIMERS_CONTEXT_VALIDLC1, 1);
                SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
                          TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
        }
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
        struct qed_fcoe_conn *p_conn = NULL;

        if (!p_fcoe_info)
                return;

        while (!list_empty(&p_fcoe_info->free_list)) {
                p_conn = list_first_entry(&p_fcoe_info->free_list,
                                          struct qed_fcoe_conn, list_entry);
                if (!p_conn)
                        break;
                list_del(&p_conn->list_entry);
                qed_fcoe_free_connection(p_hwfn, p_conn);
        }
        kfree(p_fcoe_info);
}
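/* Acquire an FCoE CID from the context manager and bind it to a connection
 * object (either the caller's or a freshly allocated one); the release
 * counterpart returns the connection to the free list and releases the CID.
 */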
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
                            struct qed_fcoe_conn *p_in_conn,
                            struct qed_fcoe_conn **p_out_conn)
{
        struct qed_fcoe_conn *p_conn = NULL;
        int rc = 0;
        u32 icid;

        spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
        rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
        spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
        if (rc)
                return rc;

        /* Use input connection [if provided] or allocate a new one */
        if (p_in_conn) {
                p_conn = p_in_conn;
        } else {
                rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
                if (rc) {
                        spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
                        qed_cxt_release_cid(p_hwfn, icid);
                        spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
                        return rc;
                }
        }

        p_conn->icid = icid;
        p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
        *p_out_conn = p_conn;

        return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
                                        struct qed_fcoe_conn *p_conn)
{
        spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
        list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
        qed_cxt_release_cid(p_hwfn, p_conn->icid);
        spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}
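/* Read the TSTORM (Rx) and PSTORM (Tx) FCoE statistics for this PF out of
 * storm RAM and convert them from little-endian regpairs/u32s into the
 * qed_fcoe_stats structure.
 */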
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_fcoe_stats *p_stats)
{
        struct fcoe_rx_stat tstats;
        u32 tstats_addr;

        memset(&tstats, 0, sizeof(tstats));
        tstats_addr = BAR0_MAP_REG_TSDM_RAM +
            TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
        qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

        p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
        p_stats->fcoe_rx_data_pkt_cnt =
            HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
        p_stats->fcoe_rx_xfer_pkt_cnt =
            HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
        p_stats->fcoe_rx_other_pkt_cnt =
            HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

        p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
            le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
        p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
            le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
        p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
            le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
        p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
            le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
        p_stats->fcoe_silent_drop_total_pkt_cnt =
            le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_fcoe_stats *p_stats)
{
        struct fcoe_tx_stat pstats;
        u32 pstats_addr;

        memset(&pstats, 0, sizeof(pstats));
        pstats_addr = BAR0_MAP_REG_PSDM_RAM +
            PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
        qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

        p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
        p_stats->fcoe_tx_data_pkt_cnt =
            HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
        p_stats->fcoe_tx_xfer_pkt_cnt =
            HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
        p_stats->fcoe_tx_other_pkt_cnt =
            HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
                              struct qed_fcoe_stats *p_stats)
{
        struct qed_ptt *p_ptt;

        memset(p_stats, 0, sizeof(*p_stats));

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                return -EINVAL;
        }

        _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
        _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

        qed_ptt_release(p_hwfn, p_ptt);

        return 0;
}
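/* Everything below implements the qed_fcoe_ops interface exported to the
 * FCoE upper-layer driver; offloaded connections handed out to it are
 * tracked in a hash table keyed by icid.
 */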
struct qed_hash_fcoe_con {
        struct hlist_node node;
        struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
                                  struct qed_dev_fcoe_info *info)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        int rc;

        memset(info, 0, sizeof(*info));
        rc = qed_fill_dev_info(cdev, &info->common);

        info->primary_dbq_rq_addr =
            qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
        info->secondary_bdq_rq_addr =
            qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

        return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
                                  struct qed_fcoe_cb_ops *ops, void *cookie)
{
        cdev->protocol_ops.fcoe = ops;
        cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
                                                   u32 handle)
{
        struct qed_hash_fcoe_con *hash_con = NULL;

        if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
                return NULL;

        hash_for_each_possible(cdev->connections, hash_con, node, handle) {
                if (hash_con->con->icid == handle)
                        break;
        }

        if (!hash_con || (hash_con->con->icid != handle))
                return NULL;

        return hash_con;
}
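/* Stop the FCoE function: refuses while offloaded connections are still
 * outstanding, otherwise posts the destroy-function ramrod and clears the
 * storage-started flag.
 */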
static int qed_fcoe_stop(struct qed_dev *cdev)
{
        struct qed_ptt *p_ptt;
        int rc;

        if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
                DP_NOTICE(cdev, "fcoe already stopped\n");
                return 0;
        }

        if (!hash_empty(cdev->connections)) {
                DP_NOTICE(cdev,
                          "Can't stop fcoe - not all connections were returned\n");
                return -EINVAL;
        }

        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
        if (!p_ptt)
                return -EAGAIN;

        /* Stop the fcoe */
        rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
                                   QED_SPQ_MODE_EBLOCK, NULL);
        cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
        qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

        return rc;
}
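/* Start the FCoE function: posts the init-function ramrod, initializes the
 * connection hash table and, if the caller asked for it, copies the task
 * (TID) memory layout into the caller's qed_fcoe_tid structure.
 */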
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
        int rc;

        if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
                DP_NOTICE(cdev, "fcoe already started\n");
                return 0;
        }

        rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
                                    QED_SPQ_MODE_EBLOCK, NULL);
        if (rc) {
                DP_NOTICE(cdev, "Failed to start fcoe\n");
                return rc;
        }

        cdev->flags |= QED_FLAG_STORAGE_STARTED;
        hash_init(cdev->connections);

        if (tasks) {
                struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
                                                       GFP_ATOMIC);

                if (!tid_info) {
                        DP_NOTICE(cdev,
                                  "Failed to allocate tasks information\n");
                        qed_fcoe_stop(cdev);
                        return -ENOMEM;
                }

                rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
                if (rc) {
                        DP_NOTICE(cdev, "Failed to gather task information\n");
                        qed_fcoe_stop(cdev);
                        kfree(tid_info);
                        return rc;
                }

                /* Fill task information */
                tasks->size = tid_info->tid_size;
                tasks->num_tids_per_block = tid_info->num_tids_per_block;
                memcpy(tasks->blocks, tid_info->blocks,
                       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

                kfree(tid_info);
        }

        return 0;
}
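/* Hand a connection out to the upper driver: acquire an icid, insert it into
 * the connection hash table keyed by that icid, and return the handle,
 * fw_cid and doorbell address. release_conn undoes the hash insertion.
 */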
static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
                                 u32 *handle,
                                 u32 *fw_cid, void __iomem **p_doorbell)
{
        struct qed_hash_fcoe_con *hash_con;
        int rc;

        /* Allocate a hashed connection */
        hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
                return -ENOMEM;
        }

        /* Acquire the connection */
        rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
                                         &hash_con->con);
        if (rc) {
                DP_NOTICE(cdev, "Failed to acquire Connection\n");
                kfree(hash_con);
                return rc;
        }

        /* Added the connection to hash table */
        *handle = hash_con->con->icid;
        *fw_cid = hash_con->con->fw_cid;
        hash_add(cdev->connections, &hash_con->node, *handle);

        if (p_doorbell)
                *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
                                                   *handle);

        return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
        struct qed_hash_fcoe_con *hash_con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        hlist_del(&hash_con->node);
        qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
        kfree(hash_con);

        return 0;
}
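/* Look up the connection by handle, copy the offload parameters supplied by
 * the upper driver (queue addresses, MACs split into 16-bit words, FC
 * addresses, timers) into it, and post the offload ramrod. destroy_conn
 * records the terminate-parameters address and posts the terminate ramrod.
 */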
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
                                 u32 handle,
                                 struct qed_fcoe_params_offload *conn_info)
{
        struct qed_hash_fcoe_con *hash_con;
        struct qed_fcoe_conn *con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        /* Update the connection with information from the params */
        con = hash_con->con;

        con->sq_pbl_addr = conn_info->sq_pbl_addr;
        con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
        con->sq_next_page_addr = conn_info->sq_next_page_addr;
        con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
        con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
        con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
        con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
        con->vlan_tag = conn_info->vlan_tag;
        con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
        con->flags = conn_info->flags;
        con->def_q_idx = conn_info->def_q_idx;

        con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
            conn_info->src_mac[4];
        con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
            conn_info->src_mac[2];
        con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
            conn_info->src_mac[0];
        con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
            conn_info->dst_mac[4];
        con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
            conn_info->dst_mac[2];
        con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
            conn_info->dst_mac[0];

        con->s_id.addr_hi = conn_info->s_id.addr_hi;
        con->s_id.addr_mid = conn_info->s_id.addr_mid;
        con->s_id.addr_lo = conn_info->s_id.addr_lo;
        con->d_id.addr_hi = conn_info->d_id.addr_hi;
        con->d_id.addr_mid = conn_info->d_id.addr_mid;
        con->d_id.addr_lo = conn_info->d_id.addr_lo;

        return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
                                        QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
                                 u32 handle, dma_addr_t terminate_params)
{
        struct qed_hash_fcoe_con *hash_con;
        struct qed_fcoe_conn *con;

        hash_con = qed_fcoe_get_hash(cdev, handle);
        if (!hash_con) {
                DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
                          handle);
                return -EINVAL;
        }

        /* Update the connection with information from the params */
        con = hash_con->con;
        con->terminate_params = terminate_params;

        return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
                                        QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
        return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}
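/* Fill the management-firmware FCoE statistics: aggregate the firmware Rx/Tx
 * packet counters, report CRC drops as fcs_err, and let the registered
 * protocol driver supply the login-failure count via its callback.
 */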
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
                                 struct qed_mcp_fcoe_stats *stats)
{
        struct qed_fcoe_stats proto_stats;

        /* Retrieve FW statistics */
        memset(&proto_stats, 0, sizeof(proto_stats));
        if (qed_fcoe_stats(cdev, &proto_stats)) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE,
                           "Failed to collect FCoE statistics\n");
                return;
        }

        /* Translate FW statistics into struct */
        stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
                         proto_stats.fcoe_rx_xfer_pkt_cnt +
                         proto_stats.fcoe_rx_other_pkt_cnt;
        stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
                         proto_stats.fcoe_tx_xfer_pkt_cnt +
                         proto_stats.fcoe_tx_other_pkt_cnt;
        stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

        /* Request protocol driver to fill-in the rest */
        if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
                struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
                void *cookie = cdev->ops_cookie;

                if (ops->get_login_failures)
                        stats->login_failure = ops->get_login_failures(cookie);
        }
}
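/* Operations table exported to the FCoE upper-layer driver via
 * qed_get_fcoe_ops().
 */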
static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
        .common = &qed_common_ops_pass,
        .ll2 = &qed_ll2_ops_pass,
        .fill_dev_info = &qed_fill_fcoe_dev_info,
        .start = &qed_fcoe_start,
        .stop = &qed_fcoe_stop,
        .register_ops = &qed_register_fcoe_ops,
        .acquire_conn = &qed_fcoe_acquire_conn,
        .release_conn = &qed_fcoe_release_conn,
        .offload_conn = &qed_fcoe_offload_conn,
        .destroy_conn = &qed_fcoe_destroy_conn,
        .get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
        return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);