qed_hw.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/io.h>
  34. #include <linux/delay.h>
  35. #include <linux/dma-mapping.h>
  36. #include <linux/errno.h>
  37. #include <linux/kernel.h>
  38. #include <linux/list.h>
  39. #include <linux/mutex.h>
  40. #include <linux/pci.h>
  41. #include <linux/slab.h>
  42. #include <linux/spinlock.h>
  43. #include <linux/string.h>
  44. #include <linux/qed/qed_chain.h>
  45. #include "qed.h"
  46. #include "qed_hsi.h"
  47. #include "qed_hw.h"
  48. #include "qed_reg_addr.h"
  49. #include "qed_sriov.h"
/* Max polling iterations (each with a ~1-2ms sleep) that
 * qed_ptt_acquire() waits for a free PTT window before giving up.
 */
#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
/* A single PTT window - one slot of the external BAR through which the
 * chip's internal address space can be accessed.
 */
struct qed_ptt {
	struct list_head list_entry;	/* links free entries in the pool */
	unsigned int idx;		/* window index within the BAR */
	struct pxp_ptt_entry pxp;	/* shadow of the HW window config */
};
/* Per-hwfn pool of all PTT windows; non-reserved entries sit on
 * free_list and are handed out by qed_ptt_acquire().
 */
struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
  63. int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
  64. {
  65. struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
  66. int i;
  67. if (!p_pool)
  68. return -ENOMEM;
  69. INIT_LIST_HEAD(&p_pool->free_list);
  70. for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
  71. p_pool->ptts[i].idx = i;
  72. p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
  73. p_pool->ptts[i].pxp.pretend.control = 0;
  74. if (i >= RESERVED_PTT_MAX)
  75. list_add(&p_pool->ptts[i].list_entry,
  76. &p_pool->free_list);
  77. }
  78. p_hwfn->p_ptt_pool = p_pool;
  79. spin_lock_init(&p_pool->lock);
  80. return 0;
  81. }
  82. void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
  83. {
  84. struct qed_ptt *p_ptt;
  85. int i;
  86. for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
  87. p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
  88. p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
  89. }
  90. }
  91. void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
  92. {
  93. kfree(p_hwfn->p_ptt_pool);
  94. p_hwfn->p_ptt_pool = NULL;
  95. }
  96. struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
  97. {
  98. struct qed_ptt *p_ptt;
  99. unsigned int i;
  100. /* Take the free PTT from the list */
  101. for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
  102. spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
  103. if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
  104. p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
  105. struct qed_ptt, list_entry);
  106. list_del(&p_ptt->list_entry);
  107. spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
  108. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  109. "allocated ptt %d\n", p_ptt->idx);
  110. return p_ptt;
  111. }
  112. spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
  113. usleep_range(1000, 2000);
  114. }
  115. DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
  116. return NULL;
  117. }
  118. void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  119. {
  120. spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
  121. list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
  122. spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
  123. }
  124. u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  125. {
  126. /* The HW is using DWORDS and we need to translate it to Bytes */
  127. return le32_to_cpu(p_ptt->pxp.offset) << 2;
  128. }
  129. static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
  130. {
  131. return PXP_PF_WINDOW_ADMIN_PER_PF_START +
  132. p_ptt->idx * sizeof(struct pxp_ptt_entry);
  133. }
  134. u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
  135. {
  136. return PXP_EXTERNAL_BAR_PF_WINDOW_START +
  137. p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
  138. }
  139. void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
  140. struct qed_ptt *p_ptt, u32 new_hw_addr)
  141. {
  142. u32 prev_hw_addr;
  143. prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
  144. if (new_hw_addr == prev_hw_addr)
  145. return;
  146. /* Update PTT entery in admin window */
  147. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  148. "Updating PTT entry %d to offset 0x%x\n",
  149. p_ptt->idx, new_hw_addr);
  150. /* The HW is using DWORDS and the address is in Bytes */
  151. p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
  152. REG_WR(p_hwfn,
  153. qed_ptt_config_addr(p_ptt) +
  154. offsetof(struct pxp_ptt_entry, offset),
  155. le32_to_cpu(p_ptt->pxp.offset));
  156. }
  157. static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
  158. struct qed_ptt *p_ptt, u32 hw_addr)
  159. {
  160. u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
  161. u32 offset;
  162. offset = hw_addr - win_hw_addr;
  163. /* Verify the address is within the window */
  164. if (hw_addr < win_hw_addr ||
  165. offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
  166. qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
  167. offset = 0;
  168. }
  169. return qed_ptt_get_bar_addr(p_ptt) + offset;
  170. }
  171. struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
  172. enum reserved_ptts ptt_idx)
  173. {
  174. if (ptt_idx >= RESERVED_PTT_MAX) {
  175. DP_NOTICE(p_hwfn,
  176. "Requested PTT %d is out of range\n", ptt_idx);
  177. return NULL;
  178. }
  179. return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
  180. }
  181. void qed_wr(struct qed_hwfn *p_hwfn,
  182. struct qed_ptt *p_ptt,
  183. u32 hw_addr, u32 val)
  184. {
  185. u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
  186. REG_WR(p_hwfn, bar_addr, val);
  187. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  188. "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
  189. bar_addr, hw_addr, val);
  190. }
  191. u32 qed_rd(struct qed_hwfn *p_hwfn,
  192. struct qed_ptt *p_ptt,
  193. u32 hw_addr)
  194. {
  195. u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
  196. u32 val = REG_RD(p_hwfn, bar_addr);
  197. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  198. "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
  199. bar_addr, hw_addr, val);
  200. return val;
  201. }
/* Copy @n bytes between a host buffer and chip address space, one
 * BAR-window-sized chunk at a time, using 32-bit register accesses.
 *
 * @addr:      host buffer (source when @to_device, destination otherwise)
 * @hw_addr:   chip-side byte address
 * @n:         bytes to copy; only whole dwords per chunk are transferred
 *             (quota / 4), so @n is presumably dword-aligned - any
 *             trailing partial dword would be silently skipped
 * @to_device: true = host -> device, false = device -> host
 *
 * PFs re-aim the PTT window for each chunk; VFs (non-PF) access the
 * address directly without a window.
 */
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		/* Limit each pass to what fits in one BAR window */
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}
  230. void qed_memcpy_from(struct qed_hwfn *p_hwfn,
  231. struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
  232. {
  233. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  234. "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
  235. hw_addr, dest, hw_addr, (unsigned long)n);
  236. qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
  237. }
  238. void qed_memcpy_to(struct qed_hwfn *p_hwfn,
  239. struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
  240. {
  241. DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
  242. "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
  243. hw_addr, hw_addr, src, (unsigned long)n);
  244. qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
  245. }
/* Program this PTT's pretend register so accesses through the window
 * are issued on behalf of concrete function id @fid.
 */
void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	/* If VFVALID is clear, pretend only to the PFID portion of @fid */
	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	/* The pretend sub-entry (control + fid) is written to HW as one
	 * 32-bit value.
	 */
	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}
  266. void qed_port_pretend(struct qed_hwfn *p_hwfn,
  267. struct qed_ptt *p_ptt, u8 port_id)
  268. {
  269. u16 control = 0;
  270. SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
  271. SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
  272. SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
  273. p_ptt->pxp.pretend.control = cpu_to_le16(control);
  274. REG_WR(p_hwfn,
  275. qed_ptt_config_addr(p_ptt) +
  276. offsetof(struct pxp_ptt_entry, pretend),
  277. *(u32 *)&p_ptt->pxp.pretend);
  278. }
  279. void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  280. {
  281. u16 control = 0;
  282. SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
  283. SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
  284. SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
  285. p_ptt->pxp.pretend.control = cpu_to_le16(control);
  286. REG_WR(p_hwfn,
  287. qed_ptt_config_addr(p_ptt) +
  288. offsetof(struct pxp_ptt_entry, pretend),
  289. *(u32 *)&p_ptt->pxp.pretend);
  290. }
  291. u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
  292. {
  293. u32 concrete_fid = 0;
  294. SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
  295. SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
  296. SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
  297. return concrete_fid;
  298. }
  299. /* DMAE */
  300. static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
  301. const u8 is_src_type_grc,
  302. const u8 is_dst_type_grc,
  303. struct qed_dmae_params *p_params)
  304. {
  305. u16 opcode_b = 0;
  306. u32 opcode = 0;
  307. /* Whether the source is the PCIe or the GRC.
  308. * 0- The source is the PCIe
  309. * 1- The source is the GRC.
  310. */
  311. opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
  312. : DMAE_CMD_SRC_MASK_PCIE) <<
  313. DMAE_CMD_SRC_SHIFT;
  314. opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
  315. DMAE_CMD_SRC_PF_ID_SHIFT);
  316. /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
  317. opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
  318. : DMAE_CMD_DST_MASK_PCIE) <<
  319. DMAE_CMD_DST_SHIFT;
  320. opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
  321. DMAE_CMD_DST_PF_ID_SHIFT);
  322. /* Whether to write a completion word to the completion destination:
  323. * 0-Do not write a completion word
  324. * 1-Write the completion word
  325. */
  326. opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
  327. opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
  328. DMAE_CMD_SRC_ADDR_RESET_SHIFT);
  329. if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
  330. opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
  331. opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
  332. opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
  333. /* reset source address in next go */
  334. opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
  335. DMAE_CMD_SRC_ADDR_RESET_SHIFT);
  336. /* reset dest address in next go */
  337. opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
  338. DMAE_CMD_DST_ADDR_RESET_SHIFT);
  339. /* SRC/DST VFID: all 1's - pf, otherwise VF id */
  340. if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
  341. opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
  342. opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
  343. } else {
  344. opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
  345. DMAE_CMD_SRC_VF_ID_SHIFT;
  346. }
  347. if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
  348. opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
  349. opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
  350. } else {
  351. opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
  352. }
  353. p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
  354. p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
  355. }
  356. u32 qed_dmae_idx_to_go_cmd(u8 idx)
  357. {
  358. /* All the DMAE 'go' registers form an array in internal memory */
  359. return DMAE_REG_GO_C0 + (idx << 2);
  360. }
/* Copy the prepared DMAE command into the engine's command memory for
 * this hwfn's channel and kick the channel's GO register.
 *
 * Commands with an all-zero source or destination address are rejected.
 *
 * Return: 0 on success, -EINVAL if either address is zero.
 */
static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The first 9 DWs are the command registers, the 10 DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		/* DWs beyond DMAE_CMD_SIZE_TO_FILL are cleared to zero */
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	/* Trigger the channel only after the full command is in place */
	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}
  411. int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
  412. {
  413. dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
  414. struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
  415. u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
  416. u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
  417. *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  418. sizeof(u32), p_addr, GFP_KERNEL);
  419. if (!*p_comp)
  420. goto err;
  421. p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
  422. *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  423. sizeof(struct dmae_cmd),
  424. p_addr, GFP_KERNEL);
  425. if (!*p_cmd)
  426. goto err;
  427. p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
  428. *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  429. sizeof(u32) * DMAE_MAX_RW_SIZE,
  430. p_addr, GFP_KERNEL);
  431. if (!*p_buff)
  432. goto err;
  433. p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
  434. return 0;
  435. err:
  436. qed_dmae_info_free(p_hwfn);
  437. return -ENOMEM;
  438. }
  439. void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
  440. {
  441. dma_addr_t p_phys;
  442. /* Just make sure no one is in the middle */
  443. mutex_lock(&p_hwfn->dmae_info.mutex);
  444. if (p_hwfn->dmae_info.p_completion_word) {
  445. p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
  446. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  447. sizeof(u32),
  448. p_hwfn->dmae_info.p_completion_word, p_phys);
  449. p_hwfn->dmae_info.p_completion_word = NULL;
  450. }
  451. if (p_hwfn->dmae_info.p_dmae_cmd) {
  452. p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
  453. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  454. sizeof(struct dmae_cmd),
  455. p_hwfn->dmae_info.p_dmae_cmd, p_phys);
  456. p_hwfn->dmae_info.p_dmae_cmd = NULL;
  457. }
  458. if (p_hwfn->dmae_info.p_intermediate_buffer) {
  459. p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
  460. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  461. sizeof(u32) * DMAE_MAX_RW_SIZE,
  462. p_hwfn->dmae_info.p_intermediate_buffer,
  463. p_phys);
  464. p_hwfn->dmae_info.p_intermediate_buffer = NULL;
  465. }
  466. mutex_unlock(&p_hwfn->dmae_info.mutex);
  467. }
/* Busy-poll the DMA-coherent completion word until the DMAE engine
 * writes DMAE_COMPLETION_VAL into it, sleeping DMAE_MIN_WAIT_TIME us
 * between polls, for at most 10000 iterations.
 *
 * Return: 0 on completion (the completion word is re-armed to 0 for
 *         the next operation), -EBUSY on timeout.
 */
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	/* Force a fresh read of the completion word before polling */
	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
/* Execute one window-sized DMAE transfer of @length_dw dwords.
 *
 * @src_addr/@dst_addr: GRC dword address, host physical address, or a
 *                      host virtual address depending on the type
 * @src_type/@dst_type: one of QED_DMAE_ADDRESS_{GRC,HOST_PHYS,HOST_VIRT}
 *
 * HOST_VIRT endpoints are staged through the pre-allocated DMA-coherent
 * intermediate buffer: copied in before posting for a virtual source,
 * copied out after completion for a virtual destination. This assumes
 * the caller holds dmae_info.mutex and that @length_dw fits the
 * intermediate buffer (DMAE_MAX_RW_SIZE).
 *
 * Return: 0 on success, -EINVAL for an unknown address type, or the
 *         wait status on completion timeout.
 */
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);
	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	/* Virtual destination: copy the DMA'd data out of the staging
	 * buffer into the caller's buffer.
	 */
	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}
/* Run a full DMAE transfer of @size_in_dwords, splitting it into
 * sub-operations of at most DMAE_MAX_RW_SIZE dwords each.
 *
 * Address advancement per chunk: GRC addresses are in dwords so they
 * advance by @offset; host addresses are in bytes so they advance by
 * @offset * 4. With QED_DMAE_FLAG_RW_REPL_SRC the source address is not
 * advanced (the same source is replicated into every chunk).
 *
 * Caller must hold dmae_info.mutex.
 *
 * Return: 0 on success, or the first sub-operation's error status.
 */
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	/* Engine writes DMAE_COMPLETION_VAL to this address when done */
	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status, src_addr, dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}
  607. int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
  608. struct qed_ptt *p_ptt,
  609. u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
  610. {
  611. u32 grc_addr_in_dw = grc_addr / sizeof(u32);
  612. struct qed_dmae_params params;
  613. int rc;
  614. memset(&params, 0, sizeof(struct qed_dmae_params));
  615. params.flags = flags;
  616. mutex_lock(&p_hwfn->dmae_info.mutex);
  617. rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
  618. grc_addr_in_dw,
  619. QED_DMAE_ADDRESS_HOST_VIRT,
  620. QED_DMAE_ADDRESS_GRC,
  621. size_in_dwords, &params);
  622. mutex_unlock(&p_hwfn->dmae_info.mutex);
  623. return rc;
  624. }
  625. int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
  626. struct qed_ptt *p_ptt,
  627. u32 grc_addr,
  628. dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
  629. {
  630. u32 grc_addr_in_dw = grc_addr / sizeof(u32);
  631. struct qed_dmae_params params;
  632. int rc;
  633. memset(&params, 0, sizeof(struct qed_dmae_params));
  634. params.flags = flags;
  635. mutex_lock(&p_hwfn->dmae_info.mutex);
  636. rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
  637. dest_addr, QED_DMAE_ADDRESS_GRC,
  638. QED_DMAE_ADDRESS_HOST_VIRT,
  639. size_in_dwords, &params);
  640. mutex_unlock(&p_hwfn->dmae_info.mutex);
  641. return rc;
  642. }
  643. int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
  644. struct qed_ptt *p_ptt,
  645. dma_addr_t source_addr,
  646. dma_addr_t dest_addr,
  647. u32 size_in_dwords, struct qed_dmae_params *p_params)
  648. {
  649. int rc;
  650. mutex_lock(&(p_hwfn->dmae_info.mutex));
  651. rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
  652. dest_addr,
  653. QED_DMAE_ADDRESS_HOST_PHYS,
  654. QED_DMAE_ADDRESS_HOST_PHYS,
  655. size_in_dwords, p_params);
  656. mutex_unlock(&(p_hwfn->dmae_info.mutex));
  657. return rc;
  658. }
/* Resolve the absolute QM physical queue (PQ) id to use for @proto.
 *
 * @p_params is required (non-NULL) for CORE/ETH/ISCSI/ROCE and selects
 * among the per-protocol queues; passing NULL for those protocols logs
 * a notice and returns 0.
 *
 * The relative pq_id chosen below is converted to an absolute id by
 * adding CM_TX_PQ_BASE and this hwfn's QED_PQ resource start.
 */
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE ||
	     proto == PROTOCOLID_ETH ||
	     proto == PROTOCOLID_ISCSI ||
	     proto == PROTOCOLID_ROCE) && !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n", proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else if (p_params->core.tc == OOO_LB_TC)
			pq_id = p_hwfn->qm_info.ooo_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		/* VF queues live past the PF queues at vf_queues_offset */
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
				 p_params->eth.vf_id;
		break;
	case PROTOCOLID_ISCSI:
		if (p_params->iscsi.q_idx == 1)
			pq_id = p_hwfn->qm_info.pure_ack_pq;
		break;
	case PROTOCOLID_ROCE:
		if (p_params->roce.dcqcn)
			pq_id = p_params->roce.qpid;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		/* NOTE(review): '>' (not '>=') bounds the rate-limiter
		 * queues - confirm num_pf_rls is an inclusive maximum.
		 */
		if (pq_id > p_hwfn->qm_info.num_pf_rls)
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_FCOE:
		pq_id = p_hwfn->qm_info.offload_pq;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}