/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
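
/* Overview (summary inferred from the code below, not an authoritative
 * hardware description): a PTT entry aims one fixed-size window of the PF's
 * external BAR at an arbitrary internal (GRC) address, letting a small BAR
 * reach the whole chip address space. Each hw-function owns
 * PXP_EXTERNAL_BAR_PF_WINDOW_NUM windows; the first RESERVED_PTT_MAX are
 * claimed by fixed users, and the remainder sit on the pool's free_list
 * behind the pool spinlock.
 */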

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;

		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take a free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}
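
/* Illustrative usage sketch (not part of the driver; the error policy and
 * 'reg_addr' are placeholders): callers bracket register access with
 * acquire/release so the window is theirs for the duration:
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	val = qed_rd(p_hwfn, p_ptt, reg_addr);
 *	qed_ptt_release(p_hwfn, p_ptt);
 *
 * Acquire can sleep (usleep_range), so it must not be called from atomic
 * context; it gives up after QED_BAR_ACQUIRE_TIMEOUT iterations, i.e.
 * roughly one to two seconds.
 */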

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}
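
/* Worked example (illustrative): if pxp.offset holds 0x4000, the window is
 * aimed at byte address 0x4000 << 2 == 0x10000. The hardware is programmed
 * in dwords; every caller-facing address in this file is in bytes.
 */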

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}
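
/* Worked example (illustrative; a window size of 0x1000 bytes is assumed
 * here in place of the real PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE): with
 * the window aimed at 0x10000, an access to 0x10804 is already mapped and
 * yields bar_base + 0x804, while an access to 0x11804 re-aims the window
 * there and yields bar_base + 0.
 */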

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}
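
/* Note on qed_memcpy_hw() (summary, plus one inferred caveat): a PF slides
 * the PTT window to hw_addr + done before each window-sized chunk, while a
 * VF addresses its pre-mapped view directly. Since the copy loop works in
 * whole dwords (quota / 4) but 'done' advances by the full quota, a size
 * that is not a multiple of 4 would silently drop the tail bytes; callers
 * appear expected to pass dword-aligned sizes.
 */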

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
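
/* Worked example (illustrative): for rel_pf_id 2 and vfid 5, the result
 * packs PFID = 2, VFID = 5 and VFVALID = 1 into a single concrete FID, per
 * the PXP_CONCRETE_FID_* field definitions pulled in above;
 * qed_fid_pretend() can then load that value into the window's pretend
 * registers.
 */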

/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		   DMAE_CMD_SRC_PF_ID_SHIFT);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		   DMAE_CMD_DST_PF_ID_SHIFT);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

	/* reset source address in next go */
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	/* reset dest address in next go */
	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
		   DMAE_CMD_DST_ADDR_RESET_SHIFT);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
			    DMAE_CMD_SRC_VF_ID_SHIFT;
	}

	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
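
/* Summary of the opcode built above (derived from the code, not from a
 * hardware spec): the 32-bit opcode selects source/destination type (PCIe
 * vs. GRC) and owning PF, enables the completion-word write, sets endianness
 * and port, and arms per-'go' source/destination address reset; opcode_b
 * carries the source and destination VF ids, with the all-ones mask meaning
 * "PF, no VF". The SRC_ADDR_RESET mask is OR'ed in twice, which is redundant
 * but harmless.
 */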

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}

static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * since the source/dest addresses are not reset.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;

err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}
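
/* Note (editorial summary): the three coherent buffers allocated above are
 * (1) a single u32 the hardware stamps with DMAE_COMPLETION_VAL when a
 * command completes, (2) the dmae_cmd descriptor itself, and (3) an
 * intermediate bounce buffer of DMAE_MAX_RW_SIZE dwords used whenever a
 * caller passes a host *virtual* address rather than a DMA address.
 */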

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
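
/* Note (editorial summary): completion is detected by busy-polling the
 * coherent completion word, spinning udelay(DMAE_MIN_WAIT_TIME) per
 * iteration for up to 10000 iterations; barrier() forces the compiler to
 * re-read memory in lieu of declaring p_completion_word volatile.
 */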

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);
	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_execute_sub_operation: wait failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}

static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Split the request into chunks of at most length_limit dwords */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status, src_addr, dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}
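
/* Worked example (illustrative; 0x2000 is assumed for DMAE_MAX_RW_SIZE): a
 * 0x4100-dword request runs as three sub-operations of 0x2000, 0x2000 and
 * 0x100 dwords. GRC addresses advance by 'offset' (dword units) while host
 * addresses advance by 'offset * 4' (byte units), matching each side's
 * native addressing.
 */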

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}
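
/* Illustrative sketch (not from the driver; 'some_grc_addr' is a
 * placeholder): pushing a small host buffer into chip memory via DMAE
 * rather than PTT-window copies:
 *
 *	u32 buf[16] = { 0 };
 *	int rc;
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
 *			       some_grc_addr, ARRAY_SIZE(buf), 0);
 *
 * Note that source_addr is treated as a host *virtual* address here
 * (QED_DMAE_ADDRESS_HOST_VIRT), so the driver bounces it through the
 * intermediate buffer; grc_addr is a byte address, converted to dwords
 * above.
 */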

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&(p_hwfn->dmae_info.mutex));

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&(p_hwfn->dmae_info.mutex));

	return rc;
}

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE ||
	     proto == PROTOCOLID_ETH ||
	     proto == PROTOCOLID_ISCSI ||
	     proto == PROTOCOLID_ROCE) && !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n", proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else if (p_params->core.tc == OOO_LB_TC)
			pq_id = p_hwfn->qm_info.ooo_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
				 p_params->eth.vf_id;
		break;
	case PROTOCOLID_ISCSI:
		if (p_params->iscsi.q_idx == 1)
			pq_id = p_hwfn->qm_info.pure_ack_pq;
		break;
	case PROTOCOLID_ROCE:
		if (p_params->roce.dcqcn)
			pq_id = p_params->roce.qpid;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		if (pq_id > p_hwfn->qm_info.num_pf_rls)
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}