qed_hw.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET  (cpu_to_le32(-1))

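/* A PTT entry maps one fixed-size window in the PCI BAR onto an arbitrary
 * internal (GRC) address, so the limited BAR space can reach the whole
 * register map. Each struct qed_ptt below owns one such window; the pool
 * holds PXP_EXTERNAL_BAR_PF_WINDOW_NUM of them, and the first
 * RESERVED_PTT_MAX are kept out of the free list for dedicated users.
 */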
struct qed_ptt {
        struct list_head list_entry;
        unsigned int idx;
        struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
        struct list_head free_list;
        spinlock_t lock; /* ptt synchronized access */
        struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
        int i;

        if (!p_pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;

                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
        spin_lock_init(&p_pool->lock);

        return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
        }
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = NULL;
}

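/* Takes a free PTT window from the pool, retrying for up to
 * QED_BAR_ACQUIRE_TIMEOUT iterations (sleeping 1-2 ms between attempts)
 * if the pool is momentarily empty. Returns NULL on timeout, so callers
 * must check the result before using it.
 */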
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        unsigned int i;

        /* Take the free PTT from the list */
        for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
                spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

                if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
                                                 struct qed_ptt, list_entry);
                        list_del(&p_ptt->list_entry);

                        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);

                        return p_ptt;
                }

                spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
                usleep_range(1000, 2000);
        }

        DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
        return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt)
{
        spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
        list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

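/* Re-programs the window so that it starts at new_hw_addr. The offset is
 * kept in the shadow pxp entry in dwords (hence the >> 2) and written to
 * the per-PF admin area, where the hardware picks it up.
 */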
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update PTT entry in admin window */
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using DWORDS and the address is in Bytes */
        p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, offset),
               le32_to_cpu(p_ptt->pxp.offset));
}

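/* Translates an absolute GRC address into a BAR address, moving the PTT
 * window first if hw_addr falls outside the currently mapped range.
 */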
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 hw_addr)
{
        u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
                                     enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return NULL;
        }

        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
            struct qed_ptt *p_ptt,
            u32 hw_addr, u32 val)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
           struct qed_ptt *p_ptt,
           u32 hw_addr)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
        u32 val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

        return val;
}

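/* Copies n bytes between host memory and chip memory in chunks of at most
 * one window size. For a PF the window is slid along hw_addr chunk by
 * chunk; a VF accesses its BAR directly. Transfers are issued as 32-bit
 * reads/writes, so a non-dword-aligned tail would be silently dropped.
 */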
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          void *addr,
                          u32 hw_addr,
                          size_t n,
                          bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        size_t quota, done = 0;
        u32 __iomem *reg_addr;

        while (done < n) {
                quota = min_t(size_t, n - done,
                              PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                if (IS_PF(p_hwfn->cdev)) {
                        qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                        hw_offset = qed_ptt_get_bar_addr(p_ptt);
                } else {
                        hw_offset = hw_addr + done;
                }

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(reg_addr++);

                done += quota;
        }
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     void *dest, u32 hw_addr, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, dest %p, size %lu\n",
                   hw_addr, dest, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u32 hw_addr, void *src, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, src %p, size %lu\n",
                   hw_addr, src, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
        u32 concrete_fid = 0;

        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

        return concrete_fid;
}

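/* DMAE is the chip's DMA engine: instead of sliding the PTT window and
 * issuing register accesses one dword at a time, a command block is
 * written to the engine, which then copies between host memory (PCIe)
 * and chip memory (GRC) and posts a completion word when done.
 */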
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
                            const u8 is_src_type_grc,
                            const u8 is_dst_type_grc,
                            struct qed_dmae_params *p_params)
{
        u16 opcode_b = 0;
        u32 opcode = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
                                   : DMAE_CMD_SRC_MASK_PCIE) <<
                  DMAE_CMD_SRC_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
                   DMAE_CMD_SRC_PF_ID_SHIFT);

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
                                   : DMAE_CMD_DST_MASK_PCIE) <<
                  DMAE_CMD_DST_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
                   DMAE_CMD_DST_PF_ID_SHIFT);

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);

        if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
                opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

        opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
        opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

        /* reset source address in next go */
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        /* reset dest address in next go */
        opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
                   DMAE_CMD_DST_ADDR_RESET_SHIFT);

        /* SRC/DST VFID: all 1's - pf, otherwise VF id */
        if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
                opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
                            DMAE_CMD_SRC_VF_ID_SHIFT;
        }

        if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
                opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
        }

        p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}

static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt)
{
        struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        int qed_status = 0;

        /* verify address is not NULL */
        if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
             ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
                DP_NOTICE(p_hwfn,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          le32_to_cpu(command->opcode),
                          le16_to_cpu(command->opcode_b),
                          le16_to_cpu(command->length_dw),
                          le32_to_cpu(command->src_addr_hi),
                          le32_to_cpu(command->src_addr_lo),
                          le32_to_cpu(command->dst_addr_hi),
                          le32_to_cpu(command->dst_addr_lo));
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   le32_to_cpu(command->opcode),
                   le16_to_cpu(command->opcode_b),
                   le16_to_cpu(command->length_dw),
                   le32_to_cpu(command->src_addr_hi),
                   le32_to_cpu(command->src_addr_lo),
                   le32_to_cpu(command->dst_addr_hi),
                   le32_to_cpu(command->dst_addr_lo));

        /* Copy the command to DMAE - need to do it before every call
         * for source/dest address no reset.
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers
         * (which are read only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                           *(((u32 *)command) + i) : 0;

                qed_wr(p_hwfn, p_ptt,
                       DMAE_REG_CMD_MEM +
                       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                       (i * sizeof(u32)), data);
        }

        qed_wr(p_hwfn, p_ptt,
               qed_dmae_idx_to_go_cmd(idx_cmd),
               DMAE_GO_VALUE);

        return qed_status;
}

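/* Allocates the per-hwfn DMAE resources from DMA-coherent memory: the
 * completion word the engine writes when a command finishes, the command
 * block itself, and an intermediate bounce buffer used when the caller
 * passes a virtual (non-DMA-mapped) host address.
 */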
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32),
                                     p_addr,
                                     GFP_KERNEL);
        if (!*p_comp) {
                DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    sizeof(struct dmae_cmd),
                                    p_addr, GFP_KERNEL);
        if (!*p_cmd) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32) * DMAE_MAX_RW_SIZE,
                                     p_addr, GFP_KERNEL);
        if (!*p_buff) {
                DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
                goto err;
        }

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

        return 0;

err:
        qed_dmae_info_free(p_hwfn);
        return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        /* Just make sure no one is in the middle */
        mutex_lock(&p_hwfn->dmae_info.mutex);

        if (p_hwfn->dmae_info.p_completion_word) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32),
                                  p_hwfn->dmae_info.p_completion_word,
                                  p_phys);
                p_hwfn->dmae_info.p_completion_word = NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(struct dmae_cmd),
                                  p_hwfn->dmae_info.p_dmae_cmd,
                                  p_phys);
                p_hwfn->dmae_info.p_dmae_cmd = NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32) * DMAE_MAX_RW_SIZE,
                                  p_hwfn->dmae_info.p_intermediate_buffer,
                                  p_phys);
                p_hwfn->dmae_info.p_intermediate_buffer = NULL;
        }

        mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
        u32 wait_cnt = 0;
        u32 wait_cnt_limit = 10000;
        int qed_status = 0;

        barrier();
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                udelay(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        qed_status = -EBUSY;
                        break;
                }

                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                barrier();
        }

        if (qed_status == 0)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return qed_status;
}

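/* Programs and fires a single DMAE command of up to DMAE_MAX_RW_SIZE
 * dwords. GRC and physical host addresses are handed to the engine
 * directly; virtual host addresses are staged through the coherent
 * intermediate buffer (copied in before a write, copied out after a read).
 */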
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          u64 src_addr,
                                          u64 dst_addr,
                                          u8 src_type,
                                          u8 dst_type,
                                          u32 length)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        int qed_status = 0;

        switch (src_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
                break;
        /* for virtual source addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
                memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                       (void *)(uintptr_t)src_addr,
                       length * sizeof(u32));
                break;
        default:
                return -EINVAL;
        }

        switch (dst_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
                break;
        /* for virtual destination addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
                break;
        default:
                return -EINVAL;
        }

        cmd->length_dw = cpu_to_le16((u16)length);

        qed_dmae_post_command(p_hwfn, p_ptt);

        qed_status = qed_dmae_operation_wait(p_hwfn);
        if (qed_status) {
                DP_NOTICE(p_hwfn,
                          "DMAE wait failed. source_addr 0x%llx, dest_addr 0x%llx, size_in_dwords 0x%x\n",
                          src_addr,
                          dst_addr,
                          length);
                return qed_status;
        }

        if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
                memcpy((void *)(uintptr_t)(dst_addr),
                       &p_hwfn->dmae_info.p_intermediate_buffer[0],
                       length * sizeof(u32));

        return 0;
}

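/* Splits a transfer into DMAE_MAX_RW_SIZE-dword sub-operations. For a GRC
 * endpoint the offset advances in dwords (GRC addresses are dword-indexed);
 * for a host endpoint it advances in bytes (offset * 4). E.g. a transfer of
 * 2.5 * DMAE_MAX_RW_SIZE dwords issues two full chunks plus one half chunk.
 */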
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    u64 src_addr, u64 dst_addr,
                                    u8 src_type, u8 dst_type,
                                    u32 size_in_dwords,
                                    struct qed_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        int qed_status = 0;
        u32 offset = 0;

        qed_dmae_opcode(p_hwfn,
                        (src_type == QED_DMAE_ADDRESS_GRC),
                        (dst_type == QED_DMAE_ADDRESS_GRC),
                        p_params);

        cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
        cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
        cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

        /* Compute the number of full-sized chunks and the remainder */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
                        if (src_type == QED_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == QED_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                qed_status = qed_dmae_execute_sub_operation(p_hwfn,
                                                            p_ptt,
                                                            src_addr_split,
                                                            dst_addr_split,
                                                            src_type,
                                                            dst_type,
                                                            length_cur);
                if (qed_status) {
                        DP_NOTICE(p_hwfn,
                                  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                                  qed_status,
                                  src_addr,
                                  dst_addr,
                                  length_cur);
                        break;
                }
        }

        return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u64 source_addr,
                      u32 grc_addr,
                      u32 size_in_dwords,
                      u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      grc_addr_in_dw,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      QED_DMAE_ADDRESS_GRC,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

int
qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
                  dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
                                      dest_addr, QED_DMAE_ADDRESS_GRC,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   dma_addr_t source_addr,
                   dma_addr_t dest_addr,
                   u32 size_in_dwords, struct qed_dmae_params *p_params)
{
        int rc;

        mutex_lock(&(p_hwfn->dmae_info.mutex));

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      dest_addr,
                                      QED_DMAE_ADDRESS_HOST_PHYS,
                                      QED_DMAE_ADDRESS_HOST_PHYS,
                                      size_in_dwords, p_params);

        mutex_unlock(&(p_hwfn->dmae_info.mutex));

        return rc;
}

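/* Maps a protocol plus per-protocol parameters to a physical queue (PQ)
 * id in the queue manager: pure-LB/OOO/offload queues for CORE, a TC-based
 * (and optionally per-VF) queue for ETH, the pure-ACK queue for iSCSI, and
 * a per-QP queue (when DCQCN is set) or the offload queue for RoCE. The
 * result is offset by the hwfn's PQ resource base.
 */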
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
                  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
        u16 pq_id = 0;

        if ((proto == PROTOCOLID_CORE ||
             proto == PROTOCOLID_ETH ||
             proto == PROTOCOLID_ISCSI ||
             proto == PROTOCOLID_ROCE) && !p_params) {
                DP_NOTICE(p_hwfn,
                          "Protocol %d received NULL PQ params\n", proto);
                return 0;
        }

        switch (proto) {
        case PROTOCOLID_CORE:
                if (p_params->core.tc == LB_TC)
                        pq_id = p_hwfn->qm_info.pure_lb_pq;
                else if (p_params->core.tc == OOO_LB_TC)
                        pq_id = p_hwfn->qm_info.ooo_pq;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        case PROTOCOLID_ETH:
                pq_id = p_params->eth.tc;
                if (p_params->eth.is_vf)
                        pq_id += p_hwfn->qm_info.vf_queues_offset +
                                 p_params->eth.vf_id;
                break;
        case PROTOCOLID_ISCSI:
                if (p_params->iscsi.q_idx == 1)
                        pq_id = p_hwfn->qm_info.pure_ack_pq;
                break;
        case PROTOCOLID_ROCE:
                if (p_params->roce.dcqcn)
                        pq_id = p_params->roce.qpid;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                if (pq_id > p_hwfn->qm_info.num_pf_rls)
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        default:
                pq_id = 0;
        }

        pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

        return pq_id;
}