qed_init_ops.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
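
/* Each non-zero entry below programs one PXP global window.  Judging by the
 * per-window comments, the stored value is the window's target address in
 * 4 KB units (e.g. 0x1c02 places window 2 at 0x1c02000).
 */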
static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};

void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
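/* The RT data is a host-side shadow of init-time register values:
 * rt_data.init_val[] holds the data while rt_data.b_valid[] marks which
 * offsets have been staged.  Staged values are flushed to the chip by
 * qed_init_rt() when the init sequence reaches an INIT_SRC_RUNTIME op.
 */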
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val, size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}
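
/* Illustrative use of the helpers above (the offset names are placeholders,
 * not real RT offsets):
 *
 *	qed_init_store_rt_reg(p_hwfn, SOME_REG_RT_OFFSET, val);
 *	qed_init_store_rt_agg(p_hwfn, SOME_ARR_RT_OFFSET, vals, sizeof(vals));
 */

/* Flush staged RT values in [rt_offset, rt_offset + size) to the chip.  When
 * b_must_dmae is set (wide-bus registers), each contiguous run of valid
 * entries is pushed with a single DMAE transaction; otherwise every valid
 * entry is written directly with qed_wr().
 */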
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		return -ENOMEM;
	}

	return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	kfree(p_hwfn->rt_data.b_valid);
}
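
/* Write a dword array to GRC.  DMAE has a fixed setup cost, so short arrays
 * (fewer than 16 dwords here) are written register-by-register unless the
 * target is a wide-bus register, which must always go through DMAE.
 */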
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	}

	return rc;
}

static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr, u32 fill, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* Invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. zero_buffer (a zeroed source buffer; the fill argument is
	 *    unused here),
	 * 4. fill_count
	 */
	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}

static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}
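
/* Execute an INIT_SRC_ARRAY write op.  The fw data blob stores the payload
 * in one of three layouts: ZIPPED (decompressed into p_hwfn->unzip_buf
 * before writing), PATTERN (a short pattern replayed 'repetitions' times at
 * increasing addresses) or STANDARD (a plain dword array).
 */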
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *p_cmd, bool b_can_dmae)
{
	u32 data = le32_to_cpu(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	union init_write_args *arg = &p_cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = le32_to_cpu(p_cmd->args.inline_val);
		qed_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = le32_to_cpu(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}
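
/* Poll comparators for init_ops read commands: comp_eq() waits for an exact
 * match, comp_and() waits until all expected bits are set, and comp_or()
 * succeeds as soon as val | expected_val is non-zero.
 */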
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       le32_to_cpu(cmd->op_data));
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}
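
/* The modes tree is stored in prefix notation: each byte is either an
 * operator (NOT takes the one sub-expression that follows, OR/AND take two)
 * or a leaf that encodes a mode bit as MAX_INIT_MODE_OPS + bit.  For
 * example, the sequence { INIT_MODE_OP_AND, leaf(a), leaf(b) } matches only
 * when both mode bits a and b are set in 'modes'.
 */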
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *p_offset, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}
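
/* For IF_MODE/IF_PHASE ops the return value is the number of subsequent
 * commands the interpreter should skip when the condition does not match
 * (0 when it matches).
 */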
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase, u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}
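
/* Main init_ops interpreter: walk the fw-provided op array and dispatch each
 * command.  IF_MODE/IF_PHASE ops conditionally skip ahead, and IF_PHASE also
 * latches whether DMAE may be used for the commands that follow.
 */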
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;
		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}

void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}
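
/* Parse the firmware data blob: after a metadata dword comes a table of
 * struct bin_buffer_hdr entries, indexed by the BIN_BUF_INIT_* constants,
 * whose offset fields locate each section within the blob.
 */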
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}