qed_init_ops.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500

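/* Global PXP window configuration; a zero entry means the window is unused,
 * and qed_gtt_init() below skips it.
 */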
static u32 pxp_global_win[] = {
        0,
        0,
        0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
        0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
        0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
        0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
        0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
        0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
        0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
        0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
        0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
        0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
        0,
        0,
        0,
        0,
        0,
        0,
        0,
};

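/* Hook the iro_arr table (from the firmware headers) into the device
 * structure so the rest of the driver can resolve internal RAM offsets.
 */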
void qed_init_iro_array(struct qed_dev *cdev)
{
        cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
        int i;

        for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
                p_hwfn->rt_data.b_valid[i] = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
                           u32 rt_offset,
                           u32 val)
{
        p_hwfn->rt_data.init_val[rt_offset] = val;
        p_hwfn->rt_data.b_valid[rt_offset] = true;
}

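/* Store an aggregate spanning several consecutive runtime entries; size is
 * in bytes and is recorded one dword at a time.
 */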
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
                           u32 rt_offset, u32 *p_val,
                           size_t size)
{
        size_t i;

        for (i = 0; i < size / sizeof(u32); i++) {
                p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
                p_hwfn->rt_data.b_valid[rt_offset + i] = true;
        }
}

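/* Flush the valid entries of a runtime-array region to the chip. Direct
 * register writes are used unless the region is wide-bus, in which case
 * each contiguous run of valid entries is transferred via DMAE.
 */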
static int qed_init_rt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 addr,
                       u16 rt_offset,
                       u16 size,
                       bool b_must_dmae)
{
        u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
        bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
        u16 i, segment;
        int rc = 0;

        /* Since not all RT entries are initialized, go over the RT and
         * for each segment of initialized values use DMA.
         */
        for (i = 0; i < size; i++) {
                if (!p_valid[i])
                        continue;

                /* In case there isn't any wide-bus configuration here,
                 * simply write the data instead of using dmae.
                 */
                if (!b_must_dmae) {
                        qed_wr(p_hwfn, p_ptt, addr + (i << 2),
                               p_init_val[i]);
                        continue;
                }

                /* Start of a new segment */
                for (segment = 1; i + segment < size; segment++)
                        if (!p_valid[i + segment])
                                break;

                rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                                       (uintptr_t)(p_init_val + i),
                                       addr + (i << 2), segment, 0);
                if (rc)
                        return rc;

                /* Jump over the entire segment, including invalid entry */
                i += segment;
        }

        return rc;
}

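/* Allocate the runtime-array shadow buffers; VFs skip this entirely since
 * they have no runtime registers to program.
 */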
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_rt_data *rt_data = &p_hwfn->rt_data;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
                                   GFP_KERNEL);
        if (!rt_data->b_valid)
                return -ENOMEM;

        rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
                                    GFP_KERNEL);
        if (!rt_data->init_val) {
                kfree(rt_data->b_valid);
                /* Clear the stale pointer so a later qed_init_free()
                 * cannot double-free it.
                 */
                rt_data->b_valid = NULL;
                return -ENOMEM;
        }

        return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->rt_data.init_val);
        kfree(p_hwfn->rt_data.b_valid);
}

static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u32 addr,
                               u32 dmae_data_offset,
                               u32 size,
                               const u32 *buf,
                               bool b_must_dmae,
                               bool b_can_dmae)
{
        int rc = 0;

        /* Perform DMAE only for lengthy enough sections or for wide-bus */
        if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
                const u32 *data = buf + dmae_data_offset;
                u32 i;

                for (i = 0; i < size; i++)
                        qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
        } else {
                rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                                       (uintptr_t)(buf + dmae_data_offset),
                                       addr, size, 0);
        }

        return rc;
}

static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              u32 addr,
                              u32 fill,
                              u32 fill_count)
{
        static u32 zero_buffer[DMAE_MAX_RW_SIZE];

        memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

        /* Only zero-fill is supported here; the 'fill' value is ignored.
         * QED_DMAE_FLAG_RW_REPL_SRC tells the DMAE engine to replicate the
         * source data instead of advancing through it, so a single zeroed
         * buffer covers all fill_count destination dwords.
         */
        return qed_dmae_host2grc(p_hwfn, p_ptt,
                                 (uintptr_t)(&zero_buffer[0]),
                                 addr, fill_count,
                                 QED_DMAE_FLAG_RW_REPL_SRC);
}

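/* Fallback fill path - write the fill value one dword at a time through the
 * PTT window.
 */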
static void qed_init_fill(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          u32 addr,
                          u32 fill,
                          u32 fill_count)
{
        u32 i;

        for (i = 0; i < fill_count; i++, addr += sizeof(u32))
                qed_wr(p_hwfn, p_ptt, addr, fill);
}

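/* Handle an init_ops array write; the payload resides in the firmware data
 * array and may be zipped, a repeated pattern, or a plain (standard) array.
 */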
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct init_write_op *cmd,
                              bool b_must_dmae,
                              bool b_can_dmae)
{
        u32 data = le32_to_cpu(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
        u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
        u32 offset, output_len, input_len, max_size;
        struct qed_dev *cdev = p_hwfn->cdev;
        union init_array_hdr *hdr;
        const u32 *array_data;
        int rc = 0;
        u32 size;

        array_data = cdev->fw_data->arr_data;

        hdr = (union init_array_hdr *)(array_data +
                                       dmae_array_offset);
        data = le32_to_cpu(hdr->raw.data);
        switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
        case INIT_ARR_ZIPPED:
                offset = dmae_array_offset + 1;
                input_len = GET_FIELD(data,
                                      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
                max_size = MAX_ZIPPED_SIZE * 4;
                memset(p_hwfn->unzip_buf, 0, max_size);

                output_len = qed_unzip_data(p_hwfn, input_len,
                                            (u8 *)&array_data[offset],
                                            max_size, (u8 *)p_hwfn->unzip_buf);
                if (output_len) {
                        rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
                                                 output_len,
                                                 p_hwfn->unzip_buf,
                                                 b_must_dmae, b_can_dmae);
                } else {
                        DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
                        rc = -EINVAL;
                }
                break;
        case INIT_ARR_PATTERN:
        {
                u32 repeats = GET_FIELD(data,
                                        INIT_ARRAY_PATTERN_HDR_REPETITIONS);
                u32 i;

                size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
                for (i = 0; i < repeats; i++, addr += size << 2) {
                        rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
                                                 dmae_array_offset + 1,
                                                 size, array_data,
                                                 b_must_dmae, b_can_dmae);
                        if (rc)
                                break;
                }
                break;
        }
        case INIT_ARR_STANDARD:
                size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
                rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
                                         dmae_array_offset + 1,
                                         size, array_data,
                                         b_must_dmae, b_can_dmae);
                break;
        }

        return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           struct init_write_op *cmd,
                           bool b_can_dmae)
{
        u32 data = le32_to_cpu(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
        bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
        union init_write_args *arg = &cmd->args;
        int rc = 0;

        /* Sanitize */
        if (b_must_dmae && !b_can_dmae) {
                DP_NOTICE(p_hwfn,
                          "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
                          addr);
                return -EINVAL;
        }

        switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
        case INIT_SRC_INLINE:
                qed_wr(p_hwfn, p_ptt, addr,
                       le32_to_cpu(arg->inline_val));
                break;
        case INIT_SRC_ZEROS:
                if (b_must_dmae ||
                    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
                        rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
                                                le32_to_cpu(arg->zeros_count));
                else
                        qed_init_fill(p_hwfn, p_ptt, addr, 0,
                                      le32_to_cpu(arg->zeros_count));
                break;
        case INIT_SRC_ARRAY:
                rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
                                        b_must_dmae, b_can_dmae);
                break;
        case INIT_SRC_RUNTIME:
                qed_init_rt(p_hwfn, p_ptt, addr,
                            le16_to_cpu(arg->runtime.offset),
                            le16_to_cpu(arg->runtime.size),
                            b_must_dmae);
                break;
        }

        return rc;
}

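/* Comparison callbacks for the read/poll command below */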
static inline bool comp_eq(u32 val, u32 expected_val)
{
        return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
        return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
        return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            struct init_read_op *cmd)
{
        bool (*comp_check)(u32 val, u32 expected_val);
        u32 delay = QED_INIT_POLL_PERIOD_US, val;
        u32 data, addr, poll;
        int i;

        data = le32_to_cpu(cmd->op_data);
        addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
        poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

        val = qed_rd(p_hwfn, p_ptt, addr);

        if (poll == INIT_POLL_NONE)
                return;

        switch (poll) {
        case INIT_POLL_EQ:
                comp_check = comp_eq;
                break;
        case INIT_POLL_OR:
                comp_check = comp_or;
                break;
        case INIT_POLL_AND:
                comp_check = comp_and;
                break;
        default:
                DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
                       cmd->op_data);
                return;
        }

        data = le32_to_cpu(cmd->expected_val);
        for (i = 0;
             i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
             i++) {
                udelay(delay);
                val = qed_rd(p_hwfn, p_ptt, addr);
        }

        if (i == QED_INIT_MAX_POLL_COUNT) {
                DP_ERR(p_hwfn,
                       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
                       addr, le32_to_cpu(cmd->expected_val),
                       val, le32_to_cpu(cmd->op_data));
        }
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            struct init_callback_op *p_cmd)
{
        DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}

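/* Recursively evaluate the firmware mode-tree expression at *offset: each
 * byte is either a NOT/OR/AND operator or, after subtracting
 * MAX_INIT_MODE_OPS, the index of a mode bit to test in 'modes'.
 */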
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
                                  u16 *offset,
                                  int modes)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        const u8 *modes_tree_buf;
        u8 arg1, arg2, tree_val;

        modes_tree_buf = cdev->fw_data->modes_tree_buf;
        tree_val = modes_tree_buf[(*offset)++];
        switch (tree_val) {
        case INIT_MODE_OP_NOT:
                return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
        case INIT_MODE_OP_OR:
                arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                return arg1 | arg2;
        case INIT_MODE_OP_AND:
                arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                return arg1 & arg2;
        default:
                tree_val -= MAX_INIT_MODE_OPS;
                return (modes & (1 << tree_val)) ? 1 : 0;
        }
}

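/* Returns the number of commands to skip - 0 if the mode expression matches,
 * otherwise the skip-offset encoded in the IF_MODE op.
 */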
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
                             struct init_if_mode_op *p_cmd,
                             int modes)
{
        u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

        if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
                return 0;
        else
                return GET_FIELD(le32_to_cpu(p_cmd->op_data),
                                 INIT_IF_MODE_OP_CMD_OFFSET);
}

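/* Same idea for IF_PHASE ops - returns 0 if the current phase/phase_id
 * matches the op, otherwise the number of commands to skip.
 */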
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
                              struct init_if_phase_op *p_cmd,
                              u32 phase,
                              u32 phase_id)
{
        u32 data = le32_to_cpu(p_cmd->phase_data);
        u32 op_data = le32_to_cpu(p_cmd->op_data);

        if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
              (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
               GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
                return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
        else
                return 0;
}

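/* Main init_ops entry point - iterate over the firmware command stream and
 * execute every op that applies to the given phase, phase_id and modes.
 */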
int qed_init_run(struct qed_hwfn *p_hwfn,
                 struct qed_ptt *p_ptt,
                 int phase,
                 int phase_id,
                 int modes)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cmd_num, num_init_ops;
        union init_op *init_ops;
        bool b_dmae = false;
        int rc = 0;

        num_init_ops = cdev->fw_data->init_ops_size;
        init_ops = cdev->fw_data->init_ops;

        p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
        if (!p_hwfn->unzip_buf) {
                DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
                return -ENOMEM;
        }

        for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
                union init_op *cmd = &init_ops[cmd_num];
                u32 data = le32_to_cpu(cmd->raw.op_data);

                switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
                case INIT_OP_WRITE:
                        rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
                                             b_dmae);
                        break;
                case INIT_OP_READ:
                        qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
                        break;
                case INIT_OP_IF_MODE:
                        cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
                                                     modes);
                        break;
                case INIT_OP_IF_PHASE:
                        cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
                                                      phase, phase_id);
                        b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
                        break;
                case INIT_OP_DELAY:
                        /* qed_init_run is always invoked from
                         * sleep-able context
                         */
                        udelay(le32_to_cpu(cmd->delay.delay));
                        break;
                case INIT_OP_CALLBACK:
                        qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
                        break;
                }

                if (rc)
                        break;
        }

        kfree(p_hwfn->unzip_buf);
        return rc;
}

void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
        u32 gtt_base;
        u32 i;

        /* Set the global windows */
        gtt_base = PXP_PF_WINDOW_ADMIN_START +
                   PXP_PF_WINDOW_ADMIN_GLOBAL_START;

        for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
                if (pxp_global_win[i])
                        REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
                               pxp_global_win[i]);
}

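/* Parse the firmware binary's buffer-header table and set up pointers to
 * the individual sections: version info, init commands, data arrays and
 * the mode tree.
 */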
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
        struct qed_fw_data *fw = cdev->fw_data;
        struct bin_buffer_hdr *buf_hdr;
        u32 offset, len;

        if (!data) {
                DP_NOTICE(cdev, "Invalid fw data\n");
                return -EINVAL;
        }

        /* First Dword contains metadata and should be skipped */
        buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));

        offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
        fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

        offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
        fw->init_ops = (union init_op *)(data + offset);

        offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
        fw->arr_data = (u32 *)(data + offset);

        offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
        fw->modes_tree_buf = (u8 *)(data + offset);

        len = buf_hdr[BIN_BUF_INIT_CMD].length;
        fw->init_ops_size = len / sizeof(struct init_raw_op);

        return 0;
}