qed_init_ops.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

#define QED_INIT_MAX_POLL_COUNT	100
#define QED_INIT_POLL_PERIOD_US	500

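/* Fixed PXP global window mappings. Each non-zero entry is the target GRC
 * address in 4 KB units (e.g. 0x1c02 -> 0x1c02000); zero entries leave the
 * corresponding window unmapped.
 */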
static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};

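/* Publish the firmware's IRO (internal RAM offsets) table on the device
 * structure so the rest of the driver can resolve storm RAM locations.
 */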
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

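/* Stage a single runtime value (or, in the _agg variant, a contiguous block
 * of values) in the shadow RT arrays; only entries marked valid are later
 * flushed to the chip by qed_init_rt().
 */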
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset,
			   u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val,
			   size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

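/* Flush staged runtime entries to GRC. When DMAE isn't required, valid
 * entries are written one-by-one; for wide-bus registers, maximal runs of
 * consecutive valid entries are located so that each run costs a single
 * qed_dmae_host2grc() transaction.
 */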
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr,
		       u16 rt_offset,
		       u16 size,
		       bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2),
			       p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc != 0)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

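/* Allocate/release the shadow runtime arrays. Both arrays span the full
 * RUNTIME_ARRAY_SIZE so any rt_offset the init tool emits is addressable.
 */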
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		return -ENOMEM;
	}

	return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	kfree(p_hwfn->rt_data.b_valid);
}

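/* Write 'size' dwords from 'buf + dmae_data_offset' to GRC address 'addr'.
 * Short sections (fewer than 16 dwords) that don't target wide-bus
 * registers are written directly, since setting up a DMAE transaction
 * costs more than it saves; everything else uses DMAE when available.
 */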
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	}

	return rc;
}

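/* Zero 'fill_count' dwords at GRC address 'addr' in one DMAE transaction;
 * the QED_DMAE_FLAG_RW_REPL_SRC flag lets the same zeroed host buffer be
 * replicated as the source for the whole fill. Note that the 'fill' value
 * is ignored here - only zero-fill is supported over DMAE.
 */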
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr,
			      u32 fill,
			      u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. zero_buffer (static zeroed scratch source),
	 * 4. fill_count
	 */
	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count,
				 QED_DMAE_FLAG_RW_REPL_SRC);
}

static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr,
			  u32 fill,
			  u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

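/* Handle an INIT_SRC_ARRAY write: the array header in the firmware data
 * blob selects one of three encodings -
 *   INIT_ARR_ZIPPED:   payload is compressed; inflate into unzip_buf first.
 *   INIT_ARR_PATTERN:  a short pattern repeated 'repetitions' times at
 *                      consecutive addresses.
 *   INIT_ARR_STANDARD: plain dword array written as-is.
 */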
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae,
			      bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data +
				       dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *cmd,
			   bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	union init_write_args *arg = &cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		qed_wr(p_hwfn, p_ptt, addr,
		       le32_to_cpu(arg->inline_val));
		break;
	case INIT_SRC_ZEROS:
		if (b_must_dmae ||
		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
						le32_to_cpu(arg->zeros_count));
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0,
				      le32_to_cpu(arg->zeros_count));
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}

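/* Poll comparators for the read command: exact equality, all expected bits
 * set in the read value, or a non-zero result of OR-ing the read value
 * with the expected one, respectively.
 */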
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}

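/* Recursively evaluate the firmware's mode tree, stored in prefix
 * notation: an opcode byte (NOT/OR/AND) is followed by its operand
 * subtree(s), and any other byte encodes a mode-bit test after
 * subtracting MAX_INIT_MODE_OPS. For example (illustrative encoding
 * only), the byte sequence [OR, mode_a, mode_b] matches when either
 * mode bit is set in 'modes'. '*offset' advances past every byte
 * consumed.
 */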
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *offset,
				  int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd,
			     int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

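/* IF_PHASE conditional: returns 0 when the op's phase and phase-id match
 * the current ones (execution falls through to the guarded commands);
 * otherwise returns the number of commands to skip.
 */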
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase,
			      u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

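/* Main init-ops interpreter: walks the firmware's init_ops array and
 * dispatches each opcode (write, read/poll, conditional skip, delay,
 * callback). IF_PHASE/IF_MODE ops advance cmd_num so that commands not
 * applying to this phase or mode set are skipped.
 *
 * A typical call looks like the sketch below; the exact phase and mode
 * values depend on the caller:
 *
 *	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
 *	if (rc)
 *		goto err;
 */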
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt,
		 int phase,
		 int phase_id,
		 int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
		return -ENOMEM;
	}

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;
		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}

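/* Program the fixed global GTT windows into the PXP admin window region;
 * zero entries in pxp_global_win[] are skipped and stay unmapped.
 */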
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

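/* Parse the firmware file's table of contents: 'data' begins with an array
 * of bin_buffer_hdr entries, indexed by the BIN_BUF_* IDs, whose offsets
 * locate the init commands, the value arrays and the modes tree within
 * the blob.
 */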
int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}