/* qed_init_ops.c */
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/io.h>
  34. #include <linux/delay.h>
  35. #include <linux/errno.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/string.h>
  39. #include "qed.h"
  40. #include "qed_hsi.h"
  41. #include "qed_hw.h"
  42. #include "qed_init_ops.h"
  43. #include "qed_reg_addr.h"
  44. #include "qed_sriov.h"
  45. #define QED_INIT_MAX_POLL_COUNT 100
  46. #define QED_INIT_POLL_PERIOD_US 500
  47. static u32 pxp_global_win[] = {
  48. 0,
  49. 0,
  50. 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
  51. 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
  52. 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
  53. 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
  54. 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
  55. 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
  56. 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
  57. 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
  58. 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
  59. 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
  60. 0,
  61. 0,
  62. 0,
  63. 0,
  64. 0,
  65. 0,
  66. 0,
  67. };
/* Point the device at the firmware's IRO (internal RAM offsets) table.
 * iro_arr is presumably the generated table from the qed HSI/firmware
 * headers - it is not defined in this file.
 */
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}
  72. /* Runtime configuration helpers */
  73. void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
  74. {
  75. int i;
  76. for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
  77. p_hwfn->rt_data.b_valid[i] = false;
  78. }
  79. void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
  80. {
  81. p_hwfn->rt_data.init_val[rt_offset] = val;
  82. p_hwfn->rt_data.b_valid[rt_offset] = true;
  83. }
  84. void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
  85. u32 rt_offset, u32 *p_val, size_t size)
  86. {
  87. size_t i;
  88. for (i = 0; i < size / sizeof(u32); i++) {
  89. p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
  90. p_hwfn->rt_data.b_valid[rt_offset + i] = true;
  91. }
  92. }
/* Flush the runtime-configured values [rt_offset, rt_offset + size) to
 * GRC address @addr.  Only entries flagged valid are written; when
 * @b_must_dmae is set, contiguous runs of valid entries are sent as one
 * DMAE transaction each.  Returns 0, or qed_dmae_host2grc()'s error.
 */
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment: count consecutive valid entries */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc)
			return rc;

		/* Jump over the entire segment, including invalid entry
		 * (the loop's own i++ supplies the +1 past the segment).
		 */
		i += segment;
	}

	return rc;
}
  128. int qed_init_alloc(struct qed_hwfn *p_hwfn)
  129. {
  130. struct qed_rt_data *rt_data = &p_hwfn->rt_data;
  131. if (IS_VF(p_hwfn->cdev))
  132. return 0;
  133. rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
  134. GFP_KERNEL);
  135. if (!rt_data->b_valid)
  136. return -ENOMEM;
  137. rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
  138. GFP_KERNEL);
  139. if (!rt_data->init_val) {
  140. kfree(rt_data->b_valid);
  141. rt_data->b_valid = NULL;
  142. return -ENOMEM;
  143. }
  144. return 0;
  145. }
  146. void qed_init_free(struct qed_hwfn *p_hwfn)
  147. {
  148. kfree(p_hwfn->rt_data.init_val);
  149. p_hwfn->rt_data.init_val = NULL;
  150. kfree(p_hwfn->rt_data.b_valid);
  151. p_hwfn->rt_data.b_valid = NULL;
  152. }
  153. static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
  154. struct qed_ptt *p_ptt,
  155. u32 addr,
  156. u32 dmae_data_offset,
  157. u32 size,
  158. const u32 *buf,
  159. bool b_must_dmae,
  160. bool b_can_dmae)
  161. {
  162. int rc = 0;
  163. /* Perform DMAE only for lengthy enough sections or for wide-bus */
  164. if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
  165. const u32 *data = buf + dmae_data_offset;
  166. u32 i;
  167. for (i = 0; i < size; i++)
  168. qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
  169. } else {
  170. rc = qed_dmae_host2grc(p_hwfn, p_ptt,
  171. (uintptr_t)(buf + dmae_data_offset),
  172. addr, size, 0);
  173. }
  174. return rc;
  175. }
  176. static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
  177. struct qed_ptt *p_ptt,
  178. u32 addr, u32 fill, u32 fill_count)
  179. {
  180. static u32 zero_buffer[DMAE_MAX_RW_SIZE];
  181. memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
  182. /* invoke the DMAE virtual/physical buffer API with
  183. * 1. DMAE init channel
  184. * 2. addr,
  185. * 3. p_hwfb->temp_data,
  186. * 4. fill_count
  187. */
  188. return qed_dmae_host2grc(p_hwfn, p_ptt,
  189. (uintptr_t)(&zero_buffer[0]),
  190. addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
  191. }
  192. static void qed_init_fill(struct qed_hwfn *p_hwfn,
  193. struct qed_ptt *p_ptt,
  194. u32 addr, u32 fill, u32 fill_count)
  195. {
  196. u32 i;
  197. for (i = 0; i < fill_count; i++, addr += sizeof(u32))
  198. qed_wr(p_hwfn, p_ptt, addr, fill);
  199. }
/* Execute an INIT_SRC_ARRAY write command.  The payload lives in the
 * firmware arr_data blob at cmd->args.array_offset; its first dword is a
 * header selecting zipped, repeated-pattern, or plain dword-array form.
 * Returns 0, -EINVAL on unzip failure, or a DMAE error code.
 */
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;
	/* The header dword sits immediately before the payload */
	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		/* Decompress into the pre-allocated unzip_buf, then write */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);
		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
		/* Write the same pattern back-to-back @repeats times */
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
  263. /* init_ops write command */
  264. static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
  265. struct qed_ptt *p_ptt,
  266. struct init_write_op *p_cmd, bool b_can_dmae)
  267. {
  268. u32 data = le32_to_cpu(p_cmd->data);
  269. bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
  270. u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
  271. union init_write_args *arg = &p_cmd->args;
  272. int rc = 0;
  273. /* Sanitize */
  274. if (b_must_dmae && !b_can_dmae) {
  275. DP_NOTICE(p_hwfn,
  276. "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
  277. addr);
  278. return -EINVAL;
  279. }
  280. switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
  281. case INIT_SRC_INLINE:
  282. data = le32_to_cpu(p_cmd->args.inline_val);
  283. qed_wr(p_hwfn, p_ptt, addr, data);
  284. break;
  285. case INIT_SRC_ZEROS:
  286. data = le32_to_cpu(p_cmd->args.zeros_count);
  287. if (b_must_dmae || (b_can_dmae && (data >= 64)))
  288. rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
  289. else
  290. qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
  291. break;
  292. case INIT_SRC_ARRAY:
  293. rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
  294. b_must_dmae, b_can_dmae);
  295. break;
  296. case INIT_SRC_RUNTIME:
  297. qed_init_rt(p_hwfn, p_ptt, addr,
  298. le16_to_cpu(arg->runtime.offset),
  299. le16_to_cpu(arg->runtime.size),
  300. b_must_dmae);
  301. break;
  302. }
  303. return rc;
  304. }
  305. static inline bool comp_eq(u32 val, u32 expected_val)
  306. {
  307. return val == expected_val;
  308. }
  309. static inline bool comp_and(u32 val, u32 expected_val)
  310. {
  311. return (val & expected_val) == expected_val;
  312. }
  313. static inline bool comp_or(u32 val, u32 expected_val)
  314. {
  315. return (val | expected_val) > 0;
  316. }
  317. /* init_ops read/poll commands */
  318. static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
  319. struct qed_ptt *p_ptt, struct init_read_op *cmd)
  320. {
  321. bool (*comp_check)(u32 val, u32 expected_val);
  322. u32 delay = QED_INIT_POLL_PERIOD_US, val;
  323. u32 data, addr, poll;
  324. int i;
  325. data = le32_to_cpu(cmd->op_data);
  326. addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
  327. poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
  328. val = qed_rd(p_hwfn, p_ptt, addr);
  329. if (poll == INIT_POLL_NONE)
  330. return;
  331. switch (poll) {
  332. case INIT_POLL_EQ:
  333. comp_check = comp_eq;
  334. break;
  335. case INIT_POLL_OR:
  336. comp_check = comp_or;
  337. break;
  338. case INIT_POLL_AND:
  339. comp_check = comp_and;
  340. break;
  341. default:
  342. DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
  343. cmd->op_data);
  344. return;
  345. }
  346. data = le32_to_cpu(cmd->expected_val);
  347. for (i = 0;
  348. i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
  349. i++) {
  350. udelay(delay);
  351. val = qed_rd(p_hwfn, p_ptt, addr);
  352. }
  353. if (i == QED_INIT_MAX_POLL_COUNT) {
  354. DP_ERR(p_hwfn,
  355. "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
  356. addr, le32_to_cpu(cmd->expected_val),
  357. val, le32_to_cpu(cmd->op_data));
  358. }
  359. }
  360. /* init_ops callbacks entry point */
  361. static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
  362. struct qed_ptt *p_ptt,
  363. struct init_callback_op *p_cmd)
  364. {
  365. int rc;
  366. switch (p_cmd->callback_id) {
  367. case DMAE_READY_CB:
  368. rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
  369. break;
  370. default:
  371. DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
  372. p_cmd->callback_id);
  373. return -EINVAL;
  374. }
  375. return rc;
  376. }
  377. static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
  378. u16 *p_offset, int modes)
  379. {
  380. struct qed_dev *cdev = p_hwfn->cdev;
  381. const u8 *modes_tree_buf;
  382. u8 arg1, arg2, tree_val;
  383. modes_tree_buf = cdev->fw_data->modes_tree_buf;
  384. tree_val = modes_tree_buf[(*p_offset)++];
  385. switch (tree_val) {
  386. case INIT_MODE_OP_NOT:
  387. return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
  388. case INIT_MODE_OP_OR:
  389. arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
  390. arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
  391. return arg1 | arg2;
  392. case INIT_MODE_OP_AND:
  393. arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
  394. arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
  395. return arg1 & arg2;
  396. default:
  397. tree_val -= MAX_INIT_MODE_OPS;
  398. return (modes & BIT(tree_val)) ? 1 : 0;
  399. }
  400. }
  401. static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
  402. struct init_if_mode_op *p_cmd, int modes)
  403. {
  404. u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
  405. if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
  406. return 0;
  407. else
  408. return GET_FIELD(le32_to_cpu(p_cmd->op_data),
  409. INIT_IF_MODE_OP_CMD_OFFSET);
  410. }
  411. static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
  412. struct init_if_phase_op *p_cmd,
  413. u32 phase, u32 phase_id)
  414. {
  415. u32 data = le32_to_cpu(p_cmd->phase_data);
  416. u32 op_data = le32_to_cpu(p_cmd->op_data);
  417. if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
  418. (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
  419. GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
  420. return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
  421. else
  422. return 0;
  423. }
/* Run all firmware init-ops matching @phase/@phase_id under mode set
 * @modes.  IF_MODE/IF_PHASE ops advance cmd_num to skip non-matching
 * command ranges.  Returns 0, -ENOMEM, or the first command's error.
 */
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	/* Scratch buffer for INIT_ARR_ZIPPED decompression.
	 * NOTE(review): GFP_ATOMIC here contradicts the "sleep-able
	 * context" comment below - confirm the actual calling context.
	 */
	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			/* Returns a command count to skip on mismatch */
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			/* DMAE enablement is latched per phase */
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;
		case INIT_OP_CALLBACK:
			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;
	return rc;
}
  474. void qed_gtt_init(struct qed_hwfn *p_hwfn)
  475. {
  476. u32 gtt_base;
  477. u32 i;
  478. /* Set the global windows */
  479. gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
  480. for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
  481. if (pxp_global_win[i])
  482. REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
  483. pxp_global_win[i]);
  484. }
  485. int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
  486. {
  487. struct qed_fw_data *fw = cdev->fw_data;
  488. struct bin_buffer_hdr *buf_hdr;
  489. u32 offset, len;
  490. if (!data) {
  491. DP_NOTICE(cdev, "Invalid fw data\n");
  492. return -EINVAL;
  493. }
  494. /* First Dword contains metadata and should be skipped */
  495. buf_hdr = (struct bin_buffer_hdr *)data;
  496. offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
  497. fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
  498. offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
  499. fw->init_ops = (union init_op *)(data + offset);
  500. offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
  501. fw->arr_data = (u32 *)(data + offset);
  502. offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
  503. fw->modes_tree_buf = (u8 *)(data + offset);
  504. len = buf_hdr[BIN_BUF_INIT_CMD].length;
  505. fw->init_ops_size = len / sizeof(struct init_raw_op);
  506. return 0;
  507. }