qed_init_ops.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/types.h>
  33. #include <linux/io.h>
  34. #include <linux/delay.h>
  35. #include <linux/errno.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/string.h>
  39. #include "qed.h"
  40. #include "qed_hsi.h"
  41. #include "qed_hw.h"
  42. #include "qed_init_ops.h"
  43. #include "qed_reg_addr.h"
  44. #include "qed_sriov.h"
  45. #define QED_INIT_MAX_POLL_COUNT 100
  46. #define QED_INIT_POLL_PERIOD_US 500
  47. static u32 pxp_global_win[] = {
  48. 0,
  49. 0,
  50. 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
  51. 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
  52. 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
  53. 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
  54. 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
  55. 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
  56. 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
  57. 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
  58. 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
  59. 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
  60. 0,
  61. 0,
  62. 0,
  63. 0,
  64. 0,
  65. 0,
  66. 0,
  67. };
/* Publish the firmware-generated IRO (internal RAM offsets) array on the
 * device structure so other modules can translate IRO handles.
 * iro_arr is defined by the generated firmware headers included above.
 */
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}
  72. /* Runtime configuration helpers */
  73. void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
  74. {
  75. int i;
  76. for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
  77. p_hwfn->rt_data.b_valid[i] = false;
  78. }
  79. void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
  80. {
  81. p_hwfn->rt_data.init_val[rt_offset] = val;
  82. p_hwfn->rt_data.b_valid[rt_offset] = true;
  83. }
  84. void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
  85. u32 rt_offset, u32 *p_val, size_t size)
  86. {
  87. size_t i;
  88. for (i = 0; i < size / sizeof(u32); i++) {
  89. p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
  90. p_hwfn->rt_data.b_valid[rt_offset + i] = true;
  91. }
  92. }
/* Write a runtime-register region to hardware.
 *
 * Walks RT entries [rt_offset, rt_offset + size) and writes every entry
 * flagged valid to GRC address @addr + 4*i. Non-wide-bus regions are
 * written register-by-register; wide-bus regions (@b_must_dmae) are DMAed
 * in maximal contiguous runs of valid entries.
 *
 * Returns 0 on success or the qed_dmae_host2grc() error code.
 */
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment: count consecutive valid entries */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc)
			return rc;

		/* Jump over the entire segment, including invalid entry;
		 * together with the loop's i++ this lands on the entry
		 * following the first invalid one.
		 */
		i += segment;
	}

	return rc;
}
  128. int qed_init_alloc(struct qed_hwfn *p_hwfn)
  129. {
  130. struct qed_rt_data *rt_data = &p_hwfn->rt_data;
  131. if (IS_VF(p_hwfn->cdev))
  132. return 0;
  133. rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
  134. GFP_KERNEL);
  135. if (!rt_data->b_valid)
  136. return -ENOMEM;
  137. rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
  138. GFP_KERNEL);
  139. if (!rt_data->init_val) {
  140. kfree(rt_data->b_valid);
  141. return -ENOMEM;
  142. }
  143. return 0;
  144. }
  145. void qed_init_free(struct qed_hwfn *p_hwfn)
  146. {
  147. kfree(p_hwfn->rt_data.init_val);
  148. kfree(p_hwfn->rt_data.b_valid);
  149. }
  150. static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
  151. struct qed_ptt *p_ptt,
  152. u32 addr,
  153. u32 dmae_data_offset,
  154. u32 size,
  155. const u32 *buf,
  156. bool b_must_dmae,
  157. bool b_can_dmae)
  158. {
  159. int rc = 0;
  160. /* Perform DMAE only for lengthy enough sections or for wide-bus */
  161. if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
  162. const u32 *data = buf + dmae_data_offset;
  163. u32 i;
  164. for (i = 0; i < size; i++)
  165. qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
  166. } else {
  167. rc = qed_dmae_host2grc(p_hwfn, p_ptt,
  168. (uintptr_t)(buf + dmae_data_offset),
  169. addr, size, 0);
  170. }
  171. return rc;
  172. }
  173. static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
  174. struct qed_ptt *p_ptt,
  175. u32 addr, u32 fill, u32 fill_count)
  176. {
  177. static u32 zero_buffer[DMAE_MAX_RW_SIZE];
  178. memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
  179. /* invoke the DMAE virtual/physical buffer API with
  180. * 1. DMAE init channel
  181. * 2. addr,
  182. * 3. p_hwfb->temp_data,
  183. * 4. fill_count
  184. */
  185. return qed_dmae_host2grc(p_hwfn, p_ptt,
  186. (uintptr_t)(&zero_buffer[0]),
  187. addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
  188. }
  189. static void qed_init_fill(struct qed_hwfn *p_hwfn,
  190. struct qed_ptt *p_ptt,
  191. u32 addr, u32 fill, u32 fill_count)
  192. {
  193. u32 i;
  194. for (i = 0; i < fill_count; i++, addr += sizeof(u32))
  195. qed_wr(p_hwfn, p_ptt, addr, fill);
  196. }
/* Execute the array-sourced variant of an init_ops WRITE command.
 *
 * The command references an array inside the firmware blob's arr_data
 * section; the first dword at the referenced offset is a header whose
 * type selects the handling:
 *  - INIT_ARR_ZIPPED:   decompress into p_hwfn->unzip_buf, then write.
 *  - INIT_ARR_PATTERN:  write the same pattern 'repetitions' times at
 *                       consecutive addresses.
 *  - INIT_ARR_STANDARD: write the array once as-is.
 *
 * Returns 0 on success, -EINVAL on decompression failure, or a DMAE
 * error code.
 */
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	/* The array header is the dword at the command's array offset */
	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		/* Payload starts right after the one-dword header */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
		/* Write the same pattern at consecutive address ranges */
		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
  260. /* init_ops write command */
/* init_ops write command: dispatch on the command's encoded source type
 * (inline value, zero-fill, array, or runtime-register region).
 *
 * Wide-bus registers must be written via DMAE; the sanity check below
 * rejects such commands when DMAE is not allowed in the current phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *p_cmd, bool b_can_dmae)
{
	u32 data = le32_to_cpu(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	union init_write_args *arg = &p_cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		/* Immediate dword carried inside the command itself */
		data = le32_to_cpu(p_cmd->args.inline_val);
		qed_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = le32_to_cpu(p_cmd->args.zeros_count);
		/* DMAE fill for wide-bus or large (>= 64 dwords) regions */
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}
  302. static inline bool comp_eq(u32 val, u32 expected_val)
  303. {
  304. return val == expected_val;
  305. }
  306. static inline bool comp_and(u32 val, u32 expected_val)
  307. {
  308. return (val & expected_val) == expected_val;
  309. }
  310. static inline bool comp_or(u32 val, u32 expected_val)
  311. {
  312. return (val | expected_val) > 0;
  313. }
  314. /* init_ops read/poll commands */
  315. static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
  316. struct qed_ptt *p_ptt, struct init_read_op *cmd)
  317. {
  318. bool (*comp_check)(u32 val, u32 expected_val);
  319. u32 delay = QED_INIT_POLL_PERIOD_US, val;
  320. u32 data, addr, poll;
  321. int i;
  322. data = le32_to_cpu(cmd->op_data);
  323. addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
  324. poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
  325. val = qed_rd(p_hwfn, p_ptt, addr);
  326. if (poll == INIT_POLL_NONE)
  327. return;
  328. switch (poll) {
  329. case INIT_POLL_EQ:
  330. comp_check = comp_eq;
  331. break;
  332. case INIT_POLL_OR:
  333. comp_check = comp_or;
  334. break;
  335. case INIT_POLL_AND:
  336. comp_check = comp_and;
  337. break;
  338. default:
  339. DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
  340. cmd->op_data);
  341. return;
  342. }
  343. data = le32_to_cpu(cmd->expected_val);
  344. for (i = 0;
  345. i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
  346. i++) {
  347. udelay(delay);
  348. val = qed_rd(p_hwfn, p_ptt, addr);
  349. }
  350. if (i == QED_INIT_MAX_POLL_COUNT) {
  351. DP_ERR(p_hwfn,
  352. "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
  353. addr, le32_to_cpu(cmd->expected_val),
  354. val, le32_to_cpu(cmd->op_data));
  355. }
  356. }
  357. /* init_ops callbacks entry point */
  358. static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
  359. struct qed_ptt *p_ptt,
  360. struct init_callback_op *p_cmd)
  361. {
  362. DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
  363. }
/* Recursively evaluate one node of the firmware modes tree.
 *
 * Consumes bytes from the modes-tree buffer starting at *p_offset,
 * advancing it past the evaluated subtree, and returns 1 when the
 * expression matches the @modes bitmask, 0 otherwise.
 */
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *p_offset, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		/* XOR with 1 inverts the operand's 0/1 result */
		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		/* Both operands MUST be evaluated (no ||-short-circuit):
		 * each recursive call advances *p_offset past its subtree,
		 * keeping the tree walk in sync.
		 */
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		/* Same non-short-circuit requirement as OR above */
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		/* Leaf node: values beyond the operator range select a
		 * mode bit to test in @modes.
		 */
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}
  388. static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
  389. struct init_if_mode_op *p_cmd, int modes)
  390. {
  391. u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
  392. if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
  393. return 0;
  394. else
  395. return GET_FIELD(le32_to_cpu(p_cmd->op_data),
  396. INIT_IF_MODE_OP_CMD_OFFSET);
  397. }
  398. static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
  399. struct init_if_phase_op *p_cmd,
  400. u32 phase, u32 phase_id)
  401. {
  402. u32 data = le32_to_cpu(p_cmd->phase_data);
  403. u32 op_data = le32_to_cpu(p_cmd->op_data);
  404. if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
  405. (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
  406. GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
  407. return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
  408. else
  409. return 0;
  410. }
/* Run the firmware init_ops command stream for a given phase.
 *
 * Iterates every command in the blob; IF_MODE/IF_PHASE commands return a
 * skip count that advances cmd_num past non-matching sections. DMAE usage
 * is only enabled once an IF_PHASE command sets its DMAE_ENABLE flag.
 *
 * Returns 0 on success, -ENOMEM if the unzip scratch buffer cannot be
 * allocated, or the first command's error code.
 */
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	/* Scratch buffer used by INIT_ARR_ZIPPED decompression */
	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			/* Returns the number of commands to skip */
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			/* NOTE(review): despite the comment above, this uses
			 * udelay() and the scratch buffer uses GFP_ATOMIC —
			 * confirm whether usleep_range()/GFP_KERNEL would be
			 * appropriate here.
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}
  460. void qed_gtt_init(struct qed_hwfn *p_hwfn)
  461. {
  462. u32 gtt_base;
  463. u32 i;
  464. /* Set the global windows */
  465. gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
  466. for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
  467. if (pxp_global_win[i])
  468. REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
  469. pxp_global_win[i]);
  470. }
  471. int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
  472. {
  473. struct qed_fw_data *fw = cdev->fw_data;
  474. struct bin_buffer_hdr *buf_hdr;
  475. u32 offset, len;
  476. if (!data) {
  477. DP_NOTICE(cdev, "Invalid fw data\n");
  478. return -EINVAL;
  479. }
  480. /* First Dword contains metadata and should be skipped */
  481. buf_hdr = (struct bin_buffer_hdr *)data;
  482. offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
  483. fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
  484. offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
  485. fw->init_ops = (union init_op *)(data + offset);
  486. offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
  487. fw->arr_data = (u32 *)(data + offset);
  488. offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
  489. fw->modes_tree_buf = (u8 *)(data + offset);
  490. len = buf_hdr[BIN_BUF_INIT_CMD].length;
  491. fw->init_ops_size = len / sizeof(struct init_raw_op);
  492. return 0;
  493. }