i40e_nvm.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size holds log2 of the SR size in KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading or writing
 * via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_acquire_nvm_exit:
	return ret_code;
}

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	if (!hw->nvm.blank_nvm_mode)
		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}
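
/*
 * Illustrative sketch (not part of the upstream driver): a minimal caller
 * that wraps i40e_read_nvm_word() in the acquire/release pattern used
 * elsewhere in this file. The function name and offset handling are
 * hypothetical; only the i40e_* calls below come from this driver.
 */
static inline i40e_status i40e_example_read_sr_word(struct i40e_hw *hw,
						    u16 offset, u16 *word)
{
	i40e_status status;

	/* take NVM ownership for reading before touching the Shadow RAM */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	/* read a single 16-bit word from the Shadow RAM */
	status = i40e_read_nvm_word(hw, offset, word);

	/* always release ownership, even if the read failed */
	i40e_release_nvm(hw);

	return status;
}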

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to hold the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the AdminQ command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				    u32 offset, u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset, /*bytes*/
					    2 * words,  /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ command.
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using the AdminQ on X722
 * and the GLNVM_SRCTL register otherwise.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_word_aq(hw, offset, data);
	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time using
 * i40e_read_nvm_word_srctl().
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request along sector boundaries.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate the number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is the last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR, using the AdminQ on X722
 * and the GLNVM_SRCTL register otherwise.
 **/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
				 u16 *words, u16 *data)
{
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
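
/*
 * Illustrative sketch (not part of the upstream driver): shows the in/out
 * semantics of the @words argument of i40e_read_nvm_buffer(). The buffer
 * length, the ownership handling and the function name are hypothetical
 * assumptions; ownership is taken here because the AdminQ-backed path on
 * X722 expects the NVM resource to be held.
 */
static inline i40e_status i40e_example_read_sr_buffer(struct i40e_hw *hw,
						      u16 offset)
{
	u16 buf[32];
	u16 words = 32;		/* in: number of words requested */
	i40e_status status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	status = i40e_read_nvm_buffer(hw, offset, &words, buf);
	/* out: 'words' now holds the number of words actually read */

	i40e_release_nvm(hw);

	return status;
}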

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, NULL);

	return ret_code;
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD area are customer specific and unknown, so this function skips
 * the maximum possible size of the VPD area (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code = 0;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u16 checksum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code)
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &checksum, true);

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, the pointer can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code)
		goto i40e_validate_nvm_checksum_exit;

	/* Fetch the checksum word stored in the Shadow RAM */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}
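
/*
 * Illustrative sketch (not part of the upstream driver): a typical pass/fail
 * check built on i40e_validate_nvm_checksum(), passing NULL because the
 * caller does not need the calculated value. The function name and the
 * acquire/release placement are hypothetical.
 */
static inline bool i40e_example_nvm_checksum_ok(struct i40e_hw *hw)
{
	i40e_status status;

	if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ))
		return false;

	/* NULL: only the pass/fail result is of interest here */
	status = i40e_validate_nvm_checksum(hw, NULL);

	i40e_release_nvm(hw);

	return status == 0;
}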

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *errno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *errno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *errno);

/* extract the module pointer from the low bits of the nvmupd config word */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

/* extract the transaction type from the upper bits of the config word */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *errno)
{
	i40e_status status;

	/* assume success */
	*errno = 0;

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}
	return status;
}
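
/*
 * Illustrative sketch (not part of the upstream driver): how an ethtool-style
 * caller could fill a struct i40e_nvm_access for a whole "single read"
 * (READ_SA) transaction and hand it to i40e_nvmupd_command(). The function
 * name, offset and length handling are hypothetical; the config encoding
 * mirrors i40e_nvmupd_get_module()/i40e_nvmupd_get_transaction() above.
 */
static inline i40e_status i40e_example_nvmupd_read_sa(struct i40e_hw *hw,
						      u8 *buf, u32 len,
						      int *errno)
{
	struct i40e_nvm_access cmd = {0};

	cmd.command = I40E_NVM_READ;
	/* transaction type in the upper bits, module pointer in the lower */
	cmd.config = I40E_NVM_SA << I40E_NVM_TRANS_SHIFT;
	cmd.offset = 0;			/* start of the NVM */
	cmd.data_size = len;		/* buffer length in bytes */

	return i40e_nvmupd_command(hw, &cmd, buf, errno);
}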

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *errno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*errno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
			}
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*errno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		if (!status)
			hw->aq.nvm_release_on_done = true;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_CSUM_CON:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		status = i40e_update_nvm_checksum(hw);
		if (status)
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
		else
			hw->aq.nvm_release_on_done = true;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @errno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *errno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*errno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		}
		break;
	}
	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->aq.nvm_release_on_done);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*errno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *errno);
	}
	return upd_cmd;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *errno)
{
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, NULL);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @errno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *errno)
{
	i40e_status status = 0;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, NULL);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *errno)
{
	i40e_status status = 0;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last, NULL);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}
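
/*
 * Illustrative sketch (not part of the upstream driver): the write
 * counterpart of the READ_SA example earlier in this file, issuing a whole
 * "single write" (WRITE_SA) transaction through i40e_nvmupd_command(). The
 * function name and parameters are hypothetical; offset and data_size are
 * passed through unchanged to the AdminQ update command.
 */
static inline i40e_status i40e_example_nvmupd_write_sa(struct i40e_hw *hw,
						       u8 module, u32 offset,
						       u8 *buf, u32 len,
						       int *errno)
{
	struct i40e_nvm_access cmd = {0};

	cmd.command = I40E_NVM_WRITE;
	/* module pointer in the low bits, transaction type above the shift */
	cmd.config = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | module;
	cmd.offset = offset;
	cmd.data_size = len;

	return i40e_nvmupd_command(hw, &cmd, buf, errno);
}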