i40e_nvm.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include "i40e_prototype.h"
  4. /**
  5. * i40e_init_nvm_ops - Initialize NVM function pointers
  6. * @hw: pointer to the HW structure
  7. *
  8. * Setup the function pointers and the NVM info structure. Should be called
  9. * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  10. * Please notice that the NVM term is used here (& in all methods covered
  11. * in this file) as an equivalent of the FLASH part mapped into the SR.
  12. * We are accessing FLASH always thru the Shadow RAM.
  13. **/
  14. i40e_status i40e_init_nvm(struct i40e_hw *hw)
  15. {
  16. struct i40e_nvm_info *nvm = &hw->nvm;
  17. i40e_status ret_code = 0;
  18. u32 fla, gens;
  19. u8 sr_size;
  20. /* The SR size is stored regardless of the nvm programming mode
  21. * as the blank mode may be used in the factory line.
  22. */
  23. gens = rd32(hw, I40E_GLNVM_GENS);
  24. sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  25. I40E_GLNVM_GENS_SR_SIZE_SHIFT);
  26. /* Switching to words (sr_size contains power of 2KB) */
  27. nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  28. /* Check if we are in the normal or blank NVM programming mode */
  29. fla = rd32(hw, I40E_GLNVM_FLA);
  30. if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  31. /* Max NVM timeout */
  32. nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  33. nvm->blank_nvm_mode = false;
  34. } else { /* Blank programming mode */
  35. nvm->blank_nvm_mode = true;
  36. ret_code = I40E_ERR_NVM_BLANK_MODE;
  37. i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  38. }
  39. return ret_code;
  40. }
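
/*
 * Illustrative sketch (not part of the driver): the size decode used in
 * i40e_init_nvm() worked through once. The SR_SIZE field is an exponent
 * in 1KB units, so a field value of 4 means BIT(4) = 16KB of Shadow RAM,
 * i.e. 16 * 512 = 8192 16-bit words, assuming the usual
 * I40E_SR_WORDS_IN_1KB == 512.
 */
static inline u32 i40e_example_gens_to_words(u32 gens)
{
	u8 sr_size = (gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		     I40E_GLNVM_GENS_SR_SIZE_SHIFT;

	/* power-of-two KB count, converted to 16-bit words */
	return BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
}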

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for the given access type
 * via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							    I40E_NVM_RESOURCE_ID,
							    access, 0, &time_left,
							    NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_acquire_nvm_exit:
	return ret_code;
}
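
/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * the pairing contract for the helpers above. Ownership is held only
 * after a successful acquire, and every successful acquire must be
 * matched by exactly one i40e_release_nvm(). Note that the real update
 * path below sometimes defers the release until an AdminQ completion
 * event instead (see nvm_release_on_done).
 */
static i40e_status i40e_example_with_nvm_owned(struct i40e_hw *hw)
{
	i40e_status ret = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);

	if (ret)
		return ret;	/* ownership not taken, nothing to release */

	/* ... NVM reads/writes via the helpers in this file go here ... */

	i40e_release_nvm(hw);
	return 0;
}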

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		usleep_range(1000, 2000);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID,
						    0, NULL);
		total_delay++;
	}
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
				    u8 module_pointer, u32 offset,
				    u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset,  /*bytes*/
					    2 * words,   /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 *
 * Do not use this function except in cases where the nvm lock is already
 * taken via i40e_acquire_nvm().
 **/
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
					u16 offset, u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_word_aq(hw, offset, data);

	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	i40e_status ret_code = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;

	ret_code = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret_code;
}
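
/*
 * Illustrative sketch (hypothetical caller): reading a single well-known
 * Shadow RAM word through the public helper. On parts with
 * I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK set, i40e_read_nvm_word() takes
 * and drops the semaphore itself; callers that already hold it should
 * use __i40e_read_nvm_word() instead, as i40e_calc_nvm_checksum() does
 * below.
 */
static i40e_status i40e_example_read_vpd_ptr(struct i40e_hw *hw, u16 *ptr)
{
	/* I40E_SR_VPD_PTR is the same word the checksum code reads below */
	return i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, ptr);
}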

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the
 * i40e_read_nvm_word_srctl() method. The caller is expected to have taken
 * NVM ownership beforehand and to release it afterwards.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop thru the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The caller is expected to have taken NVM ownership beforehand and
 * to release it afterwards.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * The FVL AQ does not allow reading more than one page at
		 * a time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using either the AdminQ or
 * the SRCTL register, depending on the access method selected in hw->flags.
 **/
static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
					  u16 offset, u16 *words,
					  u16 *data)
{
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);

	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
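
/*
 * Illustrative sketch (hypothetical caller): a buffered read with the
 * locking done explicitly, mirroring how this file pairs
 * i40e_acquire_nvm()/i40e_release_nvm() around the lock-free helpers.
 */
static i40e_status i40e_example_read_buffer(struct i40e_hw *hw, u16 offset,
					    u16 *words, u16 *data)
{
	i40e_status ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

	if (ret)
		return ret;

	ret = __i40e_read_nvm_buffer(hw, offset, words, data);
	i40e_release_nvm(hw);
	return ret;
}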

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
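
/*
 * Illustrative sketch: the sector-boundary rule enforced by both
 * i40e_read_nvm_aq() and i40e_write_nvm_aq() above, written out as a
 * predicate. With a 4KB sector (2048 16-bit words), offset 2040 and 16
 * words would span words 2040..2055, i.e. two sectors, and must be
 * split by the caller.
 */
static inline bool i40e_example_crosses_sector(u32 offset, u16 words)
{
	return ((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) !=
	       (offset / I40E_SR_SECTOR_SIZE_IN_WORDS);
}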

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of VPD is customer specific and unknown, so this function skips the maximum
 * possible VPD size (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 checksum;
	__le16 le_sum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code) {
		le_sum = cpu_to_le16(checksum);
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);
	}

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, @checksum can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
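
/*
 * Illustrative sketch of the invariant behind the two checksum routines
 * above: i40e_calc_nvm_checksum() stores
 *	checksum = I40E_SR_SW_CHECKSUM_BASE - sum(covered words)
 * so validation succeeds iff sum(covered words) + stored checksum equals
 * I40E_SR_SW_CHECKSUM_BASE modulo 2^16. Written out as a predicate:
 */
static inline bool i40e_example_checksum_ok(u16 sum_of_covered_words,
					    u16 stored_checksum)
{
	return (u16)(sum_of_covered_words + stored_checksum) ==
	       (u16)I40E_SR_SW_CHECKSUM_BASE;
}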

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
					    struct i40e_nvm_access *cmd,
					    u8 *bytes, int *perrno);

static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
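
/*
 * Illustrative sketch: the three helpers above pull disjoint fields
 * (module pointer, transaction type, preservation flags) out of the
 * single cmd->config word that userspace passes down. The decomposition
 * pattern is all that matters here.
 */
static inline void i40e_example_decode_config(u32 config)
{
	u8 module = i40e_nvmupd_get_module(config);
	u8 transaction = i40e_nvmupd_get_transaction(config);
	u8 pres = i40e_nvmupd_get_preservation_flags(config);

	/* e.g. a WRITE_SA request carries transaction == I40E_NVM_SA */
	(void)module; (void)transaction; (void)pres;
}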

static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear the error status even if it has not been read, and log it */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = 0;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
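
/*
 * Illustrative sketch (hypothetical caller and values): polling the
 * state machine with a status request. Per i40e_nvmupd_validate_command()
 * below, an I40E_NVM_READ command with an I40E_NVM_EXEC transaction and
 * module 0xf maps to I40E_NVMUPD_STATUS, which returns immediately with
 * the current state in bytes[0].
 */
static i40e_status i40e_example_query_upd_state(struct i40e_hw *hw, u8 *state,
						int *perrno)
{
	struct i40e_nvm_access cmd = {
		.command = I40E_NVM_READ,
		.config = (I40E_NVM_EXEC << I40E_NVM_TRANS_SHIFT) | 0xf,
		.data_size = 1,
	};
	u8 bytes[4] = {0};
	i40e_status status = i40e_nvmupd_command(hw, &cmd, bytes, perrno);

	*state = bytes[0];
	return status;
}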

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}

	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_clear_wait_state - clear wait state on hw
 * @hw: pointer to the hardware structure
 **/
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVMUPD: clearing wait on opcode 0x%04x\n",
		   hw->nvm_wait_opcode);

	if (hw->nvm_release_on_done) {
		i40e_release_nvm(hw);
		hw->nvm_release_on_done = false;
	}
	hw->nvm_wait_opcode = 0;

	if (hw->aq.arq_last_status) {
		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
		return;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_STATE_WRITE_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		break;

	default:
		break;
	}
}

/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 * @desc: AdminQ descriptor
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
				  struct i40e_aq_desc *desc)
{
	u32 aq_desc_len = sizeof(struct i40e_aq_desc);

	if (opcode == hw->nvm_wait_opcode) {
		memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
		i40e_nvmupd_clear_wait_state(hw);
	}
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM | I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}
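
/*
 * Illustrative sketch (hypothetical values): how a one-shot read request
 * from userspace is classified. With command == I40E_NVM_READ and the
 * transaction field set to I40E_NVM_SA (module pointer 0), the validator
 * above returns I40E_NVMUPD_READ_SA, which the Init state handles with an
 * acquire/read/release sequence.
 */
static enum i40e_nvmupd_cmd i40e_example_classify_read_sa(struct i40e_hw *hw,
							  int *perrno)
{
	struct i40e_nvm_access cmd = {
		.command = I40E_NVM_READ,
		.config = I40E_NVM_SA << I40E_NVM_TRANS_SHIFT, /* module 0 */
		.offset = 0,
		.data_size = 2,		/* one 16-bit word */
	};

	return i40e_nvmupd_validate_command(hw, &cmd, perrno);
}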

/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	if (cmd->offset == 0xffff)
		return 0;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}

/**
 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
					    struct i40e_nvm_access *cmd,
					    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);

	/* check copylength range */
	if (cmd->data_size > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, aq_total_len);
		cmd->data_size = aq_total_len;
	}

	memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);

	return 0;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	u8 preservation_flags;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    preservation_flags, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}