/* i40e_nvm.c */
  1. /*******************************************************************************
  2. *
  3. * Intel Ethernet Controller XL710 Family Linux Driver
  4. * Copyright(c) 2013 - 2014 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along
  16. * with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * The full GNU General Public License is included in this distribution in
  19. * the file called "COPYING".
  20. *
  21. * Contact Information:
  22. * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24. *
  25. ******************************************************************************/
  26. #include "i40e_prototype.h"
  27. /**
  28. * i40e_init_nvm_ops - Initialize NVM function pointers
  29. * @hw: pointer to the HW structure
  30. *
  31. * Setup the function pointers and the NVM info structure. Should be called
  32. * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  33. * Please notice that the NVM term is used here (& in all methods covered
  34. * in this file) as an equivalent of the FLASH part mapped into the SR.
  35. * We are accessing FLASH always thru the Shadow RAM.
  36. **/
  37. i40e_status i40e_init_nvm(struct i40e_hw *hw)
  38. {
  39. struct i40e_nvm_info *nvm = &hw->nvm;
  40. i40e_status ret_code = 0;
  41. u32 fla, gens;
  42. u8 sr_size;
  43. /* The SR size is stored regardless of the nvm programming mode
  44. * as the blank mode may be used in the factory line.
  45. */
  46. gens = rd32(hw, I40E_GLNVM_GENS);
  47. sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  48. I40E_GLNVM_GENS_SR_SIZE_SHIFT);
  49. /* Switching to words (sr_size contains power of 2KB) */
  50. nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  51. /* Check if we are in the normal or blank NVM programming mode */
  52. fla = rd32(hw, I40E_GLNVM_FLA);
  53. if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  54. /* Max NVM timeout */
  55. nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  56. nvm->blank_nvm_mode = false;
  57. } else { /* Blank programming mode */
  58. nvm->blank_nvm_mode = true;
  59. ret_code = I40E_ERR_NVM_BLANK_MODE;
  60. i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  61. }
  62. return ret_code;
  63. }
  64. /**
  65. * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
  66. * @hw: pointer to the HW structure
  67. * @access: NVM access type (read or write)
  68. *
  69. * This function will request NVM ownership for reading
  70. * via the proper Admin Command.
  71. **/
  72. i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
  73. enum i40e_aq_resource_access_type access)
  74. {
  75. i40e_status ret_code = 0;
  76. u64 gtime, timeout;
  77. u64 time_left = 0;
  78. if (hw->nvm.blank_nvm_mode)
  79. goto i40e_i40e_acquire_nvm_exit;
  80. ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
  81. 0, &time_left, NULL);
  82. /* Reading the Global Device Timer */
  83. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  84. /* Store the timeout */
  85. hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
  86. if (ret_code)
  87. i40e_debug(hw, I40E_DEBUG_NVM,
  88. "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
  89. access, time_left, ret_code, hw->aq.asq_last_status);
  90. if (ret_code && time_left) {
  91. /* Poll until the current NVM owner timeouts */
  92. timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
  93. while ((gtime < timeout) && time_left) {
  94. usleep_range(10000, 20000);
  95. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  96. ret_code = i40e_aq_request_resource(hw,
  97. I40E_NVM_RESOURCE_ID,
  98. access, 0, &time_left,
  99. NULL);
  100. if (!ret_code) {
  101. hw->nvm.hw_semaphore_timeout =
  102. I40E_MS_TO_GTIME(time_left) + gtime;
  103. break;
  104. }
  105. }
  106. if (ret_code) {
  107. hw->nvm.hw_semaphore_timeout = 0;
  108. i40e_debug(hw, I40E_DEBUG_NVM,
  109. "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
  110. time_left, ret_code, hw->aq.asq_last_status);
  111. }
  112. }
  113. i40e_i40e_acquire_nvm_exit:
  114. return ret_code;
  115. }
  116. /**
  117. * i40e_release_nvm - Generic request for releasing the NVM ownership
  118. * @hw: pointer to the HW structure
  119. *
  120. * This function will release NVM resource via the proper Admin Command.
  121. **/
  122. void i40e_release_nvm(struct i40e_hw *hw)
  123. {
  124. i40e_status ret_code = I40E_SUCCESS;
  125. u32 total_delay = 0;
  126. if (hw->nvm.blank_nvm_mode)
  127. return;
  128. ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
  129. /* there are some rare cases when trying to release the resource
  130. * results in an admin Q timeout, so handle them correctly
  131. */
  132. while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
  133. (total_delay < hw->aq.asq_cmd_timeout)) {
  134. usleep_range(1000, 2000);
  135. ret_code = i40e_aq_release_resource(hw,
  136. I40E_NVM_RESOURCE_ID,
  137. 0, NULL);
  138. total_delay++;
  139. }
  140. }
  141. /**
  142. * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
  143. * @hw: pointer to the HW structure
  144. *
  145. * Polls the SRCTL Shadow RAM register done bit.
  146. **/
  147. static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
  148. {
  149. i40e_status ret_code = I40E_ERR_TIMEOUT;
  150. u32 srctl, wait_cnt;
  151. /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
  152. for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
  153. srctl = rd32(hw, I40E_GLNVM_SRCTL);
  154. if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
  155. ret_code = 0;
  156. break;
  157. }
  158. udelay(5);
  159. }
  160. if (ret_code == I40E_ERR_TIMEOUT)
  161. i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
  162. return ret_code;
  163. }
  164. /**
  165. * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
  166. * @hw: pointer to the HW structure
  167. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  168. * @data: word read from the Shadow RAM
  169. *
  170. * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  171. **/
  172. static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
  173. u16 *data)
  174. {
  175. i40e_status ret_code = I40E_ERR_TIMEOUT;
  176. u32 sr_reg;
  177. if (offset >= hw->nvm.sr_size) {
  178. i40e_debug(hw, I40E_DEBUG_NVM,
  179. "NVM read error: offset %d beyond Shadow RAM limit %d\n",
  180. offset, hw->nvm.sr_size);
  181. ret_code = I40E_ERR_PARAM;
  182. goto read_nvm_exit;
  183. }
  184. /* Poll the done bit first */
  185. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  186. if (!ret_code) {
  187. /* Write the address and start reading */
  188. sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
  189. BIT(I40E_GLNVM_SRCTL_START_SHIFT);
  190. wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
  191. /* Poll I40E_GLNVM_SRCTL until the done bit is set */
  192. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  193. if (!ret_code) {
  194. sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
  195. *data = (u16)((sr_reg &
  196. I40E_GLNVM_SRDATA_RDDATA_MASK)
  197. >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
  198. }
  199. }
  200. if (ret_code)
  201. i40e_debug(hw, I40E_DEBUG_NVM,
  202. "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
  203. offset);
  204. read_nvm_exit:
  205. return ret_code;
  206. }
  207. /**
  208. * i40e_read_nvm_aq - Read Shadow RAM.
  209. * @hw: pointer to the HW structure.
  210. * @module_pointer: module pointer location in words from the NVM beginning
  211. * @offset: offset in words from module start
  212. * @words: number of words to write
  213. * @data: buffer with words to write to the Shadow RAM
  214. * @last_command: tells the AdminQ that this is the last command
  215. *
  216. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  217. **/
  218. static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  219. u32 offset, u16 words, void *data,
  220. bool last_command)
  221. {
  222. i40e_status ret_code = I40E_ERR_NVM;
  223. struct i40e_asq_cmd_details cmd_details;
  224. memset(&cmd_details, 0, sizeof(cmd_details));
  225. cmd_details.wb_desc = &hw->nvm_wb_desc;
  226. /* Here we are checking the SR limit only for the flat memory model.
  227. * We cannot do it for the module-based model, as we did not acquire
  228. * the NVM resource yet (we cannot get the module pointer value).
  229. * Firmware will check the module-based model.
  230. */
  231. if ((offset + words) > hw->nvm.sr_size)
  232. i40e_debug(hw, I40E_DEBUG_NVM,
  233. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  234. (offset + words), hw->nvm.sr_size);
  235. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  236. /* We can write only up to 4KB (one sector), in one AQ write */
  237. i40e_debug(hw, I40E_DEBUG_NVM,
  238. "NVM write fail error: tried to write %d words, limit is %d.\n",
  239. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  240. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  241. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  242. /* A single write cannot spread over two sectors */
  243. i40e_debug(hw, I40E_DEBUG_NVM,
  244. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  245. offset, words);
  246. else
  247. ret_code = i40e_aq_read_nvm(hw, module_pointer,
  248. 2 * offset, /*bytes*/
  249. 2 * words, /*bytes*/
  250. data, last_command, &cmd_details);
  251. return ret_code;
  252. }
  253. /**
  254. * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
  255. * @hw: pointer to the HW structure
  256. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  257. * @data: word read from the Shadow RAM
  258. *
  259. * Reads one 16 bit word from the Shadow RAM using the AdminQ
  260. **/
  261. static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
  262. u16 *data)
  263. {
  264. i40e_status ret_code = I40E_ERR_TIMEOUT;
  265. ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
  266. *data = le16_to_cpu(*(__le16 *)data);
  267. return ret_code;
  268. }
  269. /**
  270. * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
  271. * @hw: pointer to the HW structure
  272. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  273. * @data: word read from the Shadow RAM
  274. *
  275. * Reads one 16 bit word from the Shadow RAM.
  276. *
  277. * Do not use this function except in cases where the nvm lock is already
  278. * taken via i40e_acquire_nvm().
  279. **/
  280. static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
  281. u16 offset, u16 *data)
  282. {
  283. i40e_status ret_code = 0;
  284. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
  285. ret_code = i40e_read_nvm_word_aq(hw, offset, data);
  286. else
  287. ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
  288. return ret_code;
  289. }
  290. /**
  291. * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
  292. * @hw: pointer to the HW structure
  293. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  294. * @data: word read from the Shadow RAM
  295. *
  296. * Reads one 16 bit word from the Shadow RAM.
  297. **/
  298. i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
  299. u16 *data)
  300. {
  301. i40e_status ret_code = 0;
  302. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  303. if (ret_code)
  304. return ret_code;
  305. ret_code = __i40e_read_nvm_word(hw, offset, data);
  306. i40e_release_nvm(hw);
  307. return ret_code;
  308. }
  309. /**
  310. * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  311. * @hw: pointer to the HW structure
  312. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  313. * @words: (in) number of words to read; (out) number of words actually read
  314. * @data: words read from the Shadow RAM
  315. *
  316. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  317. * method. The buffer read is preceded by the NVM ownership take
  318. * and followed by the release.
  319. **/
  320. static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
  321. u16 *words, u16 *data)
  322. {
  323. i40e_status ret_code = 0;
  324. u16 index, word;
  325. /* Loop thru the selected region */
  326. for (word = 0; word < *words; word++) {
  327. index = offset + word;
  328. ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
  329. if (ret_code)
  330. break;
  331. }
  332. /* Update the number of words read from the Shadow RAM */
  333. *words = word;
  334. return ret_code;
  335. }
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the end of the
			 * current sector (can only happen on the first pass,
			 * since offset is sector-aligned afterwards).
			 */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
					(offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* Aligned: read a full sector or whatever remains */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;
		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ data is little-endian; convert the whole buffer in place */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* On error this reports the partial count read so far */
	*words = words_read;
	return ret_code;
}
  386. /**
  387. * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
  388. * @hw: pointer to the HW structure
  389. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  390. * @words: (in) number of words to read; (out) number of words actually read
  391. * @data: words read from the Shadow RAM
  392. *
  393. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  394. * method.
  395. **/
  396. static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
  397. u16 offset, u16 *words,
  398. u16 *data)
  399. {
  400. i40e_status ret_code = 0;
  401. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
  402. ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
  403. else
  404. ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
  405. return ret_code;
  406. }
  407. /**
  408. * i40e_write_nvm_aq - Writes Shadow RAM.
  409. * @hw: pointer to the HW structure.
  410. * @module_pointer: module pointer location in words from the NVM beginning
  411. * @offset: offset in words from module start
  412. * @words: number of words to write
  413. * @data: buffer with words to write to the Shadow RAM
  414. * @last_command: tells the AdminQ that this is the last command
  415. *
  416. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  417. **/
  418. static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  419. u32 offset, u16 words, void *data,
  420. bool last_command)
  421. {
  422. i40e_status ret_code = I40E_ERR_NVM;
  423. struct i40e_asq_cmd_details cmd_details;
  424. memset(&cmd_details, 0, sizeof(cmd_details));
  425. cmd_details.wb_desc = &hw->nvm_wb_desc;
  426. /* Here we are checking the SR limit only for the flat memory model.
  427. * We cannot do it for the module-based model, as we did not acquire
  428. * the NVM resource yet (we cannot get the module pointer value).
  429. * Firmware will check the module-based model.
  430. */
  431. if ((offset + words) > hw->nvm.sr_size)
  432. i40e_debug(hw, I40E_DEBUG_NVM,
  433. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  434. (offset + words), hw->nvm.sr_size);
  435. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  436. /* We can write only up to 4KB (one sector), in one AQ write */
  437. i40e_debug(hw, I40E_DEBUG_NVM,
  438. "NVM write fail error: tried to write %d words, limit is %d.\n",
  439. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  440. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  441. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  442. /* A single write cannot spread over two sectors */
  443. i40e_debug(hw, I40E_DEBUG_NVM,
  444. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  445. offset, words);
  446. else
  447. ret_code = i40e_aq_update_nvm(hw, module_pointer,
  448. 2 * offset, /*bytes*/
  449. 2 * words, /*bytes*/
  450. data, last_command, &cmd_details);
  451. return ret_code;
  452. }
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	/* Scratch buffer holds one SR sector (4KB) at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page: refill the scratch buffer once per sector */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Checksum is chosen so all counted words sum to the base value */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
  526. /**
  527. * i40e_update_nvm_checksum - Updates the NVM checksum
  528. * @hw: pointer to hardware structure
  529. *
  530. * NVM ownership must be acquired before calling this function and released
  531. * on ARQ completion event reception by caller.
  532. * This function will commit SR to NVM.
  533. **/
  534. i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
  535. {
  536. i40e_status ret_code;
  537. u16 checksum;
  538. __le16 le_sum;
  539. ret_code = i40e_calc_nvm_checksum(hw, &checksum);
  540. if (!ret_code) {
  541. le_sum = cpu_to_le16(checksum);
  542. ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
  543. 1, &le_sum, true);
  544. }
  545. return ret_code;
  546. }
  547. /**
  548. * i40e_validate_nvm_checksum - Validate EEPROM checksum
  549. * @hw: pointer to hardware structure
  550. * @checksum: calculated checksum
  551. *
  552. * Performs checksum calculation and validates the NVM SW checksum. If the
  553. * caller does not need checksum, the value can be NULL.
  554. **/
  555. i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
  556. u16 *checksum)
  557. {
  558. i40e_status ret_code = 0;
  559. u16 checksum_sr = 0;
  560. u16 checksum_local = 0;
  561. /* We must acquire the NVM lock in order to correctly synchronize the
  562. * NVM accesses across multiple PFs. Without doing so it is possible
  563. * for one of the PFs to read invalid data potentially indicating that
  564. * the checksum is invalid.
  565. */
  566. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  567. if (ret_code)
  568. return ret_code;
  569. ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
  570. __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
  571. i40e_release_nvm(hw);
  572. if (ret_code)
  573. return ret_code;
  574. /* Verify read checksum from EEPROM is the same as
  575. * calculated checksum
  576. */
  577. if (checksum_local != checksum_sr)
  578. ret_code = I40E_ERR_NVM_CHECKSUM;
  579. /* If the user cares, return the calculated checksum */
  580. if (checksum)
  581. *checksum = checksum_local;
  582. return ret_code;
  583. }
  584. static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
  585. struct i40e_nvm_access *cmd,
  586. u8 *bytes, int *perrno);
  587. static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  588. struct i40e_nvm_access *cmd,
  589. u8 *bytes, int *perrno);
  590. static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
  591. struct i40e_nvm_access *cmd,
  592. u8 *bytes, int *errno);
  593. static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  594. struct i40e_nvm_access *cmd,
  595. int *perrno);
  596. static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  597. struct i40e_nvm_access *cmd,
  598. int *perrno);
  599. static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
  600. struct i40e_nvm_access *cmd,
  601. u8 *bytes, int *perrno);
  602. static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  603. struct i40e_nvm_access *cmd,
  604. u8 *bytes, int *perrno);
  605. static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
  606. struct i40e_nvm_access *cmd,
  607. u8 *bytes, int *perrno);
  608. static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
  609. struct i40e_nvm_access *cmd,
  610. u8 *bytes, int *perrno);
  611. static inline u8 i40e_nvmupd_get_module(u32 val)
  612. {
  613. return (u8)(val & I40E_NVM_MOD_PNT_MASK);
  614. }
  615. static inline u8 i40e_nvmupd_get_transaction(u32 val)
  616. {
  617. return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
  618. }
/* Human-readable names for NVMUpdate commands, used in debug logging.
 * NOTE(review): this table is indexed directly by the upd_cmd value
 * (see i40e_nvmupd_command), so entry order must stay in sync with the
 * enum i40e_nvmupd_cmd definition.
 */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer (in and out, caller-supplied)
 * @perrno: pointer to return error code (posix-style, 0 on success)
 *
 * Entry point for the NVM update tool traffic. Dispatches the command to
 * the handler for whatever update state is current (hw->nvmupd_state).
 * STATUS requests are answered directly here without entering the state
 * machine; everything else runs under the ARQ mutex so the adminq task
 * cannot observe half-updated wait/release bookkeeping.
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	/* NOTE(review): upd_cmd indexes i40e_nvm_update_state_str even when
	 * it is I40E_NVMUPD_INVALID -- this assumes the string table has an
	 * entry for every enum value including INVALID; the head of the
	 * table is outside this view, so confirm against its definition.
	 */
	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* note: no early return here; an INVALID command still falls into
	 * the state machine below, where the per-state default case rejects
	 * it again with -ESRCH
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		/* need at least one byte to report the state */
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		/* with 4+ bytes of room, also report the awaited opcode.
		 * NOTE(review): the cast store assumes &bytes[2] is
		 * sufficiently aligned for a u16 and uses host endianness --
		 * presumably the update tool expects that; confirm.
		 */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			status = 0;
			goto exit;
		}

		/* still waiting on a previous command's completion event */
		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
exit:
	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Ordering matters in every case below: the NVM semaphore is acquired
 * first, and it is released immediately on failure, or deferred to the
 * adminq completion path (nvm_release_on_done + nvm_wait_opcode) when the
 * command was issued successfully and a follow-up event is expected.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* single-shot read: acquire, read, and release in one go */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-read sequence: keep ownership and move
		 * to the READING state unless the first read failed
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: release happens later, from the completion event */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* single-shot write: wait for the event, then auto-release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-write sequence: ownership is kept across
		 * the whole sequence, so no release_on_done here
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update; on failure prefer the AQ
		 * error translation, fall back to -EIO when the AQ itself
		 * reported nothing
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		/* pass-through AQ command; no NVM semaphore involved */
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
  859. /**
  860. * i40e_nvmupd_state_reading - Handle NVM update state Reading
  861. * @hw: pointer to hardware structure
  862. * @cmd: pointer to nvm update command buffer
  863. * @bytes: pointer to the data buffer
  864. * @perrno: pointer to return error code
  865. *
  866. * NVM ownership is already held. Process legitimate commands and set any
  867. * change in state; reject all other commands.
  868. **/
  869. static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  870. struct i40e_nvm_access *cmd,
  871. u8 *bytes, int *perrno)
  872. {
  873. i40e_status status = 0;
  874. enum i40e_nvmupd_cmd upd_cmd;
  875. upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
  876. switch (upd_cmd) {
  877. case I40E_NVMUPD_READ_SA:
  878. case I40E_NVMUPD_READ_CON:
  879. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  880. break;
  881. case I40E_NVMUPD_READ_LCB:
  882. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  883. i40e_release_nvm(hw);
  884. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  885. break;
  886. default:
  887. i40e_debug(hw, I40E_DEBUG_NVM,
  888. "NVMUPD: bad cmd %s in reading state.\n",
  889. i40e_nvm_update_state_str[upd_cmd]);
  890. status = I40E_NOT_SUPPORTED;
  891. *perrno = -ESRCH;
  892. break;
  893. }
  894. return status;
  895. }
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 *
 * If the write fails with EBUSY because the write semaphore expired
 * mid-sequence, the semaphore is dropped and re-acquired and the command
 * is retried exactly once (see the retry block at the bottom).
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* middle of a write sequence: issue and wait for the event,
		 * keeping NVM ownership across the wait
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last write: on success, release ownership from the
		 * completion event; on failure, translate the AQ error
		 * (or -EIO if the AQ reported nothing) and reset to Init
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			/* checksum was the last step: auto-release on event */
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				/* reacquire failed: restore the original
				 * failure so the caller sees the real error
				 */
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
  1006. /**
  1007. * i40e_nvmupd_check_wait_event - handle NVM update operation events
  1008. * @hw: pointer to the hardware structure
  1009. * @opcode: the event that just happened
  1010. **/
  1011. void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
  1012. {
  1013. if (opcode == hw->nvm_wait_opcode) {
  1014. i40e_debug(hw, I40E_DEBUG_NVM,
  1015. "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
  1016. if (hw->nvm_release_on_done) {
  1017. i40e_release_nvm(hw);
  1018. hw->nvm_release_on_done = false;
  1019. }
  1020. hw->nvm_wait_opcode = 0;
  1021. if (hw->aq.arq_last_status) {
  1022. hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
  1023. return;
  1024. }
  1025. switch (hw->nvmupd_state) {
  1026. case I40E_NVMUPD_STATE_INIT_WAIT:
  1027. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  1028. break;
  1029. case I40E_NVMUPD_STATE_WRITE_WAIT:
  1030. hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
  1031. break;
  1032. default:
  1033. break;
  1034. }
  1035. }
  1036. }
  1037. /**
  1038. * i40e_nvmupd_validate_command - Validate given command
  1039. * @hw: pointer to hardware structure
  1040. * @cmd: pointer to nvm update command buffer
  1041. * @perrno: pointer to return error code
  1042. *
  1043. * Return one of the valid command types or I40E_NVMUPD_INVALID
  1044. **/
  1045. static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  1046. struct i40e_nvm_access *cmd,
  1047. int *perrno)
  1048. {
  1049. enum i40e_nvmupd_cmd upd_cmd;
  1050. u8 module, transaction;
  1051. /* anything that doesn't match a recognized case is an error */
  1052. upd_cmd = I40E_NVMUPD_INVALID;
  1053. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1054. module = i40e_nvmupd_get_module(cmd->config);
  1055. /* limits on data size */
  1056. if ((cmd->data_size < 1) ||
  1057. (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
  1058. i40e_debug(hw, I40E_DEBUG_NVM,
  1059. "i40e_nvmupd_validate_command data_size %d\n",
  1060. cmd->data_size);
  1061. *perrno = -EFAULT;
  1062. return I40E_NVMUPD_INVALID;
  1063. }
  1064. switch (cmd->command) {
  1065. case I40E_NVM_READ:
  1066. switch (transaction) {
  1067. case I40E_NVM_CON:
  1068. upd_cmd = I40E_NVMUPD_READ_CON;
  1069. break;
  1070. case I40E_NVM_SNT:
  1071. upd_cmd = I40E_NVMUPD_READ_SNT;
  1072. break;
  1073. case I40E_NVM_LCB:
  1074. upd_cmd = I40E_NVMUPD_READ_LCB;
  1075. break;
  1076. case I40E_NVM_SA:
  1077. upd_cmd = I40E_NVMUPD_READ_SA;
  1078. break;
  1079. case I40E_NVM_EXEC:
  1080. if (module == 0xf)
  1081. upd_cmd = I40E_NVMUPD_STATUS;
  1082. else if (module == 0)
  1083. upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
  1084. break;
  1085. }
  1086. break;
  1087. case I40E_NVM_WRITE:
  1088. switch (transaction) {
  1089. case I40E_NVM_CON:
  1090. upd_cmd = I40E_NVMUPD_WRITE_CON;
  1091. break;
  1092. case I40E_NVM_SNT:
  1093. upd_cmd = I40E_NVMUPD_WRITE_SNT;
  1094. break;
  1095. case I40E_NVM_LCB:
  1096. upd_cmd = I40E_NVMUPD_WRITE_LCB;
  1097. break;
  1098. case I40E_NVM_SA:
  1099. upd_cmd = I40E_NVMUPD_WRITE_SA;
  1100. break;
  1101. case I40E_NVM_ERA:
  1102. upd_cmd = I40E_NVMUPD_WRITE_ERA;
  1103. break;
  1104. case I40E_NVM_CSUM:
  1105. upd_cmd = I40E_NVMUPD_CSUM_CON;
  1106. break;
  1107. case (I40E_NVM_CSUM|I40E_NVM_SA):
  1108. upd_cmd = I40E_NVMUPD_CSUM_SA;
  1109. break;
  1110. case (I40E_NVM_CSUM|I40E_NVM_LCB):
  1111. upd_cmd = I40E_NVMUPD_CSUM_LCB;
  1112. break;
  1113. case I40E_NVM_EXEC:
  1114. if (module == 0)
  1115. upd_cmd = I40E_NVMUPD_EXEC_AQ;
  1116. break;
  1117. }
  1118. break;
  1119. }
  1120. return upd_cmd;
  1121. }
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer; an AQ descriptor followed by any
 *         indirect-command payload
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The caller-built
 * descriptor at the front of @bytes is sent as-is; its writeback copy is
 * captured in hw->nvm_wb_desc for a later GET_AQ_RESULT.
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	/* have the response descriptor written back for GET_AQ_RESULT */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready.
	 * buffer size is the larger of what the caller supplied and what
	 * the descriptor itself claims (datalen), so the response has room.
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		/* the bounce buffer is allocated once and reused */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
						hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		/* NOTE(review): if allocation failed above, buff stays NULL
		 * while buff_size is non-zero, and the command is still sent
		 * below -- presumably i40e_asq_send_command tolerates that;
		 * confirm, or fail early here.
		 */
		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event?
	 * cmd->offset doubles as the opcode of the event to wait for
	 */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer; offset/data_size select a
 *       window into the result (descriptor followed by data buffer)
 * @bytes: pointer to the data buffer the window is copied into
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The result is laid
 * out as the writeback descriptor (hw->nvm_wb_desc) immediately followed
 * by the indirect data (hw->nvm_buff); the requested window may span the
 * boundary between the two.
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* window starts inside the descriptor: copy the descriptor
		 * part first, then fall through to the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window starts inside the data buffer */
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}
  1246. /**
  1247. * i40e_nvmupd_nvm_read - Read NVM
  1248. * @hw: pointer to hardware structure
  1249. * @cmd: pointer to nvm update command buffer
  1250. * @bytes: pointer to the data buffer
  1251. * @perrno: pointer to return error code
  1252. *
  1253. * cmd structure contains identifiers and data buffer
  1254. **/
  1255. static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  1256. struct i40e_nvm_access *cmd,
  1257. u8 *bytes, int *perrno)
  1258. {
  1259. struct i40e_asq_cmd_details cmd_details;
  1260. i40e_status status;
  1261. u8 module, transaction;
  1262. bool last;
  1263. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1264. module = i40e_nvmupd_get_module(cmd->config);
  1265. last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
  1266. memset(&cmd_details, 0, sizeof(cmd_details));
  1267. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1268. status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1269. bytes, last, &cmd_details);
  1270. if (status) {
  1271. i40e_debug(hw, I40E_DEBUG_NVM,
  1272. "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
  1273. module, cmd->offset, cmd->data_size);
  1274. i40e_debug(hw, I40E_DEBUG_NVM,
  1275. "i40e_nvmupd_nvm_read status %d aq %d\n",
  1276. status, hw->aq.asq_last_status);
  1277. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1278. }
  1279. return status;
  1280. }
  1281. /**
  1282. * i40e_nvmupd_nvm_erase - Erase an NVM module
  1283. * @hw: pointer to hardware structure
  1284. * @cmd: pointer to nvm update command buffer
  1285. * @perrno: pointer to return error code
  1286. *
  1287. * module, offset, data_size and data are in cmd structure
  1288. **/
  1289. static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  1290. struct i40e_nvm_access *cmd,
  1291. int *perrno)
  1292. {
  1293. i40e_status status = 0;
  1294. struct i40e_asq_cmd_details cmd_details;
  1295. u8 module, transaction;
  1296. bool last;
  1297. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1298. module = i40e_nvmupd_get_module(cmd->config);
  1299. last = (transaction & I40E_NVM_LCB);
  1300. memset(&cmd_details, 0, sizeof(cmd_details));
  1301. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1302. status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1303. last, &cmd_details);
  1304. if (status) {
  1305. i40e_debug(hw, I40E_DEBUG_NVM,
  1306. "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
  1307. module, cmd->offset, cmd->data_size);
  1308. i40e_debug(hw, I40E_DEBUG_NVM,
  1309. "i40e_nvmupd_nvm_erase status %d aq %d\n",
  1310. status, hw->aq.asq_last_status);
  1311. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1312. }
  1313. return status;
  1314. }
  1315. /**
  1316. * i40e_nvmupd_nvm_write - Write NVM
  1317. * @hw: pointer to hardware structure
  1318. * @cmd: pointer to nvm update command buffer
  1319. * @bytes: pointer to the data buffer
  1320. * @perrno: pointer to return error code
  1321. *
  1322. * module, offset, data_size and data are in cmd structure
  1323. **/
  1324. static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
  1325. struct i40e_nvm_access *cmd,
  1326. u8 *bytes, int *perrno)
  1327. {
  1328. i40e_status status = 0;
  1329. struct i40e_asq_cmd_details cmd_details;
  1330. u8 module, transaction;
  1331. bool last;
  1332. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1333. module = i40e_nvmupd_get_module(cmd->config);
  1334. last = (transaction & I40E_NVM_LCB);
  1335. memset(&cmd_details, 0, sizeof(cmd_details));
  1336. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1337. status = i40e_aq_update_nvm(hw, module, cmd->offset,
  1338. (u16)cmd->data_size, bytes, last,
  1339. &cmd_details);
  1340. if (status) {
  1341. i40e_debug(hw, I40E_DEBUG_NVM,
  1342. "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
  1343. module, cmd->offset, cmd->data_size);
  1344. i40e_debug(hw, I40E_DEBUG_NVM,
  1345. "i40e_nvmupd_nvm_write status %d aq %d\n",
  1346. status, hw->aq.asq_last_status);
  1347. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1348. }
  1349. return status;
  1350. }