i40e_nvm.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464
  1. /*******************************************************************************
  2. *
  3. * Intel Ethernet Controller XL710 Family Linux Driver
  4. * Copyright(c) 2013 - 2014 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along
  16. * with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * The full GNU General Public License is included in this distribution in
  19. * the file called "COPYING".
  20. *
  21. * Contact Information:
  22. * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24. *
  25. ******************************************************************************/
  26. #include "i40e_prototype.h"
  27. /**
  28. * i40e_init_nvm_ops - Initialize NVM function pointers
  29. * @hw: pointer to the HW structure
  30. *
  31. * Setup the function pointers and the NVM info structure. Should be called
  32. * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  33. * Please notice that the NVM term is used here (& in all methods covered
  34. * in this file) as an equivalent of the FLASH part mapped into the SR.
  35. * We are accessing FLASH always thru the Shadow RAM.
  36. **/
  37. i40e_status i40e_init_nvm(struct i40e_hw *hw)
  38. {
  39. struct i40e_nvm_info *nvm = &hw->nvm;
  40. i40e_status ret_code = 0;
  41. u32 fla, gens;
  42. u8 sr_size;
  43. /* The SR size is stored regardless of the nvm programming mode
  44. * as the blank mode may be used in the factory line.
  45. */
  46. gens = rd32(hw, I40E_GLNVM_GENS);
  47. sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  48. I40E_GLNVM_GENS_SR_SIZE_SHIFT);
  49. /* Switching to words (sr_size contains power of 2KB) */
  50. nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  51. /* Check if we are in the normal or blank NVM programming mode */
  52. fla = rd32(hw, I40E_GLNVM_FLA);
  53. if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  54. /* Max NVM timeout */
  55. nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  56. nvm->blank_nvm_mode = false;
  57. } else { /* Blank programming mode */
  58. nvm->blank_nvm_mode = true;
  59. ret_code = I40E_ERR_NVM_BLANK_MODE;
  60. i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  61. }
  62. return ret_code;
  63. }
  64. /**
  65. * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
  66. * @hw: pointer to the HW structure
  67. * @access: NVM access type (read or write)
  68. *
  69. * This function will request NVM ownership for reading
  70. * via the proper Admin Command.
  71. **/
  72. i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
  73. enum i40e_aq_resource_access_type access)
  74. {
  75. i40e_status ret_code = 0;
  76. u64 gtime, timeout;
  77. u64 time_left = 0;
  78. if (hw->nvm.blank_nvm_mode)
  79. goto i40e_i40e_acquire_nvm_exit;
  80. ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
  81. 0, &time_left, NULL);
  82. /* Reading the Global Device Timer */
  83. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  84. /* Store the timeout */
  85. hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
  86. if (ret_code)
  87. i40e_debug(hw, I40E_DEBUG_NVM,
  88. "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
  89. access, time_left, ret_code, hw->aq.asq_last_status);
  90. if (ret_code && time_left) {
  91. /* Poll until the current NVM owner timeouts */
  92. timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
  93. while ((gtime < timeout) && time_left) {
  94. usleep_range(10000, 20000);
  95. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  96. ret_code = i40e_aq_request_resource(hw,
  97. I40E_NVM_RESOURCE_ID,
  98. access, 0, &time_left,
  99. NULL);
  100. if (!ret_code) {
  101. hw->nvm.hw_semaphore_timeout =
  102. I40E_MS_TO_GTIME(time_left) + gtime;
  103. break;
  104. }
  105. }
  106. if (ret_code) {
  107. hw->nvm.hw_semaphore_timeout = 0;
  108. i40e_debug(hw, I40E_DEBUG_NVM,
  109. "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
  110. time_left, ret_code, hw->aq.asq_last_status);
  111. }
  112. }
  113. i40e_i40e_acquire_nvm_exit:
  114. return ret_code;
  115. }
  116. /**
  117. * i40e_release_nvm - Generic request for releasing the NVM ownership
  118. * @hw: pointer to the HW structure
  119. *
  120. * This function will release NVM resource via the proper Admin Command.
  121. **/
  122. void i40e_release_nvm(struct i40e_hw *hw)
  123. {
  124. if (!hw->nvm.blank_nvm_mode)
  125. i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
  126. }
  127. /**
  128. * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
  129. * @hw: pointer to the HW structure
  130. *
  131. * Polls the SRCTL Shadow RAM register done bit.
  132. **/
  133. static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
  134. {
  135. i40e_status ret_code = I40E_ERR_TIMEOUT;
  136. u32 srctl, wait_cnt;
  137. /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
  138. for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
  139. srctl = rd32(hw, I40E_GLNVM_SRCTL);
  140. if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
  141. ret_code = 0;
  142. break;
  143. }
  144. udelay(5);
  145. }
  146. if (ret_code == I40E_ERR_TIMEOUT)
  147. i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
  148. return ret_code;
  149. }
  150. /**
  151. * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
  152. * @hw: pointer to the HW structure
  153. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  154. * @data: word read from the Shadow RAM
  155. *
  156. * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  157. **/
  158. static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
  159. u16 *data)
  160. {
  161. i40e_status ret_code = I40E_ERR_TIMEOUT;
  162. u32 sr_reg;
  163. if (offset >= hw->nvm.sr_size) {
  164. i40e_debug(hw, I40E_DEBUG_NVM,
  165. "NVM read error: offset %d beyond Shadow RAM limit %d\n",
  166. offset, hw->nvm.sr_size);
  167. ret_code = I40E_ERR_PARAM;
  168. goto read_nvm_exit;
  169. }
  170. /* Poll the done bit first */
  171. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  172. if (!ret_code) {
  173. /* Write the address and start reading */
  174. sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
  175. BIT(I40E_GLNVM_SRCTL_START_SHIFT);
  176. wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
  177. /* Poll I40E_GLNVM_SRCTL until the done bit is set */
  178. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  179. if (!ret_code) {
  180. sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
  181. *data = (u16)((sr_reg &
  182. I40E_GLNVM_SRDATA_RDDATA_MASK)
  183. >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
  184. }
  185. }
  186. if (ret_code)
  187. i40e_debug(hw, I40E_DEBUG_NVM,
  188. "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
  189. offset);
  190. read_nvm_exit:
  191. return ret_code;
  192. }
  193. /**
  194. * i40e_read_nvm_aq - Read Shadow RAM.
  195. * @hw: pointer to the HW structure.
  196. * @module_pointer: module pointer location in words from the NVM beginning
  197. * @offset: offset in words from module start
  198. * @words: number of words to write
  199. * @data: buffer with words to write to the Shadow RAM
  200. * @last_command: tells the AdminQ that this is the last command
  201. *
  202. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  203. **/
  204. static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  205. u32 offset, u16 words, void *data,
  206. bool last_command)
  207. {
  208. i40e_status ret_code = I40E_ERR_NVM;
  209. struct i40e_asq_cmd_details cmd_details;
  210. memset(&cmd_details, 0, sizeof(cmd_details));
  211. /* Here we are checking the SR limit only for the flat memory model.
  212. * We cannot do it for the module-based model, as we did not acquire
  213. * the NVM resource yet (we cannot get the module pointer value).
  214. * Firmware will check the module-based model.
  215. */
  216. if ((offset + words) > hw->nvm.sr_size)
  217. i40e_debug(hw, I40E_DEBUG_NVM,
  218. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  219. (offset + words), hw->nvm.sr_size);
  220. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  221. /* We can write only up to 4KB (one sector), in one AQ write */
  222. i40e_debug(hw, I40E_DEBUG_NVM,
  223. "NVM write fail error: tried to write %d words, limit is %d.\n",
  224. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  225. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  226. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  227. /* A single write cannot spread over two sectors */
  228. i40e_debug(hw, I40E_DEBUG_NVM,
  229. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  230. offset, words);
  231. else
  232. ret_code = i40e_aq_read_nvm(hw, module_pointer,
  233. 2 * offset, /*bytes*/
  234. 2 * words, /*bytes*/
  235. data, last_command, &cmd_details);
  236. return ret_code;
  237. }
  238. /**
  239. * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
  240. * @hw: pointer to the HW structure
  241. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  242. * @data: word read from the Shadow RAM
  243. *
  244. * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  245. **/
  246. static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
  247. u16 *data)
  248. {
  249. i40e_status ret_code = I40E_ERR_TIMEOUT;
  250. ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
  251. *data = le16_to_cpu(*(__le16 *)data);
  252. return ret_code;
  253. }
  254. /**
  255. * i40e_read_nvm_word - Reads Shadow RAM
  256. * @hw: pointer to the HW structure
  257. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  258. * @data: word read from the Shadow RAM
  259. *
  260. * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  261. **/
  262. i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
  263. u16 *data)
  264. {
  265. enum i40e_status_code ret_code = 0;
  266. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
  267. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  268. if (!ret_code) {
  269. ret_code = i40e_read_nvm_word_aq(hw, offset, data);
  270. i40e_release_nvm(hw);
  271. }
  272. } else {
  273. ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
  274. }
  275. return ret_code;
  276. }
  277. /**
  278. * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  279. * @hw: pointer to the HW structure
  280. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  281. * @words: (in) number of words to read; (out) number of words actually read
  282. * @data: words read from the Shadow RAM
  283. *
  284. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  285. * method. The buffer read is preceded by the NVM ownership take
  286. * and followed by the release.
  287. **/
  288. static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
  289. u16 *words, u16 *data)
  290. {
  291. i40e_status ret_code = 0;
  292. u16 index, word;
  293. /* Loop thru the selected region */
  294. for (word = 0; word < *words; word++) {
  295. index = offset + word;
  296. ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
  297. if (ret_code)
  298. break;
  299. }
  300. /* Update the number of words read from the Shadow RAM */
  301. *words = word;
  302. return ret_code;
  303. }
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* unaligned start: read only up to the sector edge */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
					(offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* sector-aligned: read a full sector or the remainder */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ delivered little-endian data; convert the buffer in place */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* report back how many words actually landed in @data */
	*words = words_read;
	return ret_code;
}
  354. /**
  355. * i40e_read_nvm_buffer - Reads Shadow RAM buffer
  356. * @hw: pointer to the HW structure
  357. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  358. * @words: (in) number of words to read; (out) number of words actually read
  359. * @data: words read from the Shadow RAM
  360. *
  361. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  362. * method. The buffer read is preceded by the NVM ownership take
  363. * and followed by the release.
  364. **/
  365. i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
  366. u16 *words, u16 *data)
  367. {
  368. enum i40e_status_code ret_code = 0;
  369. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
  370. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  371. if (!ret_code) {
  372. ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
  373. data);
  374. i40e_release_nvm(hw);
  375. }
  376. } else {
  377. ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
  378. }
  379. return ret_code;
  380. }
  381. /**
  382. * i40e_write_nvm_aq - Writes Shadow RAM.
  383. * @hw: pointer to the HW structure.
  384. * @module_pointer: module pointer location in words from the NVM beginning
  385. * @offset: offset in words from module start
  386. * @words: number of words to write
  387. * @data: buffer with words to write to the Shadow RAM
  388. * @last_command: tells the AdminQ that this is the last command
  389. *
  390. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  391. **/
  392. static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  393. u32 offset, u16 words, void *data,
  394. bool last_command)
  395. {
  396. i40e_status ret_code = I40E_ERR_NVM;
  397. struct i40e_asq_cmd_details cmd_details;
  398. memset(&cmd_details, 0, sizeof(cmd_details));
  399. cmd_details.wb_desc = &hw->nvm_wb_desc;
  400. /* Here we are checking the SR limit only for the flat memory model.
  401. * We cannot do it for the module-based model, as we did not acquire
  402. * the NVM resource yet (we cannot get the module pointer value).
  403. * Firmware will check the module-based model.
  404. */
  405. if ((offset + words) > hw->nvm.sr_size)
  406. i40e_debug(hw, I40E_DEBUG_NVM,
  407. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  408. (offset + words), hw->nvm.sr_size);
  409. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  410. /* We can write only up to 4KB (one sector), in one AQ write */
  411. i40e_debug(hw, I40E_DEBUG_NVM,
  412. "NVM write fail error: tried to write %d words, limit is %d.\n",
  413. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  414. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  415. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  416. /* A single write cannot spread over two sectors */
  417. i40e_debug(hw, I40E_DEBUG_NVM,
  418. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  419. offset, words);
  420. else
  421. ret_code = i40e_aq_update_nvm(hw, module_pointer,
  422. 2 * offset, /*bytes*/
  423. 2 * words, /*bytes*/
  424. data, last_command, &cmd_details);
  425. return ret_code;
  426. }
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;	/* one-sector scratch buffer */
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
					  I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page: refill the sector buffer each time the word
		 * index crosses a sector boundary
		 */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* the stored checksum word makes the total sum hit the base value */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
  500. /**
  501. * i40e_update_nvm_checksum - Updates the NVM checksum
  502. * @hw: pointer to hardware structure
  503. *
  504. * NVM ownership must be acquired before calling this function and released
  505. * on ARQ completion event reception by caller.
  506. * This function will commit SR to NVM.
  507. **/
  508. i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
  509. {
  510. i40e_status ret_code;
  511. u16 checksum;
  512. __le16 le_sum;
  513. ret_code = i40e_calc_nvm_checksum(hw, &checksum);
  514. if (!ret_code) {
  515. le_sum = cpu_to_le16(checksum);
  516. ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
  517. 1, &le_sum, true);
  518. }
  519. return ret_code;
  520. }
  521. /**
  522. * i40e_validate_nvm_checksum - Validate EEPROM checksum
  523. * @hw: pointer to hardware structure
  524. * @checksum: calculated checksum
  525. *
  526. * Performs checksum calculation and validates the NVM SW checksum. If the
  527. * caller does not need checksum, the value can be NULL.
  528. **/
  529. i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
  530. u16 *checksum)
  531. {
  532. i40e_status ret_code = 0;
  533. u16 checksum_sr = 0;
  534. u16 checksum_local = 0;
  535. ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
  536. if (ret_code)
  537. goto i40e_validate_nvm_checksum_exit;
  538. /* Do not use i40e_read_nvm_word() because we do not want to take
  539. * the synchronization semaphores twice here.
  540. */
  541. i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
  542. /* Verify read checksum from EEPROM is the same as
  543. * calculated checksum
  544. */
  545. if (checksum_local != checksum_sr)
  546. ret_code = I40E_ERR_NVM_CHECKSUM;
  547. /* If the user cares, return the calculated checksum */
  548. if (checksum)
  549. *checksum = checksum_local;
  550. i40e_validate_nvm_checksum_exit:
  551. return ret_code;
  552. }
  553. static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
  554. struct i40e_nvm_access *cmd,
  555. u8 *bytes, int *perrno);
  556. static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  557. struct i40e_nvm_access *cmd,
  558. u8 *bytes, int *perrno);
  559. static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
  560. struct i40e_nvm_access *cmd,
  561. u8 *bytes, int *errno);
  562. static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  563. struct i40e_nvm_access *cmd,
  564. int *perrno);
  565. static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  566. struct i40e_nvm_access *cmd,
  567. int *perrno);
  568. static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
  569. struct i40e_nvm_access *cmd,
  570. u8 *bytes, int *perrno);
  571. static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  572. struct i40e_nvm_access *cmd,
  573. u8 *bytes, int *perrno);
  574. static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
  575. struct i40e_nvm_access *cmd,
  576. u8 *bytes, int *perrno);
  577. static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
  578. struct i40e_nvm_access *cmd,
  579. u8 *bytes, int *perrno);
/* Extract the module pointer field (low bits) from an nvmupd config word. */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
/* Extract the transaction type field from an nvmupd config word. */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
/* Human-readable names for debug logging; indexed by enum i40e_nvmupd_cmd,
 * so the order here must stay in sync with that enum.
 */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0 of the reply carries the current state */
		bytes[0] = hw->nvmupd_state;

		/* if there is room, bytes 2-3 carry the awaited AQ opcode */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* dispatch on the current state-machine state */
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			return 0;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	return status;
}
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Ownership rules visible in this function: every command that talks to the
 * NVM first acquires the resource; on failure the AQ error is translated to
 * a POSIX code in *perrno. "SA" (single-action) commands release ownership
 * inline, "SNT" (start) commands keep it and move the state machine into a
 * READING/WRITE_WAIT phase, and commands that must wait for an AdminQ
 * completion record the opcode in hw->nvm_wait_opcode and (when ownership
 * must be dropped on completion) set hw->nvm_release_on_done.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* one-shot read: acquire, read, release immediately */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a read batch: keep ownership and enter READING,
		 * unless the first read already failed
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: completion arrives later as an nvm_erase AQ event,
		 * at which point ownership is released (nvm_release_on_done)
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* one-shot write: wait for the nvm_update completion, then
		 * release ownership from the event handler
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a write batch: keep ownership (no release_on_done)
		 * and park in WRITE_WAIT until the update completion arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* recompute/update checksum as a standalone action; a failed
		 * checksum update may leave no AQ error, hence the -EIO
		 * fallback when asq_last_status is zero
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					  i40e_aq_rc_to_posix(status,
							      hw->aq.asq_last_status) :
					  -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		/* pass-through AdminQ command; no NVM ownership needed here */
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
  816. /**
  817. * i40e_nvmupd_state_reading - Handle NVM update state Reading
  818. * @hw: pointer to hardware structure
  819. * @cmd: pointer to nvm update command buffer
  820. * @bytes: pointer to the data buffer
  821. * @perrno: pointer to return error code
  822. *
  823. * NVM ownership is already held. Process legitimate commands and set any
  824. * change in state; reject all other commands.
  825. **/
  826. static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  827. struct i40e_nvm_access *cmd,
  828. u8 *bytes, int *perrno)
  829. {
  830. i40e_status status = 0;
  831. enum i40e_nvmupd_cmd upd_cmd;
  832. upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
  833. switch (upd_cmd) {
  834. case I40E_NVMUPD_READ_SA:
  835. case I40E_NVMUPD_READ_CON:
  836. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  837. break;
  838. case I40E_NVMUPD_READ_LCB:
  839. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  840. i40e_release_nvm(hw);
  841. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  842. break;
  843. default:
  844. i40e_debug(hw, I40E_DEBUG_NVM,
  845. "NVMUPD: bad cmd %s in reading state.\n",
  846. i40e_nvm_update_state_str[upd_cmd]);
  847. status = I40E_NOT_SUPPORTED;
  848. *perrno = -ESRCH;
  849. break;
  850. }
  851. return status;
  852. }
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* intermediate write: on success wait for the nvm_update
		 * completion, then come back to Writing (via WRITE_WAIT)
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;
	case I40E_NVMUPD_WRITE_LCB:
		/* last write of the batch: failure aborts back to Init;
		 * success defers the NVM release to the completion event
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			/* -EIO fallback: a failure may leave no AQ error code */
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;
	case I40E_NVMUPD_CSUM_CON:
		/* checksum update mid-batch: keep ownership, wait and return
		 * to Writing
		 */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;
	case I40E_NVMUPD_CSUM_LCB:
		/* checksum update ending the batch: release ownership once
		 * the completion event arrives
		 */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;
	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		/* preserve the original failure so it can be restored if
		 * the reacquire itself fails
		 */
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				/* report the original error, not the reacquire's */
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
  961. /**
  962. * i40e_nvmupd_check_wait_event - handle NVM update operation events
  963. * @hw: pointer to the hardware structure
  964. * @opcode: the event that just happened
  965. **/
  966. void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
  967. {
  968. if (opcode == hw->nvm_wait_opcode) {
  969. i40e_debug(hw, I40E_DEBUG_NVM,
  970. "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
  971. if (hw->nvm_release_on_done) {
  972. i40e_release_nvm(hw);
  973. hw->nvm_release_on_done = false;
  974. }
  975. hw->nvm_wait_opcode = 0;
  976. if (hw->aq.arq_last_status) {
  977. hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
  978. return;
  979. }
  980. switch (hw->nvmupd_state) {
  981. case I40E_NVMUPD_STATE_INIT_WAIT:
  982. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  983. break;
  984. case I40E_NVMUPD_STATE_WRITE_WAIT:
  985. hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
  986. break;
  987. default:
  988. break;
  989. }
  990. }
  991. }
  992. /**
  993. * i40e_nvmupd_validate_command - Validate given command
  994. * @hw: pointer to hardware structure
  995. * @cmd: pointer to nvm update command buffer
  996. * @perrno: pointer to return error code
  997. *
  998. * Return one of the valid command types or I40E_NVMUPD_INVALID
  999. **/
  1000. static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  1001. struct i40e_nvm_access *cmd,
  1002. int *perrno)
  1003. {
  1004. enum i40e_nvmupd_cmd upd_cmd;
  1005. u8 module, transaction;
  1006. /* anything that doesn't match a recognized case is an error */
  1007. upd_cmd = I40E_NVMUPD_INVALID;
  1008. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1009. module = i40e_nvmupd_get_module(cmd->config);
  1010. /* limits on data size */
  1011. if ((cmd->data_size < 1) ||
  1012. (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
  1013. i40e_debug(hw, I40E_DEBUG_NVM,
  1014. "i40e_nvmupd_validate_command data_size %d\n",
  1015. cmd->data_size);
  1016. *perrno = -EFAULT;
  1017. return I40E_NVMUPD_INVALID;
  1018. }
  1019. switch (cmd->command) {
  1020. case I40E_NVM_READ:
  1021. switch (transaction) {
  1022. case I40E_NVM_CON:
  1023. upd_cmd = I40E_NVMUPD_READ_CON;
  1024. break;
  1025. case I40E_NVM_SNT:
  1026. upd_cmd = I40E_NVMUPD_READ_SNT;
  1027. break;
  1028. case I40E_NVM_LCB:
  1029. upd_cmd = I40E_NVMUPD_READ_LCB;
  1030. break;
  1031. case I40E_NVM_SA:
  1032. upd_cmd = I40E_NVMUPD_READ_SA;
  1033. break;
  1034. case I40E_NVM_EXEC:
  1035. if (module == 0xf)
  1036. upd_cmd = I40E_NVMUPD_STATUS;
  1037. else if (module == 0)
  1038. upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
  1039. break;
  1040. }
  1041. break;
  1042. case I40E_NVM_WRITE:
  1043. switch (transaction) {
  1044. case I40E_NVM_CON:
  1045. upd_cmd = I40E_NVMUPD_WRITE_CON;
  1046. break;
  1047. case I40E_NVM_SNT:
  1048. upd_cmd = I40E_NVMUPD_WRITE_SNT;
  1049. break;
  1050. case I40E_NVM_LCB:
  1051. upd_cmd = I40E_NVMUPD_WRITE_LCB;
  1052. break;
  1053. case I40E_NVM_SA:
  1054. upd_cmd = I40E_NVMUPD_WRITE_SA;
  1055. break;
  1056. case I40E_NVM_ERA:
  1057. upd_cmd = I40E_NVMUPD_WRITE_ERA;
  1058. break;
  1059. case I40E_NVM_CSUM:
  1060. upd_cmd = I40E_NVMUPD_CSUM_CON;
  1061. break;
  1062. case (I40E_NVM_CSUM|I40E_NVM_SA):
  1063. upd_cmd = I40E_NVMUPD_CSUM_SA;
  1064. break;
  1065. case (I40E_NVM_CSUM|I40E_NVM_LCB):
  1066. upd_cmd = I40E_NVMUPD_CSUM_LCB;
  1067. break;
  1068. case I40E_NVM_EXEC:
  1069. if (module == 0)
  1070. upd_cmd = I40E_NVMUPD_EXEC_AQ;
  1071. break;
  1072. }
  1073. break;
  1074. }
  1075. return upd_cmd;
  1076. }
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 *
 * The caller's buffer is expected to hold a raw AQ descriptor followed by
 * optional command data. The write-back descriptor is captured into
 * hw->nvm_wb_desc so a later GET_AQ_RESULT call can retrieve it.
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor: the user buffer must at least contain one */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	/* NOTE(review): reinterprets the caller's byte buffer as a
	 * descriptor; assumes the buffer is suitably aligned — confirm
	 * with how the ioctl path allocates it
	 */
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready: size is the larger of
	 * what the caller supplied and what the descriptor claims
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			/* lazily allocate the persistent AQ data buffer */
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}
		if (hw->nvm_buff.va) {
			/* copy the trailing command data after the descriptor */
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event? a nonzero offset carries the
	 * AQ opcode to wait for
	 */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 *
 * The result is laid out as the write-back descriptor (hw->nvm_wb_desc)
 * followed by the data buffer (hw->nvm_buff); cmd->offset indexes into
 * that combined region and cmd->data_size is the copy length.
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range: silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* first serve bytes out of the write-back descriptor */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);
		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);
		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* offset lands inside the data buffer */
		/* NOTE(review): if no prior EXEC_AQ allocated hw->nvm_buff,
		 * va may be NULL here — presumably callers always run
		 * EXEC_AQ first; verify against the ioctl sequence
		 */
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		/* then serve the rest out of the data buffer */
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}
  1201. /**
  1202. * i40e_nvmupd_nvm_read - Read NVM
  1203. * @hw: pointer to hardware structure
  1204. * @cmd: pointer to nvm update command buffer
  1205. * @bytes: pointer to the data buffer
  1206. * @perrno: pointer to return error code
  1207. *
  1208. * cmd structure contains identifiers and data buffer
  1209. **/
  1210. static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  1211. struct i40e_nvm_access *cmd,
  1212. u8 *bytes, int *perrno)
  1213. {
  1214. struct i40e_asq_cmd_details cmd_details;
  1215. i40e_status status;
  1216. u8 module, transaction;
  1217. bool last;
  1218. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1219. module = i40e_nvmupd_get_module(cmd->config);
  1220. last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
  1221. memset(&cmd_details, 0, sizeof(cmd_details));
  1222. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1223. status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1224. bytes, last, &cmd_details);
  1225. if (status) {
  1226. i40e_debug(hw, I40E_DEBUG_NVM,
  1227. "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
  1228. module, cmd->offset, cmd->data_size);
  1229. i40e_debug(hw, I40E_DEBUG_NVM,
  1230. "i40e_nvmupd_nvm_read status %d aq %d\n",
  1231. status, hw->aq.asq_last_status);
  1232. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1233. }
  1234. return status;
  1235. }
  1236. /**
  1237. * i40e_nvmupd_nvm_erase - Erase an NVM module
  1238. * @hw: pointer to hardware structure
  1239. * @cmd: pointer to nvm update command buffer
  1240. * @perrno: pointer to return error code
  1241. *
  1242. * module, offset, data_size and data are in cmd structure
  1243. **/
  1244. static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  1245. struct i40e_nvm_access *cmd,
  1246. int *perrno)
  1247. {
  1248. i40e_status status = 0;
  1249. struct i40e_asq_cmd_details cmd_details;
  1250. u8 module, transaction;
  1251. bool last;
  1252. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1253. module = i40e_nvmupd_get_module(cmd->config);
  1254. last = (transaction & I40E_NVM_LCB);
  1255. memset(&cmd_details, 0, sizeof(cmd_details));
  1256. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1257. status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1258. last, &cmd_details);
  1259. if (status) {
  1260. i40e_debug(hw, I40E_DEBUG_NVM,
  1261. "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
  1262. module, cmd->offset, cmd->data_size);
  1263. i40e_debug(hw, I40E_DEBUG_NVM,
  1264. "i40e_nvmupd_nvm_erase status %d aq %d\n",
  1265. status, hw->aq.asq_last_status);
  1266. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1267. }
  1268. return status;
  1269. }
  1270. /**
  1271. * i40e_nvmupd_nvm_write - Write NVM
  1272. * @hw: pointer to hardware structure
  1273. * @cmd: pointer to nvm update command buffer
  1274. * @bytes: pointer to the data buffer
  1275. * @perrno: pointer to return error code
  1276. *
  1277. * module, offset, data_size and data are in cmd structure
  1278. **/
  1279. static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
  1280. struct i40e_nvm_access *cmd,
  1281. u8 *bytes, int *perrno)
  1282. {
  1283. i40e_status status = 0;
  1284. struct i40e_asq_cmd_details cmd_details;
  1285. u8 module, transaction;
  1286. bool last;
  1287. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1288. module = i40e_nvmupd_get_module(cmd->config);
  1289. last = (transaction & I40E_NVM_LCB);
  1290. memset(&cmd_details, 0, sizeof(cmd_details));
  1291. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1292. status = i40e_aq_update_nvm(hw, module, cmd->offset,
  1293. (u16)cmd->data_size, bytes, last,
  1294. &cmd_details);
  1295. if (status) {
  1296. i40e_debug(hw, I40E_DEBUG_NVM,
  1297. "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
  1298. module, cmd->offset, cmd->data_size);
  1299. i40e_debug(hw, I40E_DEBUG_NVM,
  1300. "i40e_nvmupd_nvm_write status %d aq %d\n",
  1301. status, hw->aq.asq_last_status);
  1302. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1303. }
  1304. return status;
  1305. }