i40e_nvm.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*******************************************************************************
  3. *
  4. * Intel Ethernet Controller XL710 Family Linux Driver
  5. * Copyright(c) 2013 - 2014 Intel Corporation.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms and conditions of the GNU General Public License,
  9. * version 2, as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope it will be useful, but WITHOUT
  12. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. * more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along
  17. * with this program. If not, see <http://www.gnu.org/licenses/>.
  18. *
  19. * The full GNU General Public License is included in this distribution in
  20. * the file called "COPYING".
  21. *
  22. * Contact Information:
  23. * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  24. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  25. *
  26. ******************************************************************************/
  27. #include "i40e_prototype.h"
  28. /**
  29. * i40e_init_nvm_ops - Initialize NVM function pointers
  30. * @hw: pointer to the HW structure
  31. *
  32. * Setup the function pointers and the NVM info structure. Should be called
  33. * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  34. * Please notice that the NVM term is used here (& in all methods covered
  35. * in this file) as an equivalent of the FLASH part mapped into the SR.
  36. * We are accessing FLASH always thru the Shadow RAM.
  37. **/
  38. i40e_status i40e_init_nvm(struct i40e_hw *hw)
  39. {
  40. struct i40e_nvm_info *nvm = &hw->nvm;
  41. i40e_status ret_code = 0;
  42. u32 fla, gens;
  43. u8 sr_size;
  44. /* The SR size is stored regardless of the nvm programming mode
  45. * as the blank mode may be used in the factory line.
  46. */
  47. gens = rd32(hw, I40E_GLNVM_GENS);
  48. sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  49. I40E_GLNVM_GENS_SR_SIZE_SHIFT);
  50. /* Switching to words (sr_size contains power of 2KB) */
  51. nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  52. /* Check if we are in the normal or blank NVM programming mode */
  53. fla = rd32(hw, I40E_GLNVM_FLA);
  54. if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  55. /* Max NVM timeout */
  56. nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  57. nvm->blank_nvm_mode = false;
  58. } else { /* Blank programming mode */
  59. nvm->blank_nvm_mode = true;
  60. ret_code = I40E_ERR_NVM_BLANK_MODE;
  61. i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  62. }
  63. return ret_code;
  64. }
  65. /**
  66. * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
  67. * @hw: pointer to the HW structure
  68. * @access: NVM access type (read or write)
  69. *
  70. * This function will request NVM ownership for reading
  71. * via the proper Admin Command.
  72. **/
  73. i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
  74. enum i40e_aq_resource_access_type access)
  75. {
  76. i40e_status ret_code = 0;
  77. u64 gtime, timeout;
  78. u64 time_left = 0;
  79. if (hw->nvm.blank_nvm_mode)
  80. goto i40e_i40e_acquire_nvm_exit;
  81. ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
  82. 0, &time_left, NULL);
  83. /* Reading the Global Device Timer */
  84. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  85. /* Store the timeout */
  86. hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
  87. if (ret_code)
  88. i40e_debug(hw, I40E_DEBUG_NVM,
  89. "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
  90. access, time_left, ret_code, hw->aq.asq_last_status);
  91. if (ret_code && time_left) {
  92. /* Poll until the current NVM owner timeouts */
  93. timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
  94. while ((gtime < timeout) && time_left) {
  95. usleep_range(10000, 20000);
  96. gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  97. ret_code = i40e_aq_request_resource(hw,
  98. I40E_NVM_RESOURCE_ID,
  99. access, 0, &time_left,
  100. NULL);
  101. if (!ret_code) {
  102. hw->nvm.hw_semaphore_timeout =
  103. I40E_MS_TO_GTIME(time_left) + gtime;
  104. break;
  105. }
  106. }
  107. if (ret_code) {
  108. hw->nvm.hw_semaphore_timeout = 0;
  109. i40e_debug(hw, I40E_DEBUG_NVM,
  110. "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
  111. time_left, ret_code, hw->aq.asq_last_status);
  112. }
  113. }
  114. i40e_i40e_acquire_nvm_exit:
  115. return ret_code;
  116. }
  117. /**
  118. * i40e_release_nvm - Generic request for releasing the NVM ownership
  119. * @hw: pointer to the HW structure
  120. *
  121. * This function will release NVM resource via the proper Admin Command.
  122. **/
  123. void i40e_release_nvm(struct i40e_hw *hw)
  124. {
  125. i40e_status ret_code = I40E_SUCCESS;
  126. u32 total_delay = 0;
  127. if (hw->nvm.blank_nvm_mode)
  128. return;
  129. ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
  130. /* there are some rare cases when trying to release the resource
  131. * results in an admin Q timeout, so handle them correctly
  132. */
  133. while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
  134. (total_delay < hw->aq.asq_cmd_timeout)) {
  135. usleep_range(1000, 2000);
  136. ret_code = i40e_aq_release_resource(hw,
  137. I40E_NVM_RESOURCE_ID,
  138. 0, NULL);
  139. total_delay++;
  140. }
  141. }
  142. /**
  143. * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
  144. * @hw: pointer to the HW structure
  145. *
  146. * Polls the SRCTL Shadow RAM register done bit.
  147. **/
  148. static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
  149. {
  150. i40e_status ret_code = I40E_ERR_TIMEOUT;
  151. u32 srctl, wait_cnt;
  152. /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
  153. for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
  154. srctl = rd32(hw, I40E_GLNVM_SRCTL);
  155. if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
  156. ret_code = 0;
  157. break;
  158. }
  159. udelay(5);
  160. }
  161. if (ret_code == I40E_ERR_TIMEOUT)
  162. i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
  163. return ret_code;
  164. }
  165. /**
  166. * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
  167. * @hw: pointer to the HW structure
  168. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  169. * @data: word read from the Shadow RAM
  170. *
  171. * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  172. **/
  173. static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
  174. u16 *data)
  175. {
  176. i40e_status ret_code = I40E_ERR_TIMEOUT;
  177. u32 sr_reg;
  178. if (offset >= hw->nvm.sr_size) {
  179. i40e_debug(hw, I40E_DEBUG_NVM,
  180. "NVM read error: offset %d beyond Shadow RAM limit %d\n",
  181. offset, hw->nvm.sr_size);
  182. ret_code = I40E_ERR_PARAM;
  183. goto read_nvm_exit;
  184. }
  185. /* Poll the done bit first */
  186. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  187. if (!ret_code) {
  188. /* Write the address and start reading */
  189. sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
  190. BIT(I40E_GLNVM_SRCTL_START_SHIFT);
  191. wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
  192. /* Poll I40E_GLNVM_SRCTL until the done bit is set */
  193. ret_code = i40e_poll_sr_srctl_done_bit(hw);
  194. if (!ret_code) {
  195. sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
  196. *data = (u16)((sr_reg &
  197. I40E_GLNVM_SRDATA_RDDATA_MASK)
  198. >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
  199. }
  200. }
  201. if (ret_code)
  202. i40e_debug(hw, I40E_DEBUG_NVM,
  203. "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
  204. offset);
  205. read_nvm_exit:
  206. return ret_code;
  207. }
  208. /**
  209. * i40e_read_nvm_aq - Read Shadow RAM.
  210. * @hw: pointer to the HW structure.
  211. * @module_pointer: module pointer location in words from the NVM beginning
  212. * @offset: offset in words from module start
  213. * @words: number of words to write
  214. * @data: buffer with words to write to the Shadow RAM
  215. * @last_command: tells the AdminQ that this is the last command
  216. *
  217. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  218. **/
  219. static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
  220. u8 module_pointer, u32 offset,
  221. u16 words, void *data,
  222. bool last_command)
  223. {
  224. i40e_status ret_code = I40E_ERR_NVM;
  225. struct i40e_asq_cmd_details cmd_details;
  226. memset(&cmd_details, 0, sizeof(cmd_details));
  227. cmd_details.wb_desc = &hw->nvm_wb_desc;
  228. /* Here we are checking the SR limit only for the flat memory model.
  229. * We cannot do it for the module-based model, as we did not acquire
  230. * the NVM resource yet (we cannot get the module pointer value).
  231. * Firmware will check the module-based model.
  232. */
  233. if ((offset + words) > hw->nvm.sr_size)
  234. i40e_debug(hw, I40E_DEBUG_NVM,
  235. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  236. (offset + words), hw->nvm.sr_size);
  237. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  238. /* We can write only up to 4KB (one sector), in one AQ write */
  239. i40e_debug(hw, I40E_DEBUG_NVM,
  240. "NVM write fail error: tried to write %d words, limit is %d.\n",
  241. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  242. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  243. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  244. /* A single write cannot spread over two sectors */
  245. i40e_debug(hw, I40E_DEBUG_NVM,
  246. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  247. offset, words);
  248. else
  249. ret_code = i40e_aq_read_nvm(hw, module_pointer,
  250. 2 * offset, /*bytes*/
  251. 2 * words, /*bytes*/
  252. data, last_command, &cmd_details);
  253. return ret_code;
  254. }
  255. /**
  256. * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
  257. * @hw: pointer to the HW structure
  258. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  259. * @data: word read from the Shadow RAM
  260. *
  261. * Reads one 16 bit word from the Shadow RAM using the AdminQ
  262. **/
  263. static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
  264. u16 *data)
  265. {
  266. i40e_status ret_code = I40E_ERR_TIMEOUT;
  267. ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
  268. *data = le16_to_cpu(*(__le16 *)data);
  269. return ret_code;
  270. }
  271. /**
  272. * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  273. * @hw: pointer to the HW structure
  274. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  275. * @data: word read from the Shadow RAM
  276. *
  277. * Reads one 16 bit word from the Shadow RAM.
  278. *
  279. * Do not use this function except in cases where the nvm lock is already
  280. * taken via i40e_acquire_nvm().
  281. **/
  282. static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
  283. u16 offset, u16 *data)
  284. {
  285. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
  286. return i40e_read_nvm_word_aq(hw, offset, data);
  287. return i40e_read_nvm_word_srctl(hw, offset, data);
  288. }
  289. /**
  290. * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
  291. * @hw: pointer to the HW structure
  292. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  293. * @data: word read from the Shadow RAM
  294. *
  295. * Reads one 16 bit word from the Shadow RAM.
  296. **/
  297. i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
  298. u16 *data)
  299. {
  300. i40e_status ret_code = 0;
  301. if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
  302. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  303. if (ret_code)
  304. return ret_code;
  305. ret_code = __i40e_read_nvm_word(hw, offset, data);
  306. if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
  307. i40e_release_nvm(hw);
  308. return ret_code;
  309. }
  310. /**
  311. * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  312. * @hw: pointer to the HW structure
  313. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  314. * @words: (in) number of words to read; (out) number of words actually read
  315. * @data: words read from the Shadow RAM
  316. *
  317. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  318. * method. The buffer read is preceded by the NVM ownership take
  319. * and followed by the release.
  320. **/
  321. static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
  322. u16 *words, u16 *data)
  323. {
  324. i40e_status ret_code = 0;
  325. u16 index, word;
  326. /* Loop thru the selected region */
  327. for (word = 0; word < *words; word++) {
  328. index = offset + word;
  329. ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
  330. if (ret_code)
  331. break;
  332. }
  333. /* Update the number of words read from the Shadow RAM */
  334. *words = word;
  335. return ret_code;
  336. }
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request into chunks that never cross a sector
 * boundary (i40e_read_nvm_aq() rejects cross-sector reads). On success the
 * whole buffer is byte-swapped from little-endian to host order.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the next sector
			 * boundary (can only happen on the first iteration,
			 * since offset is sector-aligned afterwards).
			 */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* Aligned: read a full sector or whatever remains */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ data is little-endian; convert the whole buffer in place */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* On error this reports the count successfully read before failing */
	*words = words_read;
	return ret_code;
}
  387. /**
  388. * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
  389. * @hw: pointer to the HW structure
  390. * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  391. * @words: (in) number of words to read; (out) number of words actually read
  392. * @data: words read from the Shadow RAM
  393. *
  394. * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
  395. * method.
  396. **/
  397. static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
  398. u16 offset, u16 *words,
  399. u16 *data)
  400. {
  401. if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
  402. return i40e_read_nvm_buffer_aq(hw, offset, words, data);
  403. return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
  404. }
  405. /**
  406. * i40e_write_nvm_aq - Writes Shadow RAM.
  407. * @hw: pointer to the HW structure.
  408. * @module_pointer: module pointer location in words from the NVM beginning
  409. * @offset: offset in words from module start
  410. * @words: number of words to write
  411. * @data: buffer with words to write to the Shadow RAM
  412. * @last_command: tells the AdminQ that this is the last command
  413. *
  414. * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  415. **/
  416. static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  417. u32 offset, u16 words, void *data,
  418. bool last_command)
  419. {
  420. i40e_status ret_code = I40E_ERR_NVM;
  421. struct i40e_asq_cmd_details cmd_details;
  422. memset(&cmd_details, 0, sizeof(cmd_details));
  423. cmd_details.wb_desc = &hw->nvm_wb_desc;
  424. /* Here we are checking the SR limit only for the flat memory model.
  425. * We cannot do it for the module-based model, as we did not acquire
  426. * the NVM resource yet (we cannot get the module pointer value).
  427. * Firmware will check the module-based model.
  428. */
  429. if ((offset + words) > hw->nvm.sr_size)
  430. i40e_debug(hw, I40E_DEBUG_NVM,
  431. "NVM write error: offset %d beyond Shadow RAM limit %d\n",
  432. (offset + words), hw->nvm.sr_size);
  433. else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
  434. /* We can write only up to 4KB (one sector), in one AQ write */
  435. i40e_debug(hw, I40E_DEBUG_NVM,
  436. "NVM write fail error: tried to write %d words, limit is %d.\n",
  437. words, I40E_SR_SECTOR_SIZE_IN_WORDS);
  438. else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
  439. != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
  440. /* A single write cannot spread over two sectors */
  441. i40e_debug(hw, I40E_DEBUG_NVM,
  442. "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
  443. offset, words);
  444. else
  445. ret_code = i40e_aq_update_nvm(hw, module_pointer,
  446. 2 * offset, /*bytes*/
  447. 2 * words, /*bytes*/
  448. data, last_command, 0,
  449. &cmd_details);
  450. return ret_code;
  451. }
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 *
 * NOTE(review): uses the lock-free __i40e_read_nvm_word()/_buffer() helpers,
 * so the caller is presumably expected to hold the NVM resource — confirm
 * against callers (i40e_update_nvm_checksum / i40e_validate_nvm_checksum).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	/* One sector of scratch space, reused for every SR page read below */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page (refill the scratch buffer once per sector) */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Stored checksum is BASE minus the sum of all counted words */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
  525. /**
  526. * i40e_update_nvm_checksum - Updates the NVM checksum
  527. * @hw: pointer to hardware structure
  528. *
  529. * NVM ownership must be acquired before calling this function and released
  530. * on ARQ completion event reception by caller.
  531. * This function will commit SR to NVM.
  532. **/
  533. i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
  534. {
  535. i40e_status ret_code;
  536. u16 checksum;
  537. __le16 le_sum;
  538. ret_code = i40e_calc_nvm_checksum(hw, &checksum);
  539. if (!ret_code) {
  540. le_sum = cpu_to_le16(checksum);
  541. ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
  542. 1, &le_sum, true);
  543. }
  544. return ret_code;
  545. }
  546. /**
  547. * i40e_validate_nvm_checksum - Validate EEPROM checksum
  548. * @hw: pointer to hardware structure
  549. * @checksum: calculated checksum
  550. *
  551. * Performs checksum calculation and validates the NVM SW checksum. If the
  552. * caller does not need checksum, the value can be NULL.
  553. **/
  554. i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
  555. u16 *checksum)
  556. {
  557. i40e_status ret_code = 0;
  558. u16 checksum_sr = 0;
  559. u16 checksum_local = 0;
  560. /* We must acquire the NVM lock in order to correctly synchronize the
  561. * NVM accesses across multiple PFs. Without doing so it is possible
  562. * for one of the PFs to read invalid data potentially indicating that
  563. * the checksum is invalid.
  564. */
  565. ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
  566. if (ret_code)
  567. return ret_code;
  568. ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
  569. __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
  570. i40e_release_nvm(hw);
  571. if (ret_code)
  572. return ret_code;
  573. /* Verify read checksum from EEPROM is the same as
  574. * calculated checksum
  575. */
  576. if (checksum_local != checksum_sr)
  577. ret_code = I40E_ERR_NVM_CHECKSUM;
  578. /* If the user cares, return the calculated checksum */
  579. if (checksum)
  580. *checksum = checksum_local;
  581. return ret_code;
  582. }
/* Forward declarations for the NVM update state-machine helpers defined
 * later in this file, grouped here so i40e_nvmupd_command() can dispatch
 * to them.
 * NOTE(review): i40e_nvmupd_state_writing's last parameter is named
 * "errno" (shadows the C library macro) while every sibling uses
 * "perrno" — harmless in a prototype, but worth normalizing.
 */
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
					    struct i40e_nvm_access *cmd,
					    u8 *bytes, int *perrno);
/**
 * i40e_nvmupd_get_module - extract the module pointer bits
 * @val: combined config word from an NVM update command
 */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

/**
 * i40e_nvmupd_get_transaction - extract the transaction type bits
 * @val: combined config word from an NVM update command
 */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

/**
 * i40e_nvmupd_get_preservation_flags - extract the preservation flag bits
 * @val: combined config word from an NVM update command
 */
static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
/* Human-readable names for NVM update commands, used for debug logging in
 * i40e_nvmupd_command(), which indexes this table by the validated command
 * value — so the order here must match the corresponding enum.
 */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current.
 * Status requests are answered immediately; all other commands are routed
 * through the per-state handlers under the ARQ mutex.
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                                struct i40e_nvm_access *cmd,
                                u8 *bytes, int *perrno)
{
    i40e_status status;
    enum i40e_nvmupd_cmd upd_cmd;

    /* assume success */
    *perrno = 0;

    /* early check for status command and debug msgs */
    upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
    i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
               i40e_nvm_update_state_str[upd_cmd],
               hw->nvmupd_state,
               hw->nvm_release_on_done, hw->nvm_wait_opcode,
               cmd->command, cmd->config, cmd->offset, cmd->data_size);

    /* Note: deliberately no return here — an INVALID command still falls
     * through to the state machine below, where each state's default case
     * rejects it with -ESRCH.
     */
    if (upd_cmd == I40E_NVMUPD_INVALID) {
        *perrno = -EFAULT;
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "i40e_nvmupd_validate_command returns %d errno %d\n",
                   upd_cmd, *perrno);
    }

    /* a status request returns immediately rather than
     * going into the state machine
     */
    if (upd_cmd == I40E_NVMUPD_STATUS) {
        if (!cmd->data_size) {
            *perrno = -EFAULT;
            return I40E_ERR_BUF_TOO_SHORT;
        }

        /* byte 0: current state; bytes 2-3 (when there is room for
         * them): opcode the state machine is waiting on
         */
        bytes[0] = hw->nvmupd_state;

        if (cmd->data_size >= 4) {
            bytes[1] = 0;
            *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
        }

        /* Clear error status on read */
        if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        return 0;
    }

    /* Clear status even it is not read and log */
    if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
    }

    /* Acquire lock to prevent race condition where adminq_task
     * can execute after i40e_nvmupd_nvm_read/write but before state
     * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
     *
     * During NVMUpdate, it is observed that lock could be held for
     * ~5ms for most commands. However lock is held for ~60ms for
     * NVMUPD_CSUM_LCB command.
     */
    mutex_lock(&hw->aq.arq_mutex);
    switch (hw->nvmupd_state) {
    case I40E_NVMUPD_STATE_INIT:
        status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
        break;

    case I40E_NVMUPD_STATE_READING:
        status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
        break;

    case I40E_NVMUPD_STATE_WRITING:
        status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
        break;

    case I40E_NVMUPD_STATE_INIT_WAIT:
    case I40E_NVMUPD_STATE_WRITE_WAIT:
        /* if we need to stop waiting for an event, clear
         * the wait info and return before doing anything else
         */
        if (cmd->offset == 0xffff) {
            i40e_nvmupd_clear_wait_state(hw);
            status = 0;
            break;
        }

        status = I40E_ERR_NOT_READY;
        *perrno = -EBUSY;
        break;

    default:
        /* invalid state, should never happen */
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "NVMUPD: no such state %d\n", hw->nvmupd_state);
        status = I40E_NOT_SUPPORTED;
        *perrno = -ESRCH;
        break;
    }
    mutex_unlock(&hw->aq.arq_mutex);

    return status;
}
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Invariant throughout: on any failure after i40e_acquire_nvm() succeeds,
 * the NVM semaphore is released here; on success it is either released
 * immediately (SA read) or kept and handed off to the wait/adminq path via
 * nvm_release_on_done / the next state.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
                                          u8 *bytes, int *perrno)
{
    i40e_status status = 0;
    enum i40e_nvmupd_cmd upd_cmd;

    upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

    switch (upd_cmd) {
    case I40E_NVMUPD_READ_SA:
        /* standalone read: acquire, read, release in one call */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
            i40e_release_nvm(hw);
        }
        break;

    case I40E_NVMUPD_READ_SNT:
        /* start of a multi-part read: keep the semaphore and move to
         * the READING state unless the first read fails
         */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
            if (status)
                i40e_release_nvm(hw);
            else
                hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
        }
        break;

    case I40E_NVMUPD_WRITE_ERA:
        /* erase completes asynchronously: wait for the AQ completion
         * event, releasing the semaphore when it arrives
         */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
            if (status) {
                i40e_release_nvm(hw);
            } else {
                hw->nvm_release_on_done = true;
                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
            }
        }
        break;

    case I40E_NVMUPD_WRITE_SA:
        /* standalone write: release on completion event */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
            if (status) {
                i40e_release_nvm(hw);
            } else {
                hw->nvm_release_on_done = true;
                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
            }
        }
        break;

    case I40E_NVMUPD_WRITE_SNT:
        /* start of a multi-part write: keep the semaphore across the
         * wait (no nvm_release_on_done) and continue in WRITE_WAIT
         */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
            if (status) {
                i40e_release_nvm(hw);
            } else {
                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
            }
        }
        break;

    case I40E_NVMUPD_CSUM_SA:
        /* standalone checksum update; i40e_update_nvm_checksum() may
         * fail without an AQ error, hence the -EIO fallback
         */
        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
        if (status) {
            *perrno = i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status);
        } else {
            status = i40e_update_nvm_checksum(hw);
            if (status) {
                *perrno = hw->aq.asq_last_status ?
                          i40e_aq_rc_to_posix(status,
                                              hw->aq.asq_last_status) :
                          -EIO;
                i40e_release_nvm(hw);
            } else {
                hw->nvm_release_on_done = true;
                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
            }
        }
        break;

    case I40E_NVMUPD_EXEC_AQ:
        status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
        break;

    case I40E_NVMUPD_GET_AQ_RESULT:
        status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
        break;

    case I40E_NVMUPD_GET_AQ_EVENT:
        status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
        break;

    default:
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "NVMUPD: bad cmd %s in init state\n",
                   i40e_nvm_update_state_str[upd_cmd]);
        status = I40E_ERR_NVM;
        *perrno = -ESRCH;
        break;
    }

    return status;
}
  869. /**
  870. * i40e_nvmupd_state_reading - Handle NVM update state Reading
  871. * @hw: pointer to hardware structure
  872. * @cmd: pointer to nvm update command buffer
  873. * @bytes: pointer to the data buffer
  874. * @perrno: pointer to return error code
  875. *
  876. * NVM ownership is already held. Process legitimate commands and set any
  877. * change in state; reject all other commands.
  878. **/
  879. static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  880. struct i40e_nvm_access *cmd,
  881. u8 *bytes, int *perrno)
  882. {
  883. i40e_status status = 0;
  884. enum i40e_nvmupd_cmd upd_cmd;
  885. upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
  886. switch (upd_cmd) {
  887. case I40E_NVMUPD_READ_SA:
  888. case I40E_NVMUPD_READ_CON:
  889. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  890. break;
  891. case I40E_NVMUPD_READ_LCB:
  892. status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
  893. i40e_release_nvm(hw);
  894. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  895. break;
  896. default:
  897. i40e_debug(hw, I40E_DEBUG_NVM,
  898. "NVMUPD: bad cmd %s in reading state.\n",
  899. i40e_nvm_update_state_str[upd_cmd]);
  900. status = I40E_NOT_SUPPORTED;
  901. *perrno = -ESRCH;
  902. break;
  903. }
  904. return status;
  905. }
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 *
 * If the firmware reports EBUSY (typically because the write semaphore
 * timed out mid-session), the semaphore is reacquired and the command is
 * retried exactly once via the retry label.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *perrno)
{
    i40e_status status = 0;
    enum i40e_nvmupd_cmd upd_cmd;
    bool retry_attempt = false;

    upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
    switch (upd_cmd) {
    case I40E_NVMUPD_WRITE_CON:
        /* mid-session write: stay in the session, wait for the AQ
         * completion before accepting the next chunk
         */
        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
        if (!status) {
            hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
            hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
        }
        break;

    case I40E_NVMUPD_WRITE_LCB:
        /* last write of the session: on success, release the NVM
         * once the completion event arrives; on failure, abort the
         * session back to Init
         */
        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
        if (status) {
            *perrno = hw->aq.asq_last_status ?
                      i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status) :
                      -EIO;
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
        } else {
            hw->nvm_release_on_done = true;
            hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
        }
        break;

    case I40E_NVMUPD_CSUM_CON:
        /* Assumes the caller has acquired the nvm */
        status = i40e_update_nvm_checksum(hw);
        if (status) {
            *perrno = hw->aq.asq_last_status ?
                      i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status) :
                      -EIO;
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
        } else {
            hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
            hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
        }
        break;

    case I40E_NVMUPD_CSUM_LCB:
        /* Assumes the caller has acquired the nvm */
        status = i40e_update_nvm_checksum(hw);
        if (status) {
            *perrno = hw->aq.asq_last_status ?
                      i40e_aq_rc_to_posix(status,
                                          hw->aq.asq_last_status) :
                      -EIO;
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
        } else {
            hw->nvm_release_on_done = true;
            hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
            hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
        }
        break;

    default:
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "NVMUPD: bad cmd %s in writing state.\n",
                   i40e_nvm_update_state_str[upd_cmd]);
        status = I40E_NOT_SUPPORTED;
        *perrno = -ESRCH;
        break;
    }

    /* In some circumstances, a multi-write transaction takes longer
     * than the default 3 minute timeout on the write semaphore. If
     * the write failed with an EBUSY status, this is likely the problem,
     * so here we try to reacquire the semaphore then retry the write.
     * We only do one retry, then give up.
     */
    if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
        !retry_attempt) {
        /* save the error context so it can be restored if the
         * reacquire fails and we report the original failure
         */
        i40e_status old_status = status;
        u32 old_asq_status = hw->aq.asq_last_status;
        u32 gtime;

        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
        if (gtime >= hw->nvm.hw_semaphore_timeout) {
            i40e_debug(hw, I40E_DEBUG_ALL,
                       "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
                       gtime, hw->nvm.hw_semaphore_timeout);
            i40e_release_nvm(hw);
            status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
            if (status) {
                i40e_debug(hw, I40E_DEBUG_ALL,
                           "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
                           hw->aq.asq_last_status);
                status = old_status;
                hw->aq.asq_last_status = old_asq_status;
            } else {
                retry_attempt = true;
                goto retry;
            }
        }
    }

    return status;
}
  1016. /**
  1017. * i40e_nvmupd_clear_wait_state - clear wait state on hw
  1018. * @hw: pointer to the hardware structure
  1019. **/
  1020. void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
  1021. {
  1022. i40e_debug(hw, I40E_DEBUG_NVM,
  1023. "NVMUPD: clearing wait on opcode 0x%04x\n",
  1024. hw->nvm_wait_opcode);
  1025. if (hw->nvm_release_on_done) {
  1026. i40e_release_nvm(hw);
  1027. hw->nvm_release_on_done = false;
  1028. }
  1029. hw->nvm_wait_opcode = 0;
  1030. if (hw->aq.arq_last_status) {
  1031. hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
  1032. return;
  1033. }
  1034. switch (hw->nvmupd_state) {
  1035. case I40E_NVMUPD_STATE_INIT_WAIT:
  1036. hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
  1037. break;
  1038. case I40E_NVMUPD_STATE_WRITE_WAIT:
  1039. hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
  1040. break;
  1041. default:
  1042. break;
  1043. }
  1044. }
  1045. /**
  1046. * i40e_nvmupd_check_wait_event - handle NVM update operation events
  1047. * @hw: pointer to the hardware structure
  1048. * @opcode: the event that just happened
  1049. **/
  1050. void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
  1051. struct i40e_aq_desc *desc)
  1052. {
  1053. u32 aq_desc_len = sizeof(struct i40e_aq_desc);
  1054. if (opcode == hw->nvm_wait_opcode) {
  1055. memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
  1056. i40e_nvmupd_clear_wait_state(hw);
  1057. }
  1058. }
/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Decodes cmd->command and the transaction/module fields of cmd->config
 * into an update command type, after bounds-checking data_size.
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID.
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                                                         struct i40e_nvm_access *cmd,
                                                         int *perrno)
{
    enum i40e_nvmupd_cmd upd_cmd;
    u8 module, transaction;

    /* anything that doesn't match a recognized case is an error */
    upd_cmd = I40E_NVMUPD_INVALID;

    transaction = i40e_nvmupd_get_transaction(cmd->config);
    module = i40e_nvmupd_get_module(cmd->config);

    /* limits on data size */
    if ((cmd->data_size < 1) ||
        (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "i40e_nvmupd_validate_command data_size %d\n",
                   cmd->data_size);
        *perrno = -EFAULT;
        return I40E_NVMUPD_INVALID;
    }

    switch (cmd->command) {
    case I40E_NVM_READ:
        switch (transaction) {
        case I40E_NVM_CON:
            upd_cmd = I40E_NVMUPD_READ_CON;
            break;
        case I40E_NVM_SNT:
            upd_cmd = I40E_NVMUPD_READ_SNT;
            break;
        case I40E_NVM_LCB:
            upd_cmd = I40E_NVMUPD_READ_LCB;
            break;
        case I40E_NVM_SA:
            upd_cmd = I40E_NVMUPD_READ_SA;
            break;
        case I40E_NVM_EXEC:
            /* the module field doubles as a sub-command selector
             * for EXEC reads: 0xf = status, 0 = fetch AQ result
             */
            if (module == 0xf)
                upd_cmd = I40E_NVMUPD_STATUS;
            else if (module == 0)
                upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
            break;
        case I40E_NVM_AQE:
            upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
            break;
        }
        break;

    case I40E_NVM_WRITE:
        switch (transaction) {
        case I40E_NVM_CON:
            upd_cmd = I40E_NVMUPD_WRITE_CON;
            break;
        case I40E_NVM_SNT:
            upd_cmd = I40E_NVMUPD_WRITE_SNT;
            break;
        case I40E_NVM_LCB:
            upd_cmd = I40E_NVMUPD_WRITE_LCB;
            break;
        case I40E_NVM_SA:
            upd_cmd = I40E_NVMUPD_WRITE_SA;
            break;
        case I40E_NVM_ERA:
            upd_cmd = I40E_NVMUPD_WRITE_ERA;
            break;
        case I40E_NVM_CSUM:
            upd_cmd = I40E_NVMUPD_CSUM_CON;
            break;
        /* checksum requests may be combined with SA/LCB flags */
        case (I40E_NVM_CSUM|I40E_NVM_SA):
            upd_cmd = I40E_NVMUPD_CSUM_SA;
            break;
        case (I40E_NVM_CSUM|I40E_NVM_LCB):
            upd_cmd = I40E_NVMUPD_CSUM_LCB;
            break;
        case I40E_NVM_EXEC:
            if (module == 0)
                upd_cmd = I40E_NVMUPD_EXEC_AQ;
            break;
        }
        break;
    }

    return upd_cmd;
}
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The caller-supplied
 * bytes buffer holds an AQ descriptor followed by optional command data;
 * the descriptor is sent via the admin send queue with its writeback
 * captured in hw->nvm_wb_desc. A nonzero cmd->offset selects a followup
 * AQ event opcode to wait for (0xffff is a no-op used to cancel).
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
                                       struct i40e_nvm_access *cmd,
                                       u8 *bytes, int *perrno)
{
    struct i40e_asq_cmd_details cmd_details;
    i40e_status status;
    struct i40e_aq_desc *aq_desc;
    u32 buff_size = 0;
    u8 *buff = NULL;
    u32 aq_desc_len;
    u32 aq_data_len;

    i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

    /* offset 0xffff means "nothing to execute" */
    if (cmd->offset == 0xffff)
        return 0;

    memset(&cmd_details, 0, sizeof(cmd_details));
    cmd_details.wb_desc = &hw->nvm_wb_desc;

    aq_desc_len = sizeof(struct i40e_aq_desc);
    memset(&hw->nvm_wb_desc, 0, aq_desc_len);

    /* get the aq descriptor */
    if (cmd->data_size < aq_desc_len) {
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
                   cmd->data_size, aq_desc_len);
        *perrno = -EINVAL;
        return I40E_ERR_PARAM;
    }
    aq_desc = (struct i40e_aq_desc *)bytes;

    /* if data buffer needed, make sure it's ready */
    aq_data_len = cmd->data_size - aq_desc_len;
    buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
    if (buff_size) {
        /* the bounce buffer in hw->nvm_buff is allocated lazily and
         * reused across exec calls
         */
        if (!hw->nvm_buff.va) {
            status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
                                            hw->aq.asq_buf_size);
            if (status)
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
                           status);
        }

        if (hw->nvm_buff.va) {
            buff = hw->nvm_buff.va;
            memcpy(buff, &bytes[aq_desc_len], aq_data_len);
        }
    }

    if (cmd->offset)
        memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

    /* and away we go! */
    status = i40e_asq_send_command(hw, aq_desc, buff,
                                   buff_size, &cmd_details);
    if (status) {
        i40e_debug(hw, I40E_DEBUG_NVM,
                   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
                   i40e_stat_str(hw, status),
                   i40e_aq_str(hw, hw->aq.asq_last_status));
        *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        return status;
    }

    /* should we wait for a followup event? */
    if (cmd->offset) {
        hw->nvm_wait_opcode = cmd->offset;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
    }

    return status;
}
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer. The result is the
 * writeback descriptor (hw->nvm_wb_desc) concatenated with the data
 * buffer (hw->nvm_buff); cmd->offset/data_size select a window into that
 * combined byte range, trimmed to what is actually available.
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *perrno)
{
    u32 aq_total_len;
    u32 aq_desc_len;
    int remainder;
    u8 *buff;

    i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

    aq_desc_len = sizeof(struct i40e_aq_desc);
    aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

    /* check offset range */
    if (cmd->offset > aq_total_len) {
        i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
                   __func__, cmd->offset, aq_total_len);
        *perrno = -EINVAL;
        return I40E_ERR_PARAM;
    }

    /* check copylength range */
    if (cmd->data_size > (aq_total_len - cmd->offset)) {
        int new_len = aq_total_len - cmd->offset;

        i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
                   __func__, cmd->data_size, new_len);
        cmd->data_size = new_len;
    }

    remainder = cmd->data_size;
    if (cmd->offset < aq_desc_len) {
        /* window starts inside the descriptor: copy the descriptor
         * part first, then fall through to the data buffer
         */
        u32 len = aq_desc_len - cmd->offset;

        len = min(len, cmd->data_size);
        i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
                   __func__, cmd->offset, cmd->offset + len);

        buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
        memcpy(bytes, buff, len);

        bytes += len;
        remainder -= len;
        buff = hw->nvm_buff.va;
    } else {
        /* window starts inside the data buffer */
        buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
    }

    if (remainder > 0) {
        int start_byte = buff - (u8 *)hw->nvm_buff.va;

        i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
                   __func__, start_byte, start_byte + remainder);
        memcpy(bytes, buff, remainder);
    }

    return 0;
}
  1276. /**
  1277. * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
  1278. * @hw: pointer to hardware structure
  1279. * @cmd: pointer to nvm update command buffer
  1280. * @bytes: pointer to the data buffer
  1281. * @perrno: pointer to return error code
  1282. *
  1283. * cmd structure contains identifiers and data buffer
  1284. **/
  1285. static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
  1286. struct i40e_nvm_access *cmd,
  1287. u8 *bytes, int *perrno)
  1288. {
  1289. u32 aq_total_len;
  1290. u32 aq_desc_len;
  1291. i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
  1292. aq_desc_len = sizeof(struct i40e_aq_desc);
  1293. aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
  1294. /* check copylength range */
  1295. if (cmd->data_size > aq_total_len) {
  1296. i40e_debug(hw, I40E_DEBUG_NVM,
  1297. "%s: copy length %d too big, trimming to %d\n",
  1298. __func__, cmd->data_size, aq_total_len);
  1299. cmd->data_size = aq_total_len;
  1300. }
  1301. memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
  1302. return 0;
  1303. }
  1304. /**
  1305. * i40e_nvmupd_nvm_read - Read NVM
  1306. * @hw: pointer to hardware structure
  1307. * @cmd: pointer to nvm update command buffer
  1308. * @bytes: pointer to the data buffer
  1309. * @perrno: pointer to return error code
  1310. *
  1311. * cmd structure contains identifiers and data buffer
  1312. **/
  1313. static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  1314. struct i40e_nvm_access *cmd,
  1315. u8 *bytes, int *perrno)
  1316. {
  1317. struct i40e_asq_cmd_details cmd_details;
  1318. i40e_status status;
  1319. u8 module, transaction;
  1320. bool last;
  1321. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1322. module = i40e_nvmupd_get_module(cmd->config);
  1323. last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
  1324. memset(&cmd_details, 0, sizeof(cmd_details));
  1325. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1326. status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1327. bytes, last, &cmd_details);
  1328. if (status) {
  1329. i40e_debug(hw, I40E_DEBUG_NVM,
  1330. "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
  1331. module, cmd->offset, cmd->data_size);
  1332. i40e_debug(hw, I40E_DEBUG_NVM,
  1333. "i40e_nvmupd_nvm_read status %d aq %d\n",
  1334. status, hw->aq.asq_last_status);
  1335. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1336. }
  1337. return status;
  1338. }
  1339. /**
  1340. * i40e_nvmupd_nvm_erase - Erase an NVM module
  1341. * @hw: pointer to hardware structure
  1342. * @cmd: pointer to nvm update command buffer
  1343. * @perrno: pointer to return error code
  1344. *
  1345. * module, offset, data_size and data are in cmd structure
  1346. **/
  1347. static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  1348. struct i40e_nvm_access *cmd,
  1349. int *perrno)
  1350. {
  1351. i40e_status status = 0;
  1352. struct i40e_asq_cmd_details cmd_details;
  1353. u8 module, transaction;
  1354. bool last;
  1355. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1356. module = i40e_nvmupd_get_module(cmd->config);
  1357. last = (transaction & I40E_NVM_LCB);
  1358. memset(&cmd_details, 0, sizeof(cmd_details));
  1359. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1360. status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
  1361. last, &cmd_details);
  1362. if (status) {
  1363. i40e_debug(hw, I40E_DEBUG_NVM,
  1364. "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
  1365. module, cmd->offset, cmd->data_size);
  1366. i40e_debug(hw, I40E_DEBUG_NVM,
  1367. "i40e_nvmupd_nvm_erase status %d aq %d\n",
  1368. status, hw->aq.asq_last_status);
  1369. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1370. }
  1371. return status;
  1372. }
  1373. /**
  1374. * i40e_nvmupd_nvm_write - Write NVM
  1375. * @hw: pointer to hardware structure
  1376. * @cmd: pointer to nvm update command buffer
  1377. * @bytes: pointer to the data buffer
  1378. * @perrno: pointer to return error code
  1379. *
  1380. * module, offset, data_size and data are in cmd structure
  1381. **/
  1382. static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
  1383. struct i40e_nvm_access *cmd,
  1384. u8 *bytes, int *perrno)
  1385. {
  1386. i40e_status status = 0;
  1387. struct i40e_asq_cmd_details cmd_details;
  1388. u8 module, transaction;
  1389. u8 preservation_flags;
  1390. bool last;
  1391. transaction = i40e_nvmupd_get_transaction(cmd->config);
  1392. module = i40e_nvmupd_get_module(cmd->config);
  1393. last = (transaction & I40E_NVM_LCB);
  1394. preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
  1395. memset(&cmd_details, 0, sizeof(cmd_details));
  1396. cmd_details.wb_desc = &hw->nvm_wb_desc;
  1397. status = i40e_aq_update_nvm(hw, module, cmd->offset,
  1398. (u16)cmd->data_size, bytes, last,
  1399. preservation_flags, &cmd_details);
  1400. if (status) {
  1401. i40e_debug(hw, I40E_DEBUG_NVM,
  1402. "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
  1403. module, cmd->offset, cmd->data_size);
  1404. i40e_debug(hw, I40E_DEBUG_NVM,
  1405. "i40e_nvmupd_nvm_write status %d aq %d\n",
  1406. status, hw->aq.asq_last_status);
  1407. *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
  1408. }
  1409. return status;
  1410. }