// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);
  32. /**
  33. * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
  34. * @hw: pointer to the HW structure
  35. *
  36. * Acquire the HW semaphore to access the PHY or NVM
  37. */
  38. static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
  39. {
  40. u32 swsm;
  41. s32 timeout = hw->nvm.word_size + 1;
  42. s32 i = 0;
  43. /* Get the SW semaphore */
  44. while (i < timeout) {
  45. swsm = rd32(E1000_SWSM);
  46. if (!(swsm & E1000_SWSM_SMBI))
  47. break;
  48. udelay(50);
  49. i++;
  50. }
  51. if (i == timeout) {
  52. /* In rare circumstances, the SW semaphore may already be held
  53. * unintentionally. Clear the semaphore once before giving up.
  54. */
  55. if (hw->dev_spec._82575.clear_semaphore_once) {
  56. hw->dev_spec._82575.clear_semaphore_once = false;
  57. igb_put_hw_semaphore(hw);
  58. for (i = 0; i < timeout; i++) {
  59. swsm = rd32(E1000_SWSM);
  60. if (!(swsm & E1000_SWSM_SMBI))
  61. break;
  62. udelay(50);
  63. }
  64. }
  65. /* If we do not have the semaphore here, we have to give up. */
  66. if (i == timeout) {
  67. hw_dbg("Driver can't access device - SMBI bit is set.\n");
  68. return -E1000_ERR_NVM;
  69. }
  70. }
  71. /* Get the FW semaphore. */
  72. for (i = 0; i < timeout; i++) {
  73. swsm = rd32(E1000_SWSM);
  74. wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
  75. /* Semaphore acquired if bit latched */
  76. if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
  77. break;
  78. udelay(50);
  79. }
  80. if (i == timeout) {
  81. /* Release semaphores */
  82. igb_put_hw_semaphore(hw);
  83. hw_dbg("Driver can't access the NVM\n");
  84. return -E1000_ERR_NVM;
  85. }
  86. return 0;
  87. }
  88. /**
  89. * igb_acquire_nvm_i210 - Request for access to EEPROM
  90. * @hw: pointer to the HW structure
  91. *
  92. * Acquire the necessary semaphores for exclusive access to the EEPROM.
  93. * Set the EEPROM access request bit and wait for EEPROM access grant bit.
  94. * Return successful if access grant bit set, else clear the request for
  95. * EEPROM access and return -E1000_ERR_NVM (-1).
  96. **/
  97. static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
  98. {
  99. return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
  100. }
  101. /**
  102. * igb_release_nvm_i210 - Release exclusive access to EEPROM
  103. * @hw: pointer to the HW structure
  104. *
  105. * Stop any current commands to the EEPROM and clear the EEPROM request bit,
  106. * then release the semaphores acquired.
  107. **/
  108. static void igb_release_nvm_i210(struct e1000_hw *hw)
  109. {
  110. igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
  111. }
  112. /**
  113. * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
  114. * @hw: pointer to the HW structure
  115. * @mask: specifies which semaphore to acquire
  116. *
  117. * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
  118. * will also specify which port we're acquiring the lock for.
  119. **/
  120. s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  121. {
  122. u32 swfw_sync;
  123. u32 swmask = mask;
  124. u32 fwmask = mask << 16;
  125. s32 ret_val = 0;
  126. s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
  127. while (i < timeout) {
  128. if (igb_get_hw_semaphore_i210(hw)) {
  129. ret_val = -E1000_ERR_SWFW_SYNC;
  130. goto out;
  131. }
  132. swfw_sync = rd32(E1000_SW_FW_SYNC);
  133. if (!(swfw_sync & (fwmask | swmask)))
  134. break;
  135. /* Firmware currently using resource (fwmask) */
  136. igb_put_hw_semaphore(hw);
  137. mdelay(5);
  138. i++;
  139. }
  140. if (i == timeout) {
  141. hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
  142. ret_val = -E1000_ERR_SWFW_SYNC;
  143. goto out;
  144. }
  145. swfw_sync |= swmask;
  146. wr32(E1000_SW_FW_SYNC, swfw_sync);
  147. igb_put_hw_semaphore(hw);
  148. out:
  149. return ret_val;
  150. }
  151. /**
  152. * igb_release_swfw_sync_i210 - Release SW/FW semaphore
  153. * @hw: pointer to the HW structure
  154. * @mask: specifies which semaphore to acquire
  155. *
  156. * Release the SW/FW semaphore used to access the PHY or NVM. The mask
  157. * will also specify which port we're releasing the lock for.
  158. **/
  159. void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  160. {
  161. u32 swfw_sync;
  162. while (igb_get_hw_semaphore_i210(hw))
  163. ; /* Empty */
  164. swfw_sync = rd32(E1000_SW_FW_SYNC);
  165. swfw_sync &= ~mask;
  166. wr32(E1000_SW_FW_SYNC, swfw_sync);
  167. igb_put_hw_semaphore(hw);
  168. }
  169. /**
  170. * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
  171. * @hw: pointer to the HW structure
  172. * @offset: offset of word in the Shadow Ram to read
  173. * @words: number of words to read
  174. * @data: word read from the Shadow Ram
  175. *
  176. * Reads a 16 bit word from the Shadow Ram using the EERD register.
  177. * Uses necessary synchronization semaphores.
  178. **/
  179. static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
  180. u16 *data)
  181. {
  182. s32 status = 0;
  183. u16 i, count;
  184. /* We cannot hold synchronization semaphores for too long,
  185. * because of forceful takeover procedure. However it is more efficient
  186. * to read in bursts than synchronizing access for each word.
  187. */
  188. for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
  189. count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
  190. E1000_EERD_EEWR_MAX_COUNT : (words - i);
  191. if (!(hw->nvm.ops.acquire(hw))) {
  192. status = igb_read_nvm_eerd(hw, offset, count,
  193. data + i);
  194. hw->nvm.ops.release(hw);
  195. } else {
  196. status = E1000_ERR_SWFW_SYNC;
  197. }
  198. if (status)
  199. break;
  200. }
  201. return status;
  202. }
  203. /**
  204. * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
  205. * @hw: pointer to the HW structure
  206. * @offset: offset within the Shadow Ram to be written to
  207. * @words: number of words to write
  208. * @data: 16 bit word(s) to be written to the Shadow Ram
  209. *
  210. * Writes data to Shadow Ram at offset using EEWR register.
  211. *
  212. * If igb_update_nvm_checksum is not called after this function , the
  213. * Shadow Ram will most likely contain an invalid checksum.
  214. **/
  215. static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
  216. u16 *data)
  217. {
  218. struct e1000_nvm_info *nvm = &hw->nvm;
  219. u32 i, k, eewr = 0;
  220. u32 attempts = 100000;
  221. s32 ret_val = 0;
  222. /* A check for invalid values: offset too large, too many words,
  223. * too many words for the offset, and not enough words.
  224. */
  225. if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
  226. (words == 0)) {
  227. hw_dbg("nvm parameter(s) out of bounds\n");
  228. ret_val = -E1000_ERR_NVM;
  229. goto out;
  230. }
  231. for (i = 0; i < words; i++) {
  232. eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
  233. (data[i] << E1000_NVM_RW_REG_DATA) |
  234. E1000_NVM_RW_REG_START;
  235. wr32(E1000_SRWR, eewr);
  236. for (k = 0; k < attempts; k++) {
  237. if (E1000_NVM_RW_REG_DONE &
  238. rd32(E1000_SRWR)) {
  239. ret_val = 0;
  240. break;
  241. }
  242. udelay(5);
  243. }
  244. if (ret_val) {
  245. hw_dbg("Shadow RAM write EEWR timed out\n");
  246. break;
  247. }
  248. }
  249. out:
  250. return ret_val;
  251. }
  252. /**
  253. * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
  254. * @hw: pointer to the HW structure
  255. * @offset: offset within the Shadow RAM to be written to
  256. * @words: number of words to write
  257. * @data: 16 bit word(s) to be written to the Shadow RAM
  258. *
  259. * Writes data to Shadow RAM at offset using EEWR register.
  260. *
  261. * If e1000_update_nvm_checksum is not called after this function , the
  262. * data will not be committed to FLASH and also Shadow RAM will most likely
  263. * contain an invalid checksum.
  264. *
  265. * If error code is returned, data and Shadow RAM may be inconsistent - buffer
  266. * partially written.
  267. **/
  268. static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
  269. u16 *data)
  270. {
  271. s32 status = 0;
  272. u16 i, count;
  273. /* We cannot hold synchronization semaphores for too long,
  274. * because of forceful takeover procedure. However it is more efficient
  275. * to write in bursts than synchronizing access for each word.
  276. */
  277. for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
  278. count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
  279. E1000_EERD_EEWR_MAX_COUNT : (words - i);
  280. if (!(hw->nvm.ops.acquire(hw))) {
  281. status = igb_write_nvm_srwr(hw, offset, count,
  282. data + i);
  283. hw->nvm.ops.release(hw);
  284. } else {
  285. status = E1000_ERR_SWFW_SYNC;
  286. }
  287. if (status)
  288. break;
  289. }
  290. return status;
  291. }
  292. /**
  293. * igb_read_invm_word_i210 - Reads OTP
  294. * @hw: pointer to the HW structure
  295. * @address: the word address (aka eeprom offset) to read
  296. * @data: pointer to the data read
  297. *
  298. * Reads 16-bit words from the OTP. Return error when the word is not
  299. * stored in OTP.
  300. **/
  301. static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
  302. {
  303. s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
  304. u32 invm_dword;
  305. u16 i;
  306. u8 record_type, word_address;
  307. for (i = 0; i < E1000_INVM_SIZE; i++) {
  308. invm_dword = rd32(E1000_INVM_DATA_REG(i));
  309. /* Get record type */
  310. record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
  311. if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
  312. break;
  313. if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
  314. i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
  315. if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
  316. i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
  317. if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
  318. word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
  319. if (word_address == address) {
  320. *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
  321. hw_dbg("Read INVM Word 0x%02x = %x\n",
  322. address, *data);
  323. status = 0;
  324. break;
  325. }
  326. }
  327. }
  328. if (status)
  329. hw_dbg("Requested word 0x%02x not found in OTP\n", address);
  330. return status;
  331. }
  332. /**
  333. * igb_read_invm_i210 - Read invm wrapper function for I210/I211
  334. * @hw: pointer to the HW structure
  335. * @words: number of words to read
  336. * @data: pointer to the data read
  337. *
  338. * Wrapper function to return data formerly found in the NVM.
  339. **/
  340. static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
  341. u16 words __always_unused, u16 *data)
  342. {
  343. s32 ret_val = 0;
  344. /* Only the MAC addr is required to be present in the iNVM */
  345. switch (offset) {
  346. case NVM_MAC_ADDR:
  347. ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
  348. ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
  349. &data[1]);
  350. ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
  351. &data[2]);
  352. if (ret_val)
  353. hw_dbg("MAC Addr not found in iNVM\n");
  354. break;
  355. case NVM_INIT_CTRL_2:
  356. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  357. if (ret_val) {
  358. *data = NVM_INIT_CTRL_2_DEFAULT_I211;
  359. ret_val = 0;
  360. }
  361. break;
  362. case NVM_INIT_CTRL_4:
  363. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  364. if (ret_val) {
  365. *data = NVM_INIT_CTRL_4_DEFAULT_I211;
  366. ret_val = 0;
  367. }
  368. break;
  369. case NVM_LED_1_CFG:
  370. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  371. if (ret_val) {
  372. *data = NVM_LED_1_CFG_DEFAULT_I211;
  373. ret_val = 0;
  374. }
  375. break;
  376. case NVM_LED_0_2_CFG:
  377. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  378. if (ret_val) {
  379. *data = NVM_LED_0_2_CFG_DEFAULT_I211;
  380. ret_val = 0;
  381. }
  382. break;
  383. case NVM_ID_LED_SETTINGS:
  384. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  385. if (ret_val) {
  386. *data = ID_LED_RESERVED_FFFF;
  387. ret_val = 0;
  388. }
  389. break;
  390. case NVM_SUB_DEV_ID:
  391. *data = hw->subsystem_device_id;
  392. break;
  393. case NVM_SUB_VEN_ID:
  394. *data = hw->subsystem_vendor_id;
  395. break;
  396. case NVM_DEV_ID:
  397. *data = hw->device_id;
  398. break;
  399. case NVM_VEN_ID:
  400. *data = hw->vendor_id;
  401. break;
  402. default:
  403. hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
  404. *data = NVM_RESERVED_WORD;
  405. break;
  406. }
  407. return ret_val;
  408. }
/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.  The iNVM is snapshotted into a
 *  local buffer, then scanned backwards (from the last non-ULT block)
 *  for the version and image-type fields.  Returns 0 when a version was
 *  decoded, -E1000_ERR_INVM_VALUE_NOT_FOUND otherwise.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver) {
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of scannable blocks: total iNVM dwords minus the trailing
	 * ULT area.
	 */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: walk the buffer from the end toward the
	 * start; `record` is the current dword, `next_record` the one
	 * just after it (toward higher addresses).
	 */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			  (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type, using the same backwards scan as above. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
  497. /**
  498. * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
  499. * @hw: pointer to the HW structure
  500. *
  501. * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  502. * and then verifies that the sum of the EEPROM is equal to 0xBABA.
  503. **/
  504. static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
  505. {
  506. s32 status = 0;
  507. s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
  508. if (!(hw->nvm.ops.acquire(hw))) {
  509. /* Replace the read function with semaphore grabbing with
  510. * the one that skips this for a while.
  511. * We have semaphore taken already here.
  512. */
  513. read_op_ptr = hw->nvm.ops.read;
  514. hw->nvm.ops.read = igb_read_nvm_eerd;
  515. status = igb_validate_nvm_checksum(hw);
  516. /* Revert original read operation. */
  517. hw->nvm.ops.read = read_op_ptr;
  518. hw->nvm.ops.release(hw);
  519. } else {
  520. status = E1000_ERR_SWFW_SYNC;
  521. }
  522. return status;
  523. }
  524. /**
  525. * igb_update_nvm_checksum_i210 - Update EEPROM checksum
  526. * @hw: pointer to the HW structure
  527. *
  528. * Updates the EEPROM checksum by reading/adding each word of the EEPROM
  529. * up to the checksum. Then calculates the EEPROM checksum and writes the
  530. * value to the EEPROM. Next commit EEPROM data onto the Flash.
  531. **/
  532. static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
  533. {
  534. s32 ret_val = 0;
  535. u16 checksum = 0;
  536. u16 i, nvm_data;
  537. /* Read the first word from the EEPROM. If this times out or fails, do
  538. * not continue or we could be in for a very long wait while every
  539. * EEPROM read fails
  540. */
  541. ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
  542. if (ret_val) {
  543. hw_dbg("EEPROM read failed\n");
  544. goto out;
  545. }
  546. if (!(hw->nvm.ops.acquire(hw))) {
  547. /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
  548. * because we do not want to take the synchronization
  549. * semaphores twice here.
  550. */
  551. for (i = 0; i < NVM_CHECKSUM_REG; i++) {
  552. ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
  553. if (ret_val) {
  554. hw->nvm.ops.release(hw);
  555. hw_dbg("NVM Read Error while updating checksum.\n");
  556. goto out;
  557. }
  558. checksum += nvm_data;
  559. }
  560. checksum = (u16) NVM_SUM - checksum;
  561. ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
  562. &checksum);
  563. if (ret_val) {
  564. hw->nvm.ops.release(hw);
  565. hw_dbg("NVM Write Error while updating checksum.\n");
  566. goto out;
  567. }
  568. hw->nvm.ops.release(hw);
  569. ret_val = igb_update_flash_i210(hw);
  570. } else {
  571. ret_val = -E1000_ERR_SWFW_SYNC;
  572. }
  573. out:
  574. return ret_val;
  575. }
  576. /**
  577. * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
  578. * @hw: pointer to the HW structure
  579. *
  580. **/
  581. static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
  582. {
  583. s32 ret_val = -E1000_ERR_NVM;
  584. u32 i, reg;
  585. for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
  586. reg = rd32(E1000_EECD);
  587. if (reg & E1000_EECD_FLUDONE_I210) {
  588. ret_val = 0;
  589. break;
  590. }
  591. udelay(5);
  592. }
  593. return ret_val;
  594. }
  595. /**
  596. * igb_get_flash_presence_i210 - Check if flash device is detected.
  597. * @hw: pointer to the HW structure
  598. *
  599. **/
  600. bool igb_get_flash_presence_i210(struct e1000_hw *hw)
  601. {
  602. u32 eec = 0;
  603. bool ret_val = false;
  604. eec = rd32(E1000_EECD);
  605. if (eec & E1000_EECD_FLASH_DETECTED_I210)
  606. ret_val = true;
  607. return ret_val;
  608. }
  609. /**
  610. * igb_update_flash_i210 - Commit EEPROM to the flash
  611. * @hw: pointer to the HW structure
  612. *
  613. **/
  614. static s32 igb_update_flash_i210(struct e1000_hw *hw)
  615. {
  616. s32 ret_val = 0;
  617. u32 flup;
  618. ret_val = igb_pool_flash_update_done_i210(hw);
  619. if (ret_val == -E1000_ERR_NVM) {
  620. hw_dbg("Flash update time out\n");
  621. goto out;
  622. }
  623. flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
  624. wr32(E1000_EECD, flup);
  625. ret_val = igb_pool_flash_update_done_i210(hw);
  626. if (ret_val)
  627. hw_dbg("Flash update time out\n");
  628. else
  629. hw_dbg("Flash update complete\n");
  630. out:
  631. return ret_val;
  632. }
  633. /**
  634. * igb_valid_led_default_i210 - Verify a valid default LED config
  635. * @hw: pointer to the HW structure
  636. * @data: pointer to the NVM (EEPROM)
  637. *
  638. * Read the EEPROM for the current default LED configuration. If the
  639. * LED configuration is not valid, set to a valid LED configuration.
  640. **/
  641. s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
  642. {
  643. s32 ret_val;
  644. ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
  645. if (ret_val) {
  646. hw_dbg("NVM Read Error\n");
  647. goto out;
  648. }
  649. if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
  650. switch (hw->phy.media_type) {
  651. case e1000_media_type_internal_serdes:
  652. *data = ID_LED_DEFAULT_I210_SERDES;
  653. break;
  654. case e1000_media_type_copper:
  655. default:
  656. *data = ID_LED_DEFAULT_I210;
  657. break;
  658. }
  659. }
  660. out:
  661. return ret_val;
  662. }
  663. /**
  664. * __igb_access_xmdio_reg - Read/write XMDIO register
  665. * @hw: pointer to the HW structure
  666. * @address: XMDIO address to program
  667. * @dev_addr: device address to program
  668. * @data: pointer to value to read/write from/to the XMDIO address
  669. * @read: boolean flag to indicate read or write
  670. **/
  671. static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
  672. u8 dev_addr, u16 *data, bool read)
  673. {
  674. s32 ret_val = 0;
  675. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
  676. if (ret_val)
  677. return ret_val;
  678. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
  679. if (ret_val)
  680. return ret_val;
  681. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
  682. dev_addr);
  683. if (ret_val)
  684. return ret_val;
  685. if (read)
  686. ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
  687. else
  688. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
  689. if (ret_val)
  690. return ret_val;
  691. /* Recalibrate the device back to 0 */
  692. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
  693. if (ret_val)
  694. return ret_val;
  695. return ret_val;
  696. }
  697. /**
  698. * igb_read_xmdio_reg - Read XMDIO register
  699. * @hw: pointer to the HW structure
  700. * @addr: XMDIO address to program
  701. * @dev_addr: device address to program
  702. * @data: value to be read from the EMI address
  703. **/
  704. s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
  705. {
  706. return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
  707. }
  708. /**
  709. * igb_write_xmdio_reg - Write XMDIO register
  710. * @hw: pointer to the HW structure
  711. * @addr: XMDIO address to program
  712. * @dev_addr: device address to program
  713. * @data: value to be written to the XMDIO address
  714. **/
  715. s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
  716. {
  717. return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
  718. }
  719. /**
  720. * igb_init_nvm_params_i210 - Init NVM func ptrs.
  721. * @hw: pointer to the HW structure
  722. **/
  723. s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
  724. {
  725. s32 ret_val = 0;
  726. struct e1000_nvm_info *nvm = &hw->nvm;
  727. nvm->ops.acquire = igb_acquire_nvm_i210;
  728. nvm->ops.release = igb_release_nvm_i210;
  729. nvm->ops.valid_led_default = igb_valid_led_default_i210;
  730. /* NVM Function Pointers */
  731. if (igb_get_flash_presence_i210(hw)) {
  732. hw->nvm.type = e1000_nvm_flash_hw;
  733. nvm->ops.read = igb_read_nvm_srrd_i210;
  734. nvm->ops.write = igb_write_nvm_srwr_i210;
  735. nvm->ops.validate = igb_validate_nvm_checksum_i210;
  736. nvm->ops.update = igb_update_nvm_checksum_i210;
  737. } else {
  738. hw->nvm.type = e1000_nvm_invm;
  739. nvm->ops.read = igb_read_invm_i210;
  740. nvm->ops.write = NULL;
  741. nvm->ops.validate = NULL;
  742. nvm->ops.update = NULL;
  743. }
  744. return ret_val;
  745. }
/**
 *  igb_pll_workaround_i210
 *  @hw: pointer to the HW structure
 *
 *  Works around an errata in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.  Repeatedly resets
 *  the internal PHY and cycles the device through D3 until the PHY's PLL
 *  frequency register no longer reads as unconfigured, or
 *  E1000_MAX_PLL_TRIES attempts have been made (-E1000_ERR_PHY).
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	/* Force internal MDIO while the workaround runs; the original
	 * MDICNFG value is restored before returning.
	 */
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		/* NOTE(review): if this PHY read fails its return value is
		 * ignored and phy_word may be used uninitialized below -
		 * confirm igb_read_phy_reg_82580() always writes *data, or
		 * pre-initialize phy_word before the loop.
		 */
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);
		wr32(E1000_WUC, 0);
		/* Feed the workaround autoload value to EEARBC while the
		 * device is power-cycled below.
		 */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);
		/* Bounce the function through D3 and back to D0. */
		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Put the original autoload value back. */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);
		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
  806. /**
  807. * igb_get_cfg_done_i210 - Read config done bit
  808. * @hw: pointer to the HW structure
  809. *
  810. * Read the management control register for the config done bit for
  811. * completion status. NOTE: silicon which is EEPROM-less will fail trying
  812. * to read the config done bit, so an error is *ONLY* logged and returns
  813. * 0. If we were to return with error, EEPROM-less silicon
  814. * would not be able to be reset or change link.
  815. **/
  816. s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
  817. {
  818. s32 timeout = PHY_CFG_TIMEOUT;
  819. u32 mask = E1000_NVM_CFG_DONE_PORT_0;
  820. while (timeout) {
  821. if (rd32(E1000_EEMNGCTL_I210) & mask)
  822. break;
  823. usleep_range(1000, 2000);
  824. timeout--;
  825. }
  826. if (!timeout)
  827. hw_dbg("MNG configuration cycle has not completed.\n");
  828. return 0;
  829. }