ixgbe_82599.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 1999 - 2018 Intel Corporation. */
  3. #include <linux/pci.h>
  4. #include <linux/delay.h>
  5. #include <linux/sched.h>
  6. #include "ixgbe.h"
  7. #include "ixgbe_phy.h"
  8. #include "ixgbe_mbx.h"
  9. #define IXGBE_82599_MAX_TX_QUEUES 128
  10. #define IXGBE_82599_MAX_RX_QUEUES 128
  11. #define IXGBE_82599_RAR_ENTRIES 128
  12. #define IXGBE_82599_MC_TBL_SIZE 128
  13. #define IXGBE_82599_VFT_TBL_SIZE 128
  14. #define IXGBE_82599_RX_PB_SIZE 512
  15. static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  16. static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  17. static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  18. static void
  19. ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
  20. static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
  21. ixgbe_link_speed speed,
  22. bool autoneg_wait_to_complete);
  23. static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
  24. static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
  25. bool autoneg_wait_to_complete);
  26. static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
  27. ixgbe_link_speed speed,
  28. bool autoneg_wait_to_complete);
  29. static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
  30. ixgbe_link_speed speed,
  31. bool autoneg_wait_to_complete);
  32. static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
  33. static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
  34. u8 dev_addr, u8 *data);
  35. static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
  36. u8 dev_addr, u8 data);
  37. static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
  38. static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
  39. bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
  40. {
  41. u32 fwsm, manc, factps;
  42. fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
  43. if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
  44. return false;
  45. manc = IXGBE_READ_REG(hw, IXGBE_MANC);
  46. if (!(manc & IXGBE_MANC_RCV_TCO_EN))
  47. return false;
  48. factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
  49. if (factps & IXGBE_FACTPS_MNGCG)
  50. return false;
  51. return true;
  52. }
  53. static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
  54. {
  55. struct ixgbe_mac_info *mac = &hw->mac;
  56. /* enable the laser control functions for SFP+ fiber
  57. * and MNG not enabled
  58. */
  59. if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
  60. !ixgbe_mng_enabled(hw)) {
  61. mac->ops.disable_tx_laser =
  62. &ixgbe_disable_tx_laser_multispeed_fiber;
  63. mac->ops.enable_tx_laser =
  64. &ixgbe_enable_tx_laser_multispeed_fiber;
  65. mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
  66. } else {
  67. mac->ops.disable_tx_laser = NULL;
  68. mac->ops.enable_tx_laser = NULL;
  69. mac->ops.flap_tx_laser = NULL;
  70. }
  71. if (hw->phy.multispeed_fiber) {
  72. /* Set up dual speed SFP+ support */
  73. mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
  74. mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
  75. mac->ops.set_rate_select_speed =
  76. ixgbe_set_hard_rate_select_speed;
  77. } else {
  78. if ((mac->ops.get_media_type(hw) ==
  79. ixgbe_media_type_backplane) &&
  80. (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
  81. hw->phy.smart_speed == ixgbe_smart_speed_on) &&
  82. !ixgbe_verify_lesm_fw_enabled_82599(hw))
  83. mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
  84. else
  85. mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
  86. }
  87. }
  88. static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
  89. {
  90. s32 ret_val;
  91. u16 list_offset, data_offset, data_value;
  92. if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
  93. ixgbe_init_mac_link_ops_82599(hw);
  94. hw->phy.ops.reset = NULL;
  95. ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
  96. &data_offset);
  97. if (ret_val)
  98. return ret_val;
  99. /* PHY config will finish before releasing the semaphore */
  100. ret_val = hw->mac.ops.acquire_swfw_sync(hw,
  101. IXGBE_GSSR_MAC_CSR_SM);
  102. if (ret_val)
  103. return IXGBE_ERR_SWFW_SYNC;
  104. if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
  105. goto setup_sfp_err;
  106. while (data_value != 0xffff) {
  107. IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
  108. IXGBE_WRITE_FLUSH(hw);
  109. if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
  110. goto setup_sfp_err;
  111. }
  112. /* Release the semaphore */
  113. hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
  114. /*
  115. * Delay obtaining semaphore again to allow FW access,
  116. * semaphore_delay is in ms usleep_range needs us.
  117. */
  118. usleep_range(hw->eeprom.semaphore_delay * 1000,
  119. hw->eeprom.semaphore_delay * 2000);
  120. /* Restart DSP and set SFI mode */
  121. ret_val = hw->mac.ops.prot_autoc_write(hw,
  122. hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
  123. false);
  124. if (ret_val) {
  125. hw_dbg(hw, " sfp module setup not complete\n");
  126. return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
  127. }
  128. }
  129. return 0;
  130. setup_sfp_err:
  131. /* Release the semaphore */
  132. hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
  133. /* Delay obtaining semaphore again to allow FW access,
  134. * semaphore_delay is in ms usleep_range needs us.
  135. */
  136. usleep_range(hw->eeprom.semaphore_delay * 1000,
  137. hw->eeprom.semaphore_delay * 2000);
  138. hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
  139. return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
  140. }
  141. /**
  142. * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
  143. * @hw: pointer to hardware structure
  144. * @locked: Return the if we locked for this read.
  145. * @reg_val: Value we read from AUTOC
  146. *
  147. * For this part (82599) we need to wrap read-modify-writes with a possible
  148. * FW/SW lock. It is assumed this lock will be freed with the next
  149. * prot_autoc_write_82599(). Note, that locked can only be true in cases
  150. * where this function doesn't return an error.
  151. **/
  152. static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
  153. u32 *reg_val)
  154. {
  155. s32 ret_val;
  156. *locked = false;
  157. /* If LESM is on then we need to hold the SW/FW semaphore. */
  158. if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
  159. ret_val = hw->mac.ops.acquire_swfw_sync(hw,
  160. IXGBE_GSSR_MAC_CSR_SM);
  161. if (ret_val)
  162. return IXGBE_ERR_SWFW_SYNC;
  163. *locked = true;
  164. }
  165. *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  166. return 0;
  167. }
  168. /**
  169. * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
  170. * @hw: pointer to hardware structure
  171. * @autoc: value to write to AUTOC
  172. * @locked: bool to indicate whether the SW/FW lock was already taken by
  173. * previous proc_autoc_read_82599.
  174. *
  175. * This part (82599) may need to hold a the SW/FW lock around all writes to
  176. * AUTOC. Likewise after a write we need to do a pipeline reset.
  177. **/
  178. static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
  179. {
  180. s32 ret_val = 0;
  181. /* Blocked by MNG FW so bail */
  182. if (ixgbe_check_reset_blocked(hw))
  183. goto out;
  184. /* We only need to get the lock if:
  185. * - We didn't do it already (in the read part of a read-modify-write)
  186. * - LESM is enabled.
  187. */
  188. if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
  189. ret_val = hw->mac.ops.acquire_swfw_sync(hw,
  190. IXGBE_GSSR_MAC_CSR_SM);
  191. if (ret_val)
  192. return IXGBE_ERR_SWFW_SYNC;
  193. locked = true;
  194. }
  195. IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
  196. ret_val = ixgbe_reset_pipeline_82599(hw);
  197. out:
  198. /* Free the SW/FW semaphore as we either grabbed it here or
  199. * already had it when this function was called.
  200. */
  201. if (locked)
  202. hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
  203. return ret_val;
  204. }
  205. static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
  206. {
  207. struct ixgbe_mac_info *mac = &hw->mac;
  208. ixgbe_init_mac_link_ops_82599(hw);
  209. mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
  210. mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
  211. mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
  212. mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
  213. mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
  214. mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
  215. mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
  216. return 0;
  217. }
  218. /**
  219. * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
  220. * @hw: pointer to hardware structure
  221. *
  222. * Initialize any function pointers that were not able to be
  223. * set during get_invariants because the PHY/SFP type was
  224. * not known. Perform the SFP init if necessary.
  225. *
  226. **/
  227. static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
  228. {
  229. struct ixgbe_mac_info *mac = &hw->mac;
  230. struct ixgbe_phy_info *phy = &hw->phy;
  231. s32 ret_val;
  232. u32 esdp;
  233. if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
  234. /* Store flag indicating I2C bus access control unit. */
  235. hw->phy.qsfp_shared_i2c_bus = true;
  236. /* Initialize access to QSFP+ I2C bus */
  237. esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  238. esdp |= IXGBE_ESDP_SDP0_DIR;
  239. esdp &= ~IXGBE_ESDP_SDP1_DIR;
  240. esdp &= ~IXGBE_ESDP_SDP0;
  241. esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
  242. esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
  243. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
  244. IXGBE_WRITE_FLUSH(hw);
  245. phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
  246. phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
  247. }
  248. /* Identify the PHY or SFP module */
  249. ret_val = phy->ops.identify(hw);
  250. /* Setup function pointers based on detected SFP module and speeds */
  251. ixgbe_init_mac_link_ops_82599(hw);
  252. /* If copper media, overwrite with copper function pointers */
  253. if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
  254. mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
  255. mac->ops.get_link_capabilities =
  256. &ixgbe_get_copper_link_capabilities_generic;
  257. }
  258. /* Set necessary function pointers based on phy type */
  259. switch (hw->phy.type) {
  260. case ixgbe_phy_tn:
  261. phy->ops.check_link = &ixgbe_check_phy_link_tnx;
  262. phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
  263. break;
  264. default:
  265. break;
  266. }
  267. return ret_val;
  268. }
  269. /**
  270. * ixgbe_get_link_capabilities_82599 - Determines link capabilities
  271. * @hw: pointer to hardware structure
  272. * @speed: pointer to link speed
  273. * @autoneg: true when autoneg or autotry is enabled
  274. *
  275. * Determines the link capabilities by reading the AUTOC register.
  276. **/
  277. static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
  278. ixgbe_link_speed *speed,
  279. bool *autoneg)
  280. {
  281. u32 autoc = 0;
  282. /* Determine 1G link capabilities off of SFP+ type */
  283. if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
  284. hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
  285. hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
  286. hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
  287. hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
  288. hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
  289. *speed = IXGBE_LINK_SPEED_1GB_FULL;
  290. *autoneg = true;
  291. return 0;
  292. }
  293. /*
  294. * Determine link capabilities based on the stored value of AUTOC,
  295. * which represents EEPROM defaults. If AUTOC value has not been
  296. * stored, use the current register value.
  297. */
  298. if (hw->mac.orig_link_settings_stored)
  299. autoc = hw->mac.orig_autoc;
  300. else
  301. autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  302. switch (autoc & IXGBE_AUTOC_LMS_MASK) {
  303. case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
  304. *speed = IXGBE_LINK_SPEED_1GB_FULL;
  305. *autoneg = false;
  306. break;
  307. case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
  308. *speed = IXGBE_LINK_SPEED_10GB_FULL;
  309. *autoneg = false;
  310. break;
  311. case IXGBE_AUTOC_LMS_1G_AN:
  312. *speed = IXGBE_LINK_SPEED_1GB_FULL;
  313. *autoneg = true;
  314. break;
  315. case IXGBE_AUTOC_LMS_10G_SERIAL:
  316. *speed = IXGBE_LINK_SPEED_10GB_FULL;
  317. *autoneg = false;
  318. break;
  319. case IXGBE_AUTOC_LMS_KX4_KX_KR:
  320. case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
  321. *speed = IXGBE_LINK_SPEED_UNKNOWN;
  322. if (autoc & IXGBE_AUTOC_KR_SUPP)
  323. *speed |= IXGBE_LINK_SPEED_10GB_FULL;
  324. if (autoc & IXGBE_AUTOC_KX4_SUPP)
  325. *speed |= IXGBE_LINK_SPEED_10GB_FULL;
  326. if (autoc & IXGBE_AUTOC_KX_SUPP)
  327. *speed |= IXGBE_LINK_SPEED_1GB_FULL;
  328. *autoneg = true;
  329. break;
  330. case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
  331. *speed = IXGBE_LINK_SPEED_100_FULL;
  332. if (autoc & IXGBE_AUTOC_KR_SUPP)
  333. *speed |= IXGBE_LINK_SPEED_10GB_FULL;
  334. if (autoc & IXGBE_AUTOC_KX4_SUPP)
  335. *speed |= IXGBE_LINK_SPEED_10GB_FULL;
  336. if (autoc & IXGBE_AUTOC_KX_SUPP)
  337. *speed |= IXGBE_LINK_SPEED_1GB_FULL;
  338. *autoneg = true;
  339. break;
  340. case IXGBE_AUTOC_LMS_SGMII_1G_100M:
  341. *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
  342. *autoneg = false;
  343. break;
  344. default:
  345. return IXGBE_ERR_LINK_SETUP;
  346. }
  347. if (hw->phy.multispeed_fiber) {
  348. *speed |= IXGBE_LINK_SPEED_10GB_FULL |
  349. IXGBE_LINK_SPEED_1GB_FULL;
  350. /* QSFP must not enable auto-negotiation */
  351. if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
  352. *autoneg = false;
  353. else
  354. *autoneg = true;
  355. }
  356. return 0;
  357. }
  358. /**
  359. * ixgbe_get_media_type_82599 - Get media type
  360. * @hw: pointer to hardware structure
  361. *
  362. * Returns the media type (fiber, copper, backplane)
  363. **/
  364. static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
  365. {
  366. /* Detect if there is a copper PHY attached. */
  367. switch (hw->phy.type) {
  368. case ixgbe_phy_cu_unknown:
  369. case ixgbe_phy_tn:
  370. return ixgbe_media_type_copper;
  371. default:
  372. break;
  373. }
  374. switch (hw->device_id) {
  375. case IXGBE_DEV_ID_82599_KX4:
  376. case IXGBE_DEV_ID_82599_KX4_MEZZ:
  377. case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
  378. case IXGBE_DEV_ID_82599_KR:
  379. case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
  380. case IXGBE_DEV_ID_82599_XAUI_LOM:
  381. /* Default device ID is mezzanine card KX/KX4 */
  382. return ixgbe_media_type_backplane;
  383. case IXGBE_DEV_ID_82599_SFP:
  384. case IXGBE_DEV_ID_82599_SFP_FCOE:
  385. case IXGBE_DEV_ID_82599_SFP_EM:
  386. case IXGBE_DEV_ID_82599_SFP_SF2:
  387. case IXGBE_DEV_ID_82599_SFP_SF_QP:
  388. case IXGBE_DEV_ID_82599EN_SFP:
  389. return ixgbe_media_type_fiber;
  390. case IXGBE_DEV_ID_82599_CX4:
  391. return ixgbe_media_type_cx4;
  392. case IXGBE_DEV_ID_82599_T3_LOM:
  393. return ixgbe_media_type_copper;
  394. case IXGBE_DEV_ID_82599_LS:
  395. return ixgbe_media_type_fiber_lco;
  396. case IXGBE_DEV_ID_82599_QSFP_SF_QP:
  397. return ixgbe_media_type_fiber_qsfp;
  398. default:
  399. return ixgbe_media_type_unknown;
  400. }
  401. }
  402. /**
  403. * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
  404. * @hw: pointer to hardware structure
  405. *
  406. * Disables link, should be called during D3 power down sequence.
  407. *
  408. **/
  409. static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
  410. {
  411. u32 autoc2_reg;
  412. u16 ee_ctrl_2 = 0;
  413. hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
  414. if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
  415. ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
  416. autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
  417. autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
  418. IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
  419. }
  420. }
  421. /**
  422. * ixgbe_start_mac_link_82599 - Setup MAC link settings
  423. * @hw: pointer to hardware structure
  424. * @autoneg_wait_to_complete: true when waiting for completion is needed
  425. *
  426. * Configures link settings based on values in the ixgbe_hw struct.
  427. * Restarts the link. Performs autonegotiation if needed.
  428. **/
  429. static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
  430. bool autoneg_wait_to_complete)
  431. {
  432. u32 autoc_reg;
  433. u32 links_reg;
  434. u32 i;
  435. s32 status = 0;
  436. bool got_lock = false;
  437. if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
  438. status = hw->mac.ops.acquire_swfw_sync(hw,
  439. IXGBE_GSSR_MAC_CSR_SM);
  440. if (status)
  441. return status;
  442. got_lock = true;
  443. }
  444. /* Restart link */
  445. ixgbe_reset_pipeline_82599(hw);
  446. if (got_lock)
  447. hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
  448. /* Only poll for autoneg to complete if specified to do so */
  449. if (autoneg_wait_to_complete) {
  450. autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  451. if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
  452. IXGBE_AUTOC_LMS_KX4_KX_KR ||
  453. (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
  454. IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
  455. (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
  456. IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
  457. links_reg = 0; /* Just in case Autoneg time = 0 */
  458. for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
  459. links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
  460. if (links_reg & IXGBE_LINKS_KX_AN_COMP)
  461. break;
  462. msleep(100);
  463. }
  464. if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
  465. status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
  466. hw_dbg(hw, "Autoneg did not complete.\n");
  467. }
  468. }
  469. }
  470. /* Add delay to filter out noises during initial link setup */
  471. msleep(50);
  472. return status;
  473. }
  474. /**
  475. * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
  476. * @hw: pointer to hardware structure
  477. *
  478. * The base drivers may require better control over SFP+ module
  479. * PHY states. This includes selectively shutting down the Tx
  480. * laser on the PHY, effectively halting physical link.
  481. **/
  482. static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  483. {
  484. u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
  485. /* Blocked by MNG FW so bail */
  486. if (ixgbe_check_reset_blocked(hw))
  487. return;
  488. /* Disable tx laser; allow 100us to go dark per spec */
  489. esdp_reg |= IXGBE_ESDP_SDP3;
  490. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
  491. IXGBE_WRITE_FLUSH(hw);
  492. udelay(100);
  493. }
  494. /**
  495. * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
  496. * @hw: pointer to hardware structure
  497. *
  498. * The base drivers may require better control over SFP+ module
  499. * PHY states. This includes selectively turning on the Tx
  500. * laser on the PHY, effectively starting physical link.
  501. **/
  502. static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  503. {
  504. u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
  505. /* Enable tx laser; allow 100ms to light up */
  506. esdp_reg &= ~IXGBE_ESDP_SDP3;
  507. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
  508. IXGBE_WRITE_FLUSH(hw);
  509. msleep(100);
  510. }
  511. /**
  512. * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
  513. * @hw: pointer to hardware structure
  514. *
  515. * When the driver changes the link speeds that it can support,
  516. * it sets autotry_restart to true to indicate that we need to
  517. * initiate a new autotry session with the link partner. To do
  518. * so, we set the speed then disable and re-enable the tx laser, to
  519. * alert the link partner that it also needs to restart autotry on its
  520. * end. This is consistent with true clause 37 autoneg, which also
  521. * involves a loss of signal.
  522. **/
  523. static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  524. {
  525. /* Blocked by MNG FW so bail */
  526. if (ixgbe_check_reset_blocked(hw))
  527. return;
  528. if (hw->mac.autotry_restart) {
  529. ixgbe_disable_tx_laser_multispeed_fiber(hw);
  530. ixgbe_enable_tx_laser_multispeed_fiber(hw);
  531. hw->mac.autotry_restart = false;
  532. }
  533. }
  534. /**
  535. * ixgbe_set_hard_rate_select_speed - Set module link speed
  536. * @hw: pointer to hardware structure
  537. * @speed: link speed to set
  538. *
  539. * Set module link speed via RS0/RS1 rate select pins.
  540. */
  541. static void
  542. ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
  543. {
  544. u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
  545. switch (speed) {
  546. case IXGBE_LINK_SPEED_10GB_FULL:
  547. esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
  548. break;
  549. case IXGBE_LINK_SPEED_1GB_FULL:
  550. esdp_reg &= ~IXGBE_ESDP_SDP5;
  551. esdp_reg |= IXGBE_ESDP_SDP5_DIR;
  552. break;
  553. default:
  554. hw_dbg(hw, "Invalid fixed module speed\n");
  555. return;
  556. }
  557. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
  558. IXGBE_WRITE_FLUSH(hw);
  559. }
  560. /**
  561. * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
  562. * @hw: pointer to hardware structure
  563. * @speed: new link speed
  564. * @autoneg_wait_to_complete: true when waiting for completion is needed
  565. *
  566. * Implements the Intel SmartSpeed algorithm.
  567. **/
  568. static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
  569. ixgbe_link_speed speed,
  570. bool autoneg_wait_to_complete)
  571. {
  572. s32 status = 0;
  573. ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
  574. s32 i, j;
  575. bool link_up = false;
  576. u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
  577. /* Set autoneg_advertised value based on input link speed */
  578. hw->phy.autoneg_advertised = 0;
  579. if (speed & IXGBE_LINK_SPEED_10GB_FULL)
  580. hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
  581. if (speed & IXGBE_LINK_SPEED_1GB_FULL)
  582. hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
  583. if (speed & IXGBE_LINK_SPEED_100_FULL)
  584. hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
  585. /*
  586. * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
  587. * autoneg advertisement if link is unable to be established at the
  588. * highest negotiated rate. This can sometimes happen due to integrity
  589. * issues with the physical media connection.
  590. */
  591. /* First, try to get link with full advertisement */
  592. hw->phy.smart_speed_active = false;
  593. for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
  594. status = ixgbe_setup_mac_link_82599(hw, speed,
  595. autoneg_wait_to_complete);
  596. if (status != 0)
  597. goto out;
  598. /*
  599. * Wait for the controller to acquire link. Per IEEE 802.3ap,
  600. * Section 73.10.2, we may have to wait up to 500ms if KR is
  601. * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
  602. * Table 9 in the AN MAS.
  603. */
  604. for (i = 0; i < 5; i++) {
  605. mdelay(100);
  606. /* If we have link, just jump out */
  607. status = hw->mac.ops.check_link(hw, &link_speed,
  608. &link_up, false);
  609. if (status != 0)
  610. goto out;
  611. if (link_up)
  612. goto out;
  613. }
  614. }
  615. /*
  616. * We didn't get link. If we advertised KR plus one of KX4/KX
  617. * (or BX4/BX), then disable KR and try again.
  618. */
  619. if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
  620. ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
  621. goto out;
  622. /* Turn SmartSpeed on to disable KR support */
  623. hw->phy.smart_speed_active = true;
  624. status = ixgbe_setup_mac_link_82599(hw, speed,
  625. autoneg_wait_to_complete);
  626. if (status != 0)
  627. goto out;
  628. /*
  629. * Wait for the controller to acquire link. 600ms will allow for
  630. * the AN link_fail_inhibit_timer as well for multiple cycles of
  631. * parallel detect, both 10g and 1g. This allows for the maximum
  632. * connect attempts as defined in the AN MAS table 73-7.
  633. */
  634. for (i = 0; i < 6; i++) {
  635. mdelay(100);
  636. /* If we have link, just jump out */
  637. status = hw->mac.ops.check_link(hw, &link_speed,
  638. &link_up, false);
  639. if (status != 0)
  640. goto out;
  641. if (link_up)
  642. goto out;
  643. }
  644. /* We didn't get link. Turn SmartSpeed back off. */
  645. hw->phy.smart_speed_active = false;
  646. status = ixgbe_setup_mac_link_82599(hw, speed,
  647. autoneg_wait_to_complete);
  648. out:
  649. if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
  650. hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
  651. return status;
  652. }
/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 *
 * Returns 0 on success, IXGBE_ERR_LINK_SETUP if the requested speed is
 * not within the device's link capabilities, IXGBE_ERR_AUTONEG_NOT_COMPLETE
 * if autoneg was requested to be waited on and did not finish, or an error
 * propagated from get_link_capabilities()/prot_autoc_write().
 **/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = false;
	s32 status;
	u32 pma_pmd_1g, link_mode, links_reg, i;
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	/* holds the value of AUTOC register at this current point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* holds the cached value of AUTOC register */
	u32 orig_autoc = 0;

	/* temporary variable used for comparison purposes */
	u32 autoc = current_autoc;

	/* Check to see if speed passed in is supported. */
	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
						   &autoneg);
	if (status)
		return status;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		return IXGBE_ERR_LINK_SETUP;

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR is only advertised while SmartSpeed has not
			 * disabled it (see smart_speed_active).
			 */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == false))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	/* Only touch the hardware if the computed AUTOC value changed */
	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
		if (status)
			return status;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msleep(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					hw_dbg(hw, "Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msleep(50);
	}

	return status;
}
  757. /**
  758. * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
  759. * @hw: pointer to hardware structure
  760. * @speed: new link speed
  761. * @autoneg_wait_to_complete: true if waiting is needed to complete
  762. *
  763. * Restarts link on PHY and MAC based on settings passed in.
  764. **/
  765. static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
  766. ixgbe_link_speed speed,
  767. bool autoneg_wait_to_complete)
  768. {
  769. s32 status;
  770. /* Setup the PHY according to input speed */
  771. status = hw->phy.ops.setup_link_speed(hw, speed,
  772. autoneg_wait_to_complete);
  773. /* Set up MAC */
  774. ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
  775. return status;
  776. }
/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 *
 * Returns 0 on success, IXGBE_ERR_RESET_FAILED if the reset bit never
 * self-clears, or an error propagated from stop_adapter()/phy.ops.init()/
 * setup_sfp()/prot_autoc_write().
 **/
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = false;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		return status;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		return status;

	/* Reset PHY */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
		udelay(1);
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet. Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {
		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support we need to
		 * leave LMS in the state it was before we MAC reset.
		 * Likewise if we support WoL we don't want change the
		 * LMS state either.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							false);
			if (status)
				return status;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (is_valid_ether_addr(hw->mac.san_addr)) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}
  925. /**
  926. * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
  927. * @hw: pointer to hardware structure
  928. * @fdircmd: current value of FDIRCMD register
  929. */
  930. static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
  931. {
  932. int i;
  933. for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
  934. *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
  935. if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
  936. return 0;
  937. udelay(10);
  938. }
  939. return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
  940. }
/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Returns 0 on success, IXGBE_ERR_FDIR_CMD_INCOMPLETE if a previously
 * issued FDIRCMD command has not finished, or
 * IXGBE_ERR_FDIR_REINIT_FAILED if INIT_DONE is never signalled.
 *
 * NOTE: the register write ordering below implements a documented 82599
 * silicon errata workaround; do not reorder these accesses.
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	s32 err;

	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return 0;
}
  1005. /**
  1006. * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
  1007. * @hw: pointer to hardware structure
  1008. * @fdirctrl: value to write to flow director control register
  1009. **/
  1010. static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
  1011. {
  1012. int i;
  1013. /* Prime the keys for hashing */
  1014. IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
  1015. IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
  1016. /*
  1017. * Poll init-done after we write the register. Estimated times:
  1018. * 10G: PBALLOC = 11b, timing is 60us
  1019. * 1G: PBALLOC = 11b, timing is 600us
  1020. * 100M: PBALLOC = 11b, timing is 6ms
  1021. *
  1022. * Multiple these timings by 4 if under full Rx load
  1023. *
  1024. * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
  1025. * 1 msec per poll time. If we're at line rate and drop to 100M, then
  1026. * this might not finish in our poll time, but we can live with that
  1027. * for now.
  1028. */
  1029. IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
  1030. IXGBE_WRITE_FLUSH(hw);
  1031. for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
  1032. if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
  1033. IXGBE_FDIRCTRL_INIT_DONE)
  1034. break;
  1035. usleep_range(1000, 2000);
  1036. }
  1037. if (i >= IXGBE_FDIR_INIT_DONE_POLL)
  1038. hw_dbg(hw, "Flow Director poll time exceeded!\n");
  1039. }
  1040. /**
  1041. * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
  1042. * @hw: pointer to hardware structure
  1043. * @fdirctrl: value to write to flow director control register, initially
  1044. * contains just the value of the Rx packet buffer allocation
  1045. **/
  1046. s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
  1047. {
  1048. /*
  1049. * Continue setup of fdirctrl register bits:
  1050. * Move the flexible bytes to use the ethertype - shift 6 words
  1051. * Set the maximum length per hash bucket to 0xA filters
  1052. * Send interrupt when 64 filters are left
  1053. */
  1054. fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
  1055. (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
  1056. (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
  1057. /* write hashes and fdirctrl register, poll for completion */
  1058. ixgbe_fdir_enable_82599(hw, fdirctrl);
  1059. return 0;
  1060. }
  1061. /**
  1062. * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
  1063. * @hw: pointer to hardware structure
  1064. * @fdirctrl: value to write to flow director control register, initially
  1065. * contains just the value of the Rx packet buffer allocation
  1066. **/
  1067. s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
  1068. {
  1069. /*
  1070. * Continue setup of fdirctrl register bits:
  1071. * Turn perfect match filtering on
  1072. * Initialize the drop queue
  1073. * Move the flexible bytes to use the ethertype - shift 6 words
  1074. * Set the maximum length per hash bucket to 0xA filters
  1075. * Send interrupt when 64 (0x4 * 16) filters are left
  1076. */
  1077. fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
  1078. (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
  1079. (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
  1080. (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
  1081. (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
  1082. /* write hashes and fdirctrl register, poll for completion */
  1083. ixgbe_fdir_enable_82599(hw, fdirctrl);
  1084. return 0;
  1085. }
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
/* Key bits set in BOTH the bucket and signature keys; folding these into a
 * shared common_hash lets one XOR serve both hash computations.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/* One iteration of the hash: processes key bit _n against lo_hash_dword and
 * key bit _n+16 against hi_hash_dword.  Expects u32 locals common_hash,
 * bucket_hash, sig_hash, lo_hash_dword and hi_hash_dword in the caller.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function computes the bucket and signature hashes using the
 * IXGBE_COMPUTE_SIG_HASH_ITERATION macro above, which unwinds all of the
 * loops, lets the compiler resolve the conditional ifs since the keys are
 * static defines, and computes two keys at once since the hashed dword
 * stream will be the same for both keys.
 *
 * Returns the combined 32-bit value (signature hash in the upper 16 bits,
 * bucket hash in the lower 16 bits) suitable for FDIRHASH programming.
 **/
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
					    union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 *
 * Returns 0 on success, or IXGBE_ERR_CONFIG if the flow type is not one of
 * the supported TCP/UDP/SCTP v4/v6 types.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		hw_dbg(hw, " Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return 0;
}
/* One iteration of the bucket-only hash used by the perfect filter path:
 * processes key bit _n against lo_hash_dword and key bit _n+16 against
 * hi_hash_dword.  Expects u32 locals bucket_hash, lo_hash_dword and
 * hi_hash_dword in the caller.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes. First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream. This way it will be available for
 * future use without needing to recompute the hash.
 *
 * NOTE: @input is modified in place — both by the masking pass and by the
 * final bkt_hash store.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	__be32 hi_dword = 0;
	int i;

	/* Apply masks to input data */
	for (i = 0; i <= 10; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = ntohl(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}
  1276. /**
  1277. * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
  1278. * @input_mask: mask to be bit swapped
  1279. *
  1280. * The source and destination port masks for flow director are bit swapped
  1281. * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
  1282. * generate a correctly swapped value we need to bit swap the mask and that
  1283. * is what is accomplished by this function.
  1284. **/
  1285. static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
  1286. {
  1287. u32 mask = ntohs(input_mask->formatted.dst_port);
  1288. mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
  1289. mask |= ntohs(input_mask->formatted.src_port);
  1290. mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
  1291. mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
  1292. mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
  1293. return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
  1294. }
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian. As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
/* Reverse the byte order of a 32-bit value. */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* Write a network-order value to a register in big-endian layout. */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

/* Byte-swap a 16-bit value and convert it from network order. */
#define IXGBE_STORE_AS_BE16(_value) \
	ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
/**
 * ixgbe_fdir_set_input_mask_82599 - Program the Flow Director input masks
 * @hw: pointer to hardware structure
 * @input_mask: mask describing which filter fields are compared
 *
 * Validates the supplied partial masks (only all-zeros or all-ones are
 * supported per field, plus the VLAN ID/priority combinations) and programs
 * FDIRM, FDIRTCPM/FDIRUDPM (and FDIRSCTPM on X550-class parts) and the
 * IPv4 source/destination masks.  Returns 0 on success or
 * IXGBE_ERR_CONFIG for an unsupported mask.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/*
	 * Program the relevant mask registers. If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well. L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only. IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		hw_dbg(hw, " bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		hw_dbg(hw, " Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			hw_dbg(hw, " Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		hw_dbg(hw, " Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		hw_dbg(hw, " Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		hw_dbg(hw, " Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* also use it for SCTP */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
		break;
	default:
		break;
	}

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return 0;
}
/**
 * ixgbe_fdir_write_perfect_filter_82599 - program a perfect Flow Director filter
 * @hw: pointer to hardware structure
 * @input: filter field values (ports/VLAN/flex in network byte order)
 * @soft_id: software index stored in the FDIRHASH SW-index field
 * @queue: Rx queue for matching packets, or IXGBE_FDIR_DROP_QUEUE to drop
 *
 * Writes all filter fields into the FDIR registers, flushes them, and then
 * issues the ADD_FLOW command via FDIRCMD.  Returns 0 on success or an error
 * code if the command does not complete.
 */
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	s32 err;

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register: bucket hash plus the caller's soft_id */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	return 0;
}
/**
 * ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect Flow Director filter
 * @hw: pointer to hardware structure
 * @input: filter description; only the bucket hash is used for lookup
 * @soft_id: software index used when the filter was originally written
 *
 * Queries the hardware for the filter identified by FDIRHASH and, if it is
 * present, issues the REMOVE_FLOW command.  Returns 0 on success (including
 * the case where no matching filter exists) or an error if the query command
 * does not complete.
 */
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		/* re-write the hash before issuing the removal command */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return 0;
}
/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 * Always returns 0.
 */
static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	/* NOTE(review): CORECTL_WRITE_CMD is set even though this is a read -
	 * presumably this latches the address phase before the read-back below.
	 * Confirm against the 82599 datasheet.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	/* low byte of CORECTL holds the register value after the delay */
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return 0;
}
  1506. /**
  1507. * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
  1508. * @hw: pointer to hardware structure
  1509. * @reg: atlas register to write
  1510. * @val: value to write
  1511. *
  1512. * Performs write operation to Omer analog register specified.
  1513. **/
  1514. static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
  1515. {
  1516. u32 core_ctl;
  1517. core_ctl = (reg << 8) | val;
  1518. IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
  1519. IXGBE_WRITE_FLUSH(hw);
  1520. udelay(10);
  1521. return 0;
  1522. }
  1523. /**
  1524. * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
  1525. * @hw: pointer to hardware structure
  1526. *
  1527. * Starts the hardware using the generic start_hw function
  1528. * and the generation start_hw function.
  1529. * Then performs revision-specific operations, if any.
  1530. **/
  1531. static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
  1532. {
  1533. s32 ret_val = 0;
  1534. ret_val = ixgbe_start_hw_generic(hw);
  1535. if (ret_val)
  1536. return ret_val;
  1537. ret_val = ixgbe_start_hw_gen2(hw);
  1538. if (ret_val)
  1539. return ret_val;
  1540. /* We need to run link autotry after the driver loads */
  1541. hw->mac.autotry_restart = true;
  1542. return ixgbe_verify_fw_version_82599(hw);
  1543. }
  1544. /**
  1545. * ixgbe_identify_phy_82599 - Get physical layer module
  1546. * @hw: pointer to hardware structure
  1547. *
  1548. * Determines the physical layer module found on the current adapter.
  1549. * If PHY already detected, maintains current PHY type in hw struct,
  1550. * otherwise executes the PHY detection routine.
  1551. **/
  1552. static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
  1553. {
  1554. s32 status;
  1555. /* Detect PHY if not unknown - returns success if already detected. */
  1556. status = ixgbe_identify_phy_generic(hw);
  1557. if (status) {
  1558. /* 82599 10GBASE-T requires an external PHY */
  1559. if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
  1560. return status;
  1561. status = ixgbe_identify_module_generic(hw);
  1562. }
  1563. /* Set PHY type none if no PHY detected */
  1564. if (hw->phy.type == ixgbe_phy_unknown) {
  1565. hw->phy.type = ixgbe_phy_none;
  1566. status = 0;
  1567. }
  1568. /* Return error if SFP module has been detected but is not supported */
  1569. if (hw->phy.type == ixgbe_phy_sfp_unsupported)
  1570. return IXGBE_ERR_SFP_NOT_SUPPORTED;
  1571. return status;
  1572. }
/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599.  Always returns 0.
 */
static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit. Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 * The disable/enable bracketing order below is the workaround; do not
	 * reorder these calls.
	 */
	hw->mac.ops.disable_rx_buff(hw);

	/* Only the RXEN bit of regval is honored here */
	if (regval & IXGBE_RXCTRL_RXEN)
		hw->mac.ops.enable_rx(hw);
	else
		hw->mac.ops.disable_rx(hw);

	hw->mac.ops.enable_rx_buff(hw);

	return 0;
}
  1596. /**
  1597. * ixgbe_verify_fw_version_82599 - verify fw version for 82599
  1598. * @hw: pointer to hardware structure
  1599. *
  1600. * Verifies that installed the firmware version is 0.6 or higher
  1601. * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
  1602. *
  1603. * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
  1604. * if the FW version is not supported.
  1605. **/
  1606. static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
  1607. {
  1608. s32 status = IXGBE_ERR_EEPROM_VERSION;
  1609. u16 fw_offset, fw_ptp_cfg_offset;
  1610. u16 offset;
  1611. u16 fw_version = 0;
  1612. /* firmware check is only necessary for SFI devices */
  1613. if (hw->phy.media_type != ixgbe_media_type_fiber)
  1614. return 0;
  1615. /* get the offset to the Firmware Module block */
  1616. offset = IXGBE_FW_PTR;
  1617. if (hw->eeprom.ops.read(hw, offset, &fw_offset))
  1618. goto fw_version_err;
  1619. if (fw_offset == 0 || fw_offset == 0xFFFF)
  1620. return IXGBE_ERR_EEPROM_VERSION;
  1621. /* get the offset to the Pass Through Patch Configuration block */
  1622. offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
  1623. if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
  1624. goto fw_version_err;
  1625. if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
  1626. return IXGBE_ERR_EEPROM_VERSION;
  1627. /* get the firmware version */
  1628. offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
  1629. if (hw->eeprom.ops.read(hw, offset, &fw_version))
  1630. goto fw_version_err;
  1631. if (fw_version > 0x5)
  1632. status = 0;
  1633. return status;
  1634. fw_version_err:
  1635. hw_err(hw, "eeprom read at offset %d failed\n", offset);
  1636. return IXGBE_ERR_EEPROM_VERSION;
  1637. }
  1638. /**
  1639. * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
  1640. * @hw: pointer to hardware structure
  1641. *
  1642. * Returns true if the LESM FW module is present and enabled. Otherwise
  1643. * returns false. Smart Speed must be disabled if LESM FW module is enabled.
  1644. **/
  1645. static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
  1646. {
  1647. u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
  1648. s32 status;
  1649. /* get the offset to the Firmware Module block */
  1650. status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
  1651. if (status || fw_offset == 0 || fw_offset == 0xFFFF)
  1652. return false;
  1653. /* get the offset to the LESM Parameters block */
  1654. status = hw->eeprom.ops.read(hw, (fw_offset +
  1655. IXGBE_FW_LESM_PARAMETERS_PTR),
  1656. &fw_lesm_param_offset);
  1657. if (status ||
  1658. fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
  1659. return false;
  1660. /* get the lesm state word */
  1661. status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
  1662. IXGBE_FW_LESM_STATE_1),
  1663. &fw_lesm_state);
  1664. if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
  1665. return true;
  1666. return false;
  1667. }
  1668. /**
  1669. * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
  1670. * fastest available method
  1671. *
  1672. * @hw: pointer to hardware structure
  1673. * @offset: offset of word in EEPROM to read
  1674. * @words: number of words
  1675. * @data: word(s) read from the EEPROM
  1676. *
  1677. * Retrieves 16 bit word(s) read from EEPROM
  1678. **/
  1679. static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
  1680. u16 words, u16 *data)
  1681. {
  1682. struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
  1683. /* If EEPROM is detected and can be addressed using 14 bits,
  1684. * use EERD otherwise use bit bang
  1685. */
  1686. if (eeprom->type == ixgbe_eeprom_spi &&
  1687. offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
  1688. return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
  1689. return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
  1690. data);
  1691. }
  1692. /**
  1693. * ixgbe_read_eeprom_82599 - Read EEPROM word using
  1694. * fastest available method
  1695. *
  1696. * @hw: pointer to hardware structure
  1697. * @offset: offset of word in the EEPROM to read
  1698. * @data: word read from the EEPROM
  1699. *
  1700. * Reads a 16 bit word from the EEPROM
  1701. **/
  1702. static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
  1703. u16 offset, u16 *data)
  1704. {
  1705. struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
  1706. /*
  1707. * If EEPROM is detected and can be addressed using 14 bits,
  1708. * use EERD otherwise use bit bang
  1709. */
  1710. if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
  1711. return ixgbe_read_eerd_generic(hw, offset, data);
  1712. return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
  1713. }
/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 * @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
 * to AUTOC, so this function assumes the semaphore is held.
 */
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));

	/* Wait for AN to leave state 0: poll ANLP1 up to 10 times,
	 * sleeping 4-8 ms between reads.
	 */
	for (i = 0; i < 10; i++) {
		usleep_range(4000, 8000);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	/* anlp1_reg holds the last value read in the loop above */
	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		hw_dbg(hw, "auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = 0;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN -
	 * restored on both the success and failure paths.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}
  1759. /**
  1760. * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
  1761. * @hw: pointer to hardware structure
  1762. * @byte_offset: byte offset to read
  1763. * @dev_addr: address to read from
  1764. * @data: value read
  1765. *
  1766. * Performs byte read operation to SFP module's EEPROM over I2C interface at
  1767. * a specified device address.
  1768. **/
  1769. static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
  1770. u8 dev_addr, u8 *data)
  1771. {
  1772. u32 esdp;
  1773. s32 status;
  1774. s32 timeout = 200;
  1775. if (hw->phy.qsfp_shared_i2c_bus == true) {
  1776. /* Acquire I2C bus ownership. */
  1777. esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  1778. esdp |= IXGBE_ESDP_SDP0;
  1779. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
  1780. IXGBE_WRITE_FLUSH(hw);
  1781. while (timeout) {
  1782. esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  1783. if (esdp & IXGBE_ESDP_SDP1)
  1784. break;
  1785. usleep_range(5000, 10000);
  1786. timeout--;
  1787. }
  1788. if (!timeout) {
  1789. hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
  1790. status = IXGBE_ERR_I2C;
  1791. goto release_i2c_access;
  1792. }
  1793. }
  1794. status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
  1795. release_i2c_access:
  1796. if (hw->phy.qsfp_shared_i2c_bus == true) {
  1797. /* Release I2C bus ownership. */
  1798. esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  1799. esdp &= ~IXGBE_ESDP_SDP0;
  1800. IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
  1801. IXGBE_WRITE_FLUSH(hw);
  1802. }
  1803. return status;
  1804. }
/**
 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: address to write to
 * @data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address.  On QSFP parts with a shared I2C bus the bus
 * is acquired via SDP0 before the write and released afterwards.
 */
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	if (hw->phy.qsfp_shared_i2c_bus == true) {
		/* Acquire I2C bus ownership: raise SDP0 to request the bus. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		/* SDP1 reading back set means the bus has been granted. */
		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;
			usleep_range(5000, 10000);
			timeout--;
		}

		if (!timeout) {
			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:
	if (hw->phy.qsfp_shared_i2c_bus == true) {
		/* Release I2C bus ownership by dropping SDP0. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
  1851. static const struct ixgbe_mac_operations mac_ops_82599 = {
  1852. .init_hw = &ixgbe_init_hw_generic,
  1853. .reset_hw = &ixgbe_reset_hw_82599,
  1854. .start_hw = &ixgbe_start_hw_82599,
  1855. .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
  1856. .get_media_type = &ixgbe_get_media_type_82599,
  1857. .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
  1858. .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
  1859. .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
  1860. .get_mac_addr = &ixgbe_get_mac_addr_generic,
  1861. .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
  1862. .get_device_caps = &ixgbe_get_device_caps_generic,
  1863. .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
  1864. .stop_adapter = &ixgbe_stop_adapter_generic,
  1865. .get_bus_info = &ixgbe_get_bus_info_generic,
  1866. .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
  1867. .read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
  1868. .write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
  1869. .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
  1870. .setup_link = &ixgbe_setup_mac_link_82599,
  1871. .set_rxpba = &ixgbe_set_rxpba_generic,
  1872. .check_link = &ixgbe_check_mac_link_generic,
  1873. .get_link_capabilities = &ixgbe_get_link_capabilities_82599,
  1874. .led_on = &ixgbe_led_on_generic,
  1875. .led_off = &ixgbe_led_off_generic,
  1876. .init_led_link_act = ixgbe_init_led_link_act_generic,
  1877. .blink_led_start = &ixgbe_blink_led_start_generic,
  1878. .blink_led_stop = &ixgbe_blink_led_stop_generic,
  1879. .set_rar = &ixgbe_set_rar_generic,
  1880. .clear_rar = &ixgbe_clear_rar_generic,
  1881. .set_vmdq = &ixgbe_set_vmdq_generic,
  1882. .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
  1883. .clear_vmdq = &ixgbe_clear_vmdq_generic,
  1884. .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
  1885. .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
  1886. .enable_mc = &ixgbe_enable_mc_generic,
  1887. .disable_mc = &ixgbe_disable_mc_generic,
  1888. .clear_vfta = &ixgbe_clear_vfta_generic,
  1889. .set_vfta = &ixgbe_set_vfta_generic,
  1890. .fc_enable = &ixgbe_fc_enable_generic,
  1891. .setup_fc = ixgbe_setup_fc_generic,
  1892. .fc_autoneg = ixgbe_fc_autoneg,
  1893. .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
  1894. .init_uta_tables = &ixgbe_init_uta_tables_generic,
  1895. .setup_sfp = &ixgbe_setup_sfp_modules_82599,
  1896. .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
  1897. .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
  1898. .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
  1899. .release_swfw_sync = &ixgbe_release_swfw_sync,
  1900. .init_swfw_sync = NULL,
  1901. .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
  1902. .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
  1903. .prot_autoc_read = &prot_autoc_read_82599,
  1904. .prot_autoc_write = &prot_autoc_write_82599,
  1905. .enable_rx = &ixgbe_enable_rx_generic,
  1906. .disable_rx = &ixgbe_disable_rx_generic,
  1907. };
  1908. static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
  1909. .init_params = &ixgbe_init_eeprom_params_generic,
  1910. .read = &ixgbe_read_eeprom_82599,
  1911. .read_buffer = &ixgbe_read_eeprom_buffer_82599,
  1912. .write = &ixgbe_write_eeprom_generic,
  1913. .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
  1914. .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
  1915. .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
  1916. .update_checksum = &ixgbe_update_eeprom_checksum_generic,
  1917. };
  1918. static const struct ixgbe_phy_operations phy_ops_82599 = {
  1919. .identify = &ixgbe_identify_phy_82599,
  1920. .identify_sfp = &ixgbe_identify_module_generic,
  1921. .init = &ixgbe_init_phy_ops_82599,
  1922. .reset = &ixgbe_reset_phy_generic,
  1923. .read_reg = &ixgbe_read_phy_reg_generic,
  1924. .write_reg = &ixgbe_write_phy_reg_generic,
  1925. .setup_link = &ixgbe_setup_phy_link_generic,
  1926. .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
  1927. .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
  1928. .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
  1929. .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
  1930. .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
  1931. .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
  1932. .check_overtemp = &ixgbe_tn_check_overtemp,
  1933. };
  1934. const struct ixgbe_info ixgbe_82599_info = {
  1935. .mac = ixgbe_mac_82599EB,
  1936. .get_invariants = &ixgbe_get_invariants_82599,
  1937. .mac_ops = &mac_ops_82599,
  1938. .eeprom_ops = &eeprom_ops_82599,
  1939. .phy_ops = &phy_ops_82599,
  1940. .mbx_ops = &mbx_ops_generic,
  1941. .mvals = ixgbe_mvals_8259X,
  1942. };