/*
 * AMD 10Gb Ethernet PHY driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/jiffies.h>

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("1.0.0-a");
MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");

#define XGBE_PHY_ID	0x000162d0
#define XGBE_PHY_MASK	0xfffffff0

#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
#define XGBE_PHY_BLWC_PROPERTY		"amd,serdes-blwc"
#define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
#define XGBE_PHY_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
#define XGBE_PHY_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"

#define XGBE_PHY_SPEEDS		3
#define XGBE_PHY_SPEED_1000	0
#define XGBE_PHY_SPEED_2500	1
#define XGBE_PHY_SPEED_10000	2

#define XGBE_AN_MS_TIMEOUT	500

#define XGBE_AN_INT_CMPLT	0x01
#define XGBE_AN_INC_LINK	0x02
#define XGBE_AN_PG_RCV		0x04
#define XGBE_AN_INT_MASK	0x07

#define XNP_MCF_NULL_MESSAGE	0x001
#define XNP_ACK_PROCESSED	BIT(12)
#define XNP_MP_FORMATTED	BIT(13)
#define XNP_NP_EXCHANGE		BIT(15)

#define XGBE_PHY_RATECHANGE_COUNT	500
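/* Note: amd_xgbe_phy_serdes_complete_ratechange() polls the SerDes ready
 * bits up to XGBE_PHY_RATECHANGE_COUNT times with a usleep_range(50, 75)
 * per iteration, so the worst-case wait is roughly 25-38 msec before the
 * rate change is reported as not ready.
 */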

#define XGBE_PHY_KR_TRAINING_START	0x01
#define XGBE_PHY_KR_TRAINING_ENABLE	0x02

#define XGBE_PHY_FEC_ENABLE	0x01
#define XGBE_PHY_FEC_FORWARD	0x02
#define XGBE_PHY_FEC_MASK	0x03

#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL	0x0096
#endif

#ifndef MDIO_PMA_10GBR_FEC_ABILITY
#define MDIO_PMA_10GBR_FEC_ABILITY	0x00aa
#endif

#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL	0x00ab
#endif

#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP	0x0016
#endif

#ifndef MDIO_AN_LPX
#define MDIO_AN_LPX	0x0019
#endif

#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK	0x8001
#endif

#ifndef MDIO_AN_INT
#define MDIO_AN_INT	0x8002
#endif

#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G	(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif

/* SerDes integration register offsets */
#define SIR0_KR_RT_1	0x002c
#define SIR0_STATUS	0x0040
#define SIR1_SPEED	0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_CDR_RATE_INDEX	12
#define SIR1_SPEED_CDR_RATE_WIDTH	4
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

#define SPEED_10000_BLWC	0
#define SPEED_10000_CDR		0x7
#define SPEED_10000_PLL		0x1
#define SPEED_10000_PQ		0x12
#define SPEED_10000_RATE	0x0
#define SPEED_10000_TXAMP	0xa
#define SPEED_10000_WORD	0x7
#define SPEED_10000_DFE_TAP_CONFIG	0x1
#define SPEED_10000_DFE_TAP_ENABLE	0x7f

#define SPEED_2500_BLWC		1
#define SPEED_2500_CDR		0x2
#define SPEED_2500_PLL		0x0
#define SPEED_2500_PQ		0xa
#define SPEED_2500_RATE		0x1
#define SPEED_2500_TXAMP	0xf
#define SPEED_2500_WORD		0x1
#define SPEED_2500_DFE_TAP_CONFIG	0x3
#define SPEED_2500_DFE_TAP_ENABLE	0x0

#define SPEED_1000_BLWC		1
#define SPEED_1000_CDR		0x2
#define SPEED_1000_PLL		0x0
#define SPEED_1000_PQ		0xa
#define SPEED_1000_RATE		0x3
#define SPEED_1000_TXAMP	0xf
#define SPEED_1000_WORD		0x1
#define SPEED_1000_DFE_TAP_CONFIG	0x3
#define SPEED_1000_DFE_TAP_ENABLE	0x0

/* SerDes RxTx register offsets */
#define RXTX_REG6	0x0018
#define RXTX_REG20	0x0050
#define RXTX_REG22	0x0058
#define RXTX_REG114	0x01c8
#define RXTX_REG129	0x0204

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG6_RESETB_RXD_INDEX	8
#define RXTX_REG6_RESETB_RXD_WIDTH	1
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7
#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2

/* Bit setting and getting macros
 *  The get macro will extract the current bit field value from within
 *  the variable
 *
 *  The set macro will clear the current bit field value within the
 *  variable and then set the bit field of the variable to the
 *  specified value
 */
#define GET_BITS(_var, _index, _width) \
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val) \
do { \
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
} while (0)
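
/* Illustrative example (not part of the driver logic): with
 * SIR1_SPEED_DATARATE_INDEX = 4 and SIR1_SPEED_DATARATE_WIDTH = 2,
 *
 *	u16 val = 0;
 *	SET_BITS(val, 4, 2, 0x3);	yields val == 0x0030
 *	GET_BITS(val, 4, 2);		evaluates to 0x3
 *
 * SET_BITS clears the field (bits 4..5 here) before or-ing in the new
 * value, so stale field contents cannot leak into the result.
 */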

#define XSIR_GET_BITS(_var, _prefix, _field) \
	GET_BITS((_var), \
		 _prefix##_##_field##_INDEX, \
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
	SET_BITS((_var), \
		 _prefix##_##_field##_INDEX, \
		 _prefix##_##_field##_WIDTH, (_val))

/* Macros for reading or writing SerDes integration registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XSIR0_IOREAD(_priv, _reg) \
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XSIR0_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XSIR0_IOWRITE((_priv), _reg, reg_val); \
} while (0)

#define XSIR1_IOREAD(_priv, _reg) \
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XSIR1_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XSIR1_IOWRITE((_priv), _reg, reg_val); \
} while (0)
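
/* Example (as used in this driver): setting the 1-bit RATECHANGE field of
 * SIR1_SPEED expands through XSIR1_IOWRITE_BITS() into a 16-bit
 * read-modify-write of the register at sir1_regs + SIR1_SPEED:
 *
 *	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
 */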

/* Macros for reading or writing SerDes RxTx registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XRXTX_IOREAD(_priv, _reg) \
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XRXTX_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XRXTX_IOWRITE((_priv), _reg, reg_val); \
} while (0)
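
/* The RxTx accessors follow the same pattern; for instance (illustrative)
 *
 *	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, 1);
 *
 * performs a read-modify-write of only the BLWC enable bit (bit 2) of
 * RXTX_REG20, while XRXTX_IOWRITE() replaces the whole 16-bit register.
 */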

static const u32 amd_xgbe_phy_serdes_blwc[] = {
	SPEED_1000_BLWC,
	SPEED_2500_BLWC,
	SPEED_10000_BLWC,
};

static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
	SPEED_1000_CDR,
	SPEED_2500_CDR,
	SPEED_10000_CDR,
};

static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
	SPEED_1000_PQ,
	SPEED_2500_PQ,
	SPEED_10000_PQ,
};

static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
	SPEED_1000_TXAMP,
	SPEED_2500_TXAMP,
	SPEED_10000_TXAMP,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
	SPEED_1000_DFE_TAP_CONFIG,
	SPEED_2500_DFE_TAP_CONFIG,
	SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
	SPEED_1000_DFE_TAP_ENABLE,
	SPEED_2500_DFE_TAP_ENABLE,
	SPEED_10000_DFE_TAP_ENABLE,
};

enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_ERROR,
};
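
/* Typical progression, as driven by amd_xgbe_an_state_machine(): READY ->
 * PAGE_RECEIVED (possibly repeated per page exchange) -> COMPLETE or
 * NO_LINK.  INCOMPAT_LINK triggers a KR/KX mode switch and a restart of
 * auto-negotiation; ERROR is entered on unexpected interrupt state or a
 * failed register access.
 */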

enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_BPA = 0,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
	AMD_XGBE_RX_ERROR,
};

enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};

struct amd_xgbe_phy_priv {
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct device *dev;

	struct phy_device *phydev;

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	int an_irq;
	char an_irq_name[IFNAMSIZ + 32];
	struct work_struct an_irq_work;
	unsigned int an_irq_allocated;

	unsigned int speed_set;

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings.  The values can be supplied as device
	 *   properties in array format.  The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	u32 serdes_blwc[XGBE_PHY_SPEEDS];
	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
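
	/* For example, a firmware node could override the BLWC defaults
	 * with a per-speed triple; the values below are purely
	 * illustrative:
	 *
	 *	amd,serdes-blwc = <1 1 0>;	(1GbE, 2.5GbE, 10GbE)
	 */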

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;
	enum amd_xgbe_phy_an an_result;
	enum amd_xgbe_phy_an an_state;
	enum amd_xgbe_phy_rx kr_state;
	enum amd_xgbe_phy_rx kx_state;
	struct work_struct an_work;
	struct workqueue_struct *an_workqueue;
	unsigned int an_supported;
	unsigned int parallel_detect;
	unsigned int fec_ability;
	unsigned long an_start;

	unsigned int lpm_ctrl;		/* CTRL1 for resume */
};

static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret |= XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	usleep_range(75, 100);

	ret &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	return 0;
}

static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	/* Assert Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
}

static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	unsigned int wait;
	u16 status;

	/* Release Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);

	/* Wait for Rx and Tx ready */
	wait = XGBE_PHY_RATECHANGE_COUNT;
	while (wait--) {
		usleep_range(50, 75);

		status = XSIR0_IOREAD(priv, SIR0_STATUS);
		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
			goto rx_reset;
	}

	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
		   status);

rx_reset:
	/* Perform Rx reset for the DFE changes */
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}

static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode *mode)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		*mode = AMD_XGBE_MODE_KR;
	else
		*mode = AMD_XGBE_MODE_KX;

	return 0;
}

static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
{
	enum amd_xgbe_phy_mode mode;

	if (amd_xgbe_phy_cur_mode(phydev, &mode))
		return false;

	return (mode == AMD_XGBE_MODE_KR);
}

static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we are in KR switch to KX, and vice-versa */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
			ret = amd_xgbe_phy_gmii_mode(phydev);
		else
			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	} else {
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	}

	return ret;
}

static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode mode)
{
	enum amd_xgbe_phy_mode cur_mode;
	int ret;

	ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
	if (ret)
		return ret;

	if (mode != cur_mode)
		ret = amd_xgbe_phy_switch_mode(phydev);

	return ret;
}

static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_10000baseKR_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_10000)
			return true;
	}

	return false;
}

static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_2500baseX_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_2500)
			return true;
	}

	return false;
}

static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_1000baseKX_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_1000)
			return true;
	}

	return false;
}

static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
			       bool restart)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_AN_CTRL1_ENABLE;
	if (enable)
		ret |= MDIO_AN_CTRL1_ENABLE;

	if (restart)
		ret |= MDIO_AN_CTRL1_RESTART;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	return 0;
}

static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, true, true);
}

static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, false, false);
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	u16 msg;

	*state = AMD_XGBE_RX_XNP;

	msg = XNP_MCF_NULL_MESSAGE;
	msg |= XNP_MP_FORMATTED;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	unsigned int link_support;
	int ret, ad_reg, lp_reg;

	/* Read Base Ability register 2 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Check for a supported mode, otherwise restart in a different one */
	link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
	if (!(ret & link_support))
		return AMD_XGBE_AN_INCOMPAT_LINK;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	int ad_reg, lp_reg;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_rx *state;
	unsigned long an_timeout;
	int ret;

	if (!priv->an_start) {
		priv->an_start = jiffies;
	} else {
		an_timeout = priv->an_start +
			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
		if (time_after(jiffies, an_timeout)) {
			/* Auto-negotiation timed out, reset state */
			priv->kr_state = AMD_XGBE_RX_BPA;
			priv->kx_state = AMD_XGBE_RX_BPA;

			priv->an_start = jiffies;
		}
	}

	state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
						: &priv->kx_state;

	switch (*state) {
	case AMD_XGBE_RX_BPA:
		ret = amd_xgbe_an_rx_bpa(phydev, state);
		break;

	case AMD_XGBE_RX_XNP:
		ret = amd_xgbe_an_rx_xnp(phydev, state);
		break;

	default:
		ret = AMD_XGBE_AN_ERROR;
	}

	return ret;
}

static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Be sure we aren't looping trying to negotiate */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		priv->kr_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
		    !(phydev->advertising & SUPPORTED_2500baseX_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kx_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	} else {
		priv->kx_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kr_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	}

	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_switch_mode(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_restart_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	return AMD_XGBE_AN_INCOMPAT_LINK;
}

static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
{
	struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;

	/* Interrupt reason must be read and cleared outside of IRQ context */
	disable_irq_nosync(priv->an_irq);

	queue_work(priv->an_workqueue, &priv->an_irq_work);

	return IRQ_HANDLED;
}

static void amd_xgbe_an_irq_work(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_irq_work);

	/* Avoid a race between enabling the IRQ and exiting the work by
	 * waiting for the work to finish and then queueing it
	 */
	flush_work(&priv->an_work);
	queue_work(priv->an_workqueue, &priv->an_work);
}

static void amd_xgbe_an_state_machine(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_work);
	struct phy_device *phydev = priv->phydev;
	enum amd_xgbe_phy_an cur_state = priv->an_state;
	int int_reg, int_mask;

	mutex_lock(&priv->an_mutex);

	/* Read the interrupt */
	int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
	if (!int_reg)
		goto out;

next_int:
	if (int_reg < 0) {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = XGBE_AN_INT_MASK;
	} else if (int_reg & XGBE_AN_PG_RCV) {
		priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
		int_mask = XGBE_AN_PG_RCV;
	} else if (int_reg & XGBE_AN_INC_LINK) {
		priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
		int_mask = XGBE_AN_INC_LINK;
	} else if (int_reg & XGBE_AN_INT_CMPLT) {
		priv->an_state = AMD_XGBE_AN_COMPLETE;
		int_mask = XGBE_AN_INT_CMPLT;
	} else {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = 0;
	}

	/* Clear the interrupt to be processed */
	int_reg &= ~int_mask;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);

	priv->an_result = priv->an_state;

again:
	cur_state = priv->an_state;

	switch (priv->an_state) {
	case AMD_XGBE_AN_READY:
		priv->an_supported = 0;
		break;

	case AMD_XGBE_AN_PAGE_RECEIVED:
		priv->an_state = amd_xgbe_an_page_received(phydev);
		priv->an_supported++;
		break;

	case AMD_XGBE_AN_INCOMPAT_LINK:
		priv->an_supported = 0;
		priv->parallel_detect = 0;
		priv->an_state = amd_xgbe_an_incompat_link(phydev);
		break;

	case AMD_XGBE_AN_COMPLETE:
		priv->parallel_detect = priv->an_supported ? 0 : 1;
		netdev_dbg(phydev->attached_dev, "%s successful\n",
			   priv->an_supported ? "Auto negotiation"
					      : "Parallel detection");
		break;

	case AMD_XGBE_AN_NO_LINK:
		break;

	default:
		priv->an_state = AMD_XGBE_AN_ERROR;
	}

	if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	} else if (priv->an_state == AMD_XGBE_AN_ERROR) {
		netdev_err(phydev->attached_dev,
			   "error during auto-negotiation, state=%u\n",
			   cur_state);

		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	}

	if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
		priv->an_result = priv->an_state;
		priv->an_state = AMD_XGBE_AN_READY;
		priv->kr_state = AMD_XGBE_RX_BPA;
		priv->kx_state = AMD_XGBE_RX_BPA;
		priv->an_start = 0;
	}

	if (cur_state != priv->an_state)
		goto again;

	if (int_reg)
		goto next_int;

out:
	enable_irq(priv->an_irq);

	mutex_unlock(&priv->an_mutex);
}

static int amd_xgbe_an_init(struct phy_device *phydev)
{
	int ret;

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
	    (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->advertising & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	return 0;
}

static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
{
	int count, ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_RESET;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	count = 50;
	do {
		msleep(20);
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
		if (ret < 0)
			return ret;
	} while ((ret & MDIO_CTRL1_RESET) && --count);

	if (ret & MDIO_CTRL1_RESET)
		return -ETIMEDOUT;

	/* Disable auto-negotiation for now */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	return 0;
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!priv->an_irq_allocated) {
		/* Allocate the auto-negotiation workqueue and interrupt */
		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
			 "%s-pcs", netdev_name(netdev));

		priv->an_workqueue =
			create_singlethread_workqueue(priv->an_irq_name);
		if (!priv->an_workqueue) {
			netdev_err(netdev, "phy workqueue creation failed\n");
			return -ENOMEM;
		}

		ret = devm_request_irq(priv->dev, priv->an_irq,
				       amd_xgbe_an_isr, 0, priv->an_irq_name,
				       priv);
		if (ret) {
			netdev_err(netdev, "phy irq request failed\n");
			destroy_workqueue(priv->an_workqueue);
			return ret;
		}

		priv->an_irq_allocated = 1;
	}

	/* Set initial mode - call the mode setting routines
	 * directly to ensure we are properly configured
	 */
	if (amd_xgbe_phy_use_xgmii_mode(phydev))
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	else if (amd_xgbe_phy_use_gmii_mode(phydev))
		ret = amd_xgbe_phy_gmii_mode(phydev);
	else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	else
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);

	return 0;
}

static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
{
	int ret;

	/* Disable auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Validate/Set specified speed */
	switch (phydev->speed) {
	case SPEED_10000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
		break;

	case SPEED_2500:
	case SPEED_1000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
		break;

	default:
		ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* Validate duplex mode */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	phydev->pause = 0;
	phydev->asym_pause = 0;

	return 0;
}

static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Disable auto-negotiation interrupt */
	disable_irq(priv->an_irq);

	/* Start auto-negotiation in a supported mode */
	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
	else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
		 (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
	else
		ret = -EINVAL;
	if (ret < 0) {
		enable_irq(priv->an_irq);
		return ret;
	}

	/* Disable and stop any in progress auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear any auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_READY;
	priv->kr_state = AMD_XGBE_RX_BPA;
	priv->kx_state = AMD_XGBE_RX_BPA;

	/* Re-enable auto-negotiation interrupt */
	enable_irq(priv->an_irq);

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable and start auto-negotiation */
	return amd_xgbe_phy_restart_an(phydev);
}

static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&priv->an_mutex);

	ret = __amd_xgbe_phy_config_aneg(phydev);

	mutex_unlock(&priv->an_mutex);

	return ret;
}

static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}

static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	if (priv->an_state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	return 0;
}

static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    !priv->parallel_detect) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;

		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;

		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
			if (ret)
				return ret;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}

			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
			if (ret)
				return ret;
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		if (amd_xgbe_phy_in_kr_mode(phydev)) {
			phydev->speed = SPEED_10000;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}
		}

		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}

static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	priv->lpm_ctrl = ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	mutex_lock(&phydev->lock);

	priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);

	mutex_unlock(&phydev->lock);

	return 0;
}

static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
						unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
			count++;
	}

	return count;
}

static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *phy_pdev;
	struct device *dev, *phy_dev;
	unsigned int phy_resnum, phy_irqnum;
	int ret;

	if (!phydev->bus || !phydev->bus->parent)
		return -EINVAL;
	dev = phydev->bus->parent;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdev = to_platform_device(dev);
	priv->adev = ACPI_COMPANION(dev);
	priv->dev = dev;
	priv->phydev = phydev;
	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);

	if (!priv->adev || acpi_disabled) {
		struct device_node *bus_node;
		struct device_node *phy_node;

		bus_node = priv->dev->of_node;
		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
		if (!phy_node) {
			dev_err(dev, "unable to parse phy-handle\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
		if (!phy_pdev) {
			dev_err(dev, "unable to obtain phy device\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_resnum = 0;
		phy_irqnum = 0;
	} else {
		/* In ACPI, the XGBE and PHY resources are grouped together
		 * with the PHY resources at the end
		 */
		phy_pdev = priv->pdev;
		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_MEM) - 3;
		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_IRQ) - 1;
	}
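
	/* Worked example for the ACPI case above: if the combined device
	 * lists five MEM and two IRQ resources, the three PHY MEM areas are
	 * resource indices 2, 3 and 4, and the PHY's auto-negotiation
	 * interrupt is IRQ index 1.
	 */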
	phy_dev = &phy_pdev->dev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_put;
	}

	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_sir1;
	}
	priv->an_irq = ret;

	/* Get the device speed set property */
	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
				       &priv->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		goto err_sir1;
	}

	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_sir1;
	}
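
	/* The SerDes tuning values are optional device properties; each
	 * is an array with one u32 entry per supported speed
	 * (XGBE_PHY_SPEEDS entries).  Fall back to the built-in defaults
	 * when a property is absent.
	 */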
	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_BLWC_PROPERTY,
						     priv->serdes_blwc,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_BLWC_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
		       sizeof(priv->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_CDR_RATE_PROPERTY,
						     priv->serdes_cdr_rate,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_CDR_RATE_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
		       sizeof(priv->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_PQ_SKEW_PROPERTY,
						     priv->serdes_pq_skew,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_PQ_SKEW_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
		       sizeof(priv->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_TX_AMP_PROPERTY,
						     priv->serdes_tx_amp,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_TX_AMP_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
		       sizeof(priv->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_CFG_PROPERTY,
						     priv->serdes_dfe_tap_cfg,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_CFG_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_cfg,
		       amd_xgbe_phy_serdes_dfe_tap_cfg,
		       sizeof(priv->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_ENA_PROPERTY,
						     priv->serdes_dfe_tap_ena,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_ENA_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_ena,
		       amd_xgbe_phy_serdes_dfe_tap_ena,
		       sizeof(priv->serdes_dfe_tap_ena));
	}

	/* Initialize supported features */
	phydev->supported = SUPPORTED_Autoneg;
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->supported |= SUPPORTED_Backplane;
	phydev->supported |= SUPPORTED_10000baseKR_Full;
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
		phydev->supported |= SUPPORTED_1000baseKX_Full;
		break;
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		phydev->supported |= SUPPORTED_2500baseX_Full;
		break;
	}
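
	/* Only advertise 10GBase-R FEC if the PMA reports the ability */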
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
	if (ret < 0)
		goto err_sir1;

	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
		phydev->supported |= SUPPORTED_10000baseR_FEC;

	phydev->advertising = phydev->supported;

	phydev->priv = priv;

	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

	return 0;
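
	/* Error paths unwind in reverse order of acquisition; the devm_*
	 * resources are released explicitly here, presumably so a failed
	 * probe does not pin the mappings to the lifetime of the bus
	 * parent device.
	 */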
err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_put:
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

err_priv:
	devm_kfree(dev, priv);

	return ret;
}
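
/* Tear down in reverse order of probe: release the auto-negotiation
 * interrupt and workqueue if they were set up, then unmap and release
 * the SerDes register regions.
 */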
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
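
/* Identify the device by its clause-45 PCS MMD device identifier */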
static int amd_xgbe_match_phy_device(struct phy_device *phydev)
{
	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
}
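
/* .features is left at 0 because the supported feature mask is built
 * dynamically in the probe routine; PHY_IS_INTERNAL marks the PHY as
 * integrated into the controller.
 */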
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.flags			= PHY_IS_INTERNAL,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};

module_phy_driver(amd_xgbe_phy_driver);
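
/* MDIO device ID table: lets userspace autoload this module via the
 * modalias generated for a matching PHY on the bus.
 */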
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};

MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);