amd-xgbe-phy.c

/*
 * AMD 10Gb Ethernet PHY driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/jiffies.h>

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("1.0.0-a");
MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");

#define XGBE_PHY_ID	0x000162d0
#define XGBE_PHY_MASK	0xfffffff0

#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
#define XGBE_PHY_BLWC_PROPERTY		"amd,serdes-blwc"
#define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
#define XGBE_PHY_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
#define XGBE_PHY_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"

#define XGBE_PHY_SPEEDS			3
#define XGBE_PHY_SPEED_1000		0
#define XGBE_PHY_SPEED_2500		1
#define XGBE_PHY_SPEED_10000		2

#define XGBE_AN_MS_TIMEOUT		500

#define XGBE_AN_INT_CMPLT		0x01
#define XGBE_AN_INC_LINK		0x02
#define XGBE_AN_PG_RCV			0x04
#define XGBE_AN_INT_MASK		0x07

#define XNP_MCF_NULL_MESSAGE		0x001
#define XNP_ACK_PROCESSED		BIT(12)
#define XNP_MP_FORMATTED		BIT(13)
#define XNP_NP_EXCHANGE			BIT(15)

#define XGBE_PHY_RATECHANGE_COUNT	500

#define XGBE_PHY_KR_TRAINING_START	0x01
#define XGBE_PHY_KR_TRAINING_ENABLE	0x02

#define XGBE_PHY_FEC_ENABLE		0x01
#define XGBE_PHY_FEC_FORWARD		0x02
#define XGBE_PHY_FEC_MASK		0x03

#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
#endif

#ifndef MDIO_PMA_10GBR_FEC_ABILITY
#define MDIO_PMA_10GBR_FEC_ABILITY	0x00aa
#endif

#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
#endif

#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP			0x0016
#endif

#ifndef MDIO_AN_LPX
#define MDIO_AN_LPX			0x0019
#endif

#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK			0x8001
#endif

#ifndef MDIO_AN_INT
#define MDIO_AN_INT			0x8002
#endif

#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G	(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif

/* SerDes integration register offsets */
#define SIR0_KR_RT_1			0x002c
#define SIR0_STATUS			0x0040
#define SIR1_SPEED			0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_CDR_RATE_INDEX	12
#define SIR1_SPEED_CDR_RATE_WIDTH	4
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

#define SPEED_10000_BLWC		0
#define SPEED_10000_CDR			0x7
#define SPEED_10000_PLL			0x1
#define SPEED_10000_PQ			0x12
#define SPEED_10000_RATE		0x0
#define SPEED_10000_TXAMP		0xa
#define SPEED_10000_WORD		0x7
#define SPEED_10000_DFE_TAP_CONFIG	0x1
#define SPEED_10000_DFE_TAP_ENABLE	0x7f

#define SPEED_2500_BLWC			1
#define SPEED_2500_CDR			0x2
#define SPEED_2500_PLL			0x0
#define SPEED_2500_PQ			0xa
#define SPEED_2500_RATE			0x1
#define SPEED_2500_TXAMP		0xf
#define SPEED_2500_WORD			0x1
#define SPEED_2500_DFE_TAP_CONFIG	0x3
#define SPEED_2500_DFE_TAP_ENABLE	0x0

#define SPEED_1000_BLWC			1
#define SPEED_1000_CDR			0x2
#define SPEED_1000_PLL			0x0
#define SPEED_1000_PQ			0xa
#define SPEED_1000_RATE			0x3
#define SPEED_1000_TXAMP		0xf
#define SPEED_1000_WORD			0x1
#define SPEED_1000_DFE_TAP_CONFIG	0x3
#define SPEED_1000_DFE_TAP_ENABLE	0x0

/* SerDes RxTx register offsets */
#define RXTX_REG6			0x0018
#define RXTX_REG20			0x0050
#define RXTX_REG22			0x0058
#define RXTX_REG114			0x01c8
#define RXTX_REG129			0x0204

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG6_RESETB_RXD_INDEX	8
#define RXTX_REG6_RESETB_RXD_WIDTH	1
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7
#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2

/* Bit setting and getting macros
 *  The get macro will extract the current bit field value from within
 *  the variable
 *
 *  The set macro will clear the current bit field value within the
 *  variable and then set the bit field of the variable to the
 *  specified value
 */
#define GET_BITS(_var, _index, _width) \
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val) \
do { \
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
} while (0)

#define XSIR_GET_BITS(_var, _prefix, _field) \
	GET_BITS((_var), \
		 _prefix##_##_field##_INDEX, \
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
	SET_BITS((_var), \
		 _prefix##_##_field##_INDEX, \
		 _prefix##_##_field##_WIDTH, (_val))
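
/* Illustrative example (not part of the driver): given status = 0x0101,
 * XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) expands to
 * GET_BITS(status, 0, 1) and yields 1, while
 * XSIR_SET_BITS(status, SIR0_STATUS, TX_READY, 0) clears bit 8,
 * leaving status == 0x0001.
 */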

/* Macros for reading or writing SerDes integration registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XSIR0_IOREAD(_priv, _reg) \
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XSIR0_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XSIR0_IOWRITE((_priv), _reg, reg_val); \
} while (0)

#define XSIR1_IOREAD(_priv, _reg) \
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XSIR1_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XSIR1_IOWRITE((_priv), _reg, reg_val); \
} while (0)

/* Macros for reading or writing SerDes RxTx registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XRXTX_IOREAD(_priv, _reg) \
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
	GET_BITS(XRXTX_IOREAD((_priv), _reg), \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val) \
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
do { \
	u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
	SET_BITS(reg_val, \
		 _reg##_##_field##_INDEX, \
		 _reg##_##_field##_WIDTH, (_val)); \
	XRXTX_IOWRITE((_priv), _reg, reg_val); \
} while (0)
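
/* Illustrative example (mirrors actual usage later in this file): a
 * field write such as
 *
 *	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
 *
 * performs a read-modify-write equivalent to:
 *
 *	u16 val = ioread16(priv->sir1_regs + SIR1_SPEED);
 *	SET_BITS(val, SIR1_SPEED_RATECHANGE_INDEX,
 *		 SIR1_SPEED_RATECHANGE_WIDTH, 1);
 *	iowrite16(val, priv->sir1_regs + SIR1_SPEED);
 */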

static const u32 amd_xgbe_phy_serdes_blwc[] = {
	SPEED_1000_BLWC,
	SPEED_2500_BLWC,
	SPEED_10000_BLWC,
};

static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
	SPEED_1000_CDR,
	SPEED_2500_CDR,
	SPEED_10000_CDR,
};

static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
	SPEED_1000_PQ,
	SPEED_2500_PQ,
	SPEED_10000_PQ,
};

static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
	SPEED_1000_TXAMP,
	SPEED_2500_TXAMP,
	SPEED_10000_TXAMP,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
	SPEED_1000_DFE_TAP_CONFIG,
	SPEED_2500_DFE_TAP_CONFIG,
	SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
	SPEED_1000_DFE_TAP_ENABLE,
	SPEED_2500_DFE_TAP_ENABLE,
	SPEED_10000_DFE_TAP_ENABLE,
};

enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_ERROR,
};

enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_BPA = 0,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
	AMD_XGBE_RX_ERROR,
};

enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};

struct amd_xgbe_phy_priv {
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct device *dev;

	struct phy_device *phydev;

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	int an_irq;
	char an_irq_name[IFNAMSIZ + 32];
	struct work_struct an_irq_work;
	unsigned int an_irq_allocated;

	unsigned int speed_set;

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings. The values can be supplied as device
	 *   properties in array format. The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	u32 serdes_blwc[XGBE_PHY_SPEEDS];
	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;
	enum amd_xgbe_phy_an an_result;
	enum amd_xgbe_phy_an an_state;
	enum amd_xgbe_phy_rx kr_state;
	enum amd_xgbe_phy_rx kx_state;
	struct work_struct an_work;
	struct workqueue_struct *an_workqueue;
	unsigned int an_supported;
	unsigned int parallel_detect;
	unsigned int fec_ability;
	unsigned long an_start;

	unsigned int lpm_ctrl;		/* CTRL1 for resume */
};
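
/* Illustrative example (assumed devicetree fragment, not from this file):
 * each SerDes tuning property is a three-element array ordered
 * 1GbE, 2.5GbE, 10GbE. Supplying the built-in defaults explicitly
 * would look like:
 *
 *	amd,serdes-blwc = <1 1 0>;
 *	amd,serdes-cdr-rate = <0x2 0x2 0x7>;
 *	amd,serdes-tx-amp = <0xf 0xf 0xa>;
 */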

static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret |= XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	usleep_range(75, 100);

	ret &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	return 0;
}

static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	/* Assert Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
}

static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	unsigned int wait;
	u16 status;

	/* Release Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);

	/* Wait for Rx and Tx ready */
	wait = XGBE_PHY_RATECHANGE_COUNT;
	while (wait--) {
		usleep_range(50, 75);

		status = XSIR0_IOREAD(priv, SIR0_STATUS);
		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
			goto rx_reset;
	}

	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
		   status);

rx_reset:
	/* Perform Rx reset for the DFE changes */
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}

static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode *mode)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		*mode = AMD_XGBE_MODE_KR;
	else
		*mode = AMD_XGBE_MODE_KX;

	return 0;
}

static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
{
	enum amd_xgbe_phy_mode mode;

	if (amd_xgbe_phy_cur_mode(phydev, &mode))
		return false;

	return (mode == AMD_XGBE_MODE_KR);
}

static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we are in KR switch to KX, and vice-versa */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
			ret = amd_xgbe_phy_gmii_mode(phydev);
		else
			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	} else {
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	}

	return ret;
}

static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode mode)
{
	enum amd_xgbe_phy_mode cur_mode;
	int ret;

	ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
	if (ret)
		return ret;

	if (mode != cur_mode)
		ret = amd_xgbe_phy_switch_mode(phydev);

	return ret;
}

static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
			       bool restart)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_AN_CTRL1_ENABLE;
	if (enable)
		ret |= MDIO_AN_CTRL1_ENABLE;

	if (restart)
		ret |= MDIO_AN_CTRL1_RESTART;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	return 0;
}

static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, true, true);
}

static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, false, false);
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	u16 msg;

	*state = AMD_XGBE_RX_XNP;

	msg = XNP_MCF_NULL_MESSAGE;
	msg |= XNP_MP_FORMATTED;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	unsigned int link_support;
	int ret, ad_reg, lp_reg;

	/* Read Base Ability register 2 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Check for a supported mode, otherwise restart in a different one */
	link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
	if (!(ret & link_support))
		return AMD_XGBE_AN_INCOMPAT_LINK;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	int ad_reg, lp_reg;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_rx *state;
	unsigned long an_timeout;
	int ret;

	if (!priv->an_start) {
		priv->an_start = jiffies;
	} else {
		an_timeout = priv->an_start +
			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
		if (time_after(jiffies, an_timeout)) {
			/* Auto-negotiation timed out, reset state */
			priv->kr_state = AMD_XGBE_RX_BPA;
			priv->kx_state = AMD_XGBE_RX_BPA;

			priv->an_start = jiffies;
		}
	}

	state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
						: &priv->kx_state;

	switch (*state) {
	case AMD_XGBE_RX_BPA:
		ret = amd_xgbe_an_rx_bpa(phydev, state);
		break;

	case AMD_XGBE_RX_XNP:
		ret = amd_xgbe_an_rx_xnp(phydev, state);
		break;

	default:
		ret = AMD_XGBE_AN_ERROR;
	}

	return ret;
}

static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Be sure we aren't looping trying to negotiate */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		priv->kr_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
		    !(phydev->advertising & SUPPORTED_2500baseX_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kx_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	} else {
		priv->kx_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kr_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	}

	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_switch_mode(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_restart_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	return AMD_XGBE_AN_INCOMPAT_LINK;
}

static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
{
	struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;

	/* Interrupt reason must be read and cleared outside of IRQ context */
	disable_irq_nosync(priv->an_irq);

	queue_work(priv->an_workqueue, &priv->an_irq_work);

	return IRQ_HANDLED;
}

static void amd_xgbe_an_irq_work(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_irq_work);

	/* Avoid a race between enabling the IRQ and exiting the work by
	 * waiting for the work to finish and then queueing it
	 */
	flush_work(&priv->an_work);
	queue_work(priv->an_workqueue, &priv->an_work);
}

static void amd_xgbe_an_state_machine(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_work);
	struct phy_device *phydev = priv->phydev;
	enum amd_xgbe_phy_an cur_state = priv->an_state;
	int int_reg, int_mask;

	mutex_lock(&priv->an_mutex);

	/* Read the interrupt */
	int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
	if (!int_reg)
		goto out;

next_int:
	if (int_reg < 0) {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = XGBE_AN_INT_MASK;
	} else if (int_reg & XGBE_AN_PG_RCV) {
		priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
		int_mask = XGBE_AN_PG_RCV;
	} else if (int_reg & XGBE_AN_INC_LINK) {
		priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
		int_mask = XGBE_AN_INC_LINK;
	} else if (int_reg & XGBE_AN_INT_CMPLT) {
		priv->an_state = AMD_XGBE_AN_COMPLETE;
		int_mask = XGBE_AN_INT_CMPLT;
	} else {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = 0;
	}

	/* Clear the interrupt to be processed */
	int_reg &= ~int_mask;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);

	priv->an_result = priv->an_state;

again:
	cur_state = priv->an_state;

	switch (priv->an_state) {
	case AMD_XGBE_AN_READY:
		priv->an_supported = 0;
		break;

	case AMD_XGBE_AN_PAGE_RECEIVED:
		priv->an_state = amd_xgbe_an_page_received(phydev);
		priv->an_supported++;
		break;

	case AMD_XGBE_AN_INCOMPAT_LINK:
		priv->an_supported = 0;
		priv->parallel_detect = 0;
		priv->an_state = amd_xgbe_an_incompat_link(phydev);
		break;

	case AMD_XGBE_AN_COMPLETE:
		priv->parallel_detect = priv->an_supported ? 0 : 1;
		netdev_dbg(phydev->attached_dev, "%s successful\n",
			   priv->an_supported ? "Auto negotiation"
					      : "Parallel detection");
		break;

	case AMD_XGBE_AN_NO_LINK:
		break;

	default:
		priv->an_state = AMD_XGBE_AN_ERROR;
	}

	if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	} else if (priv->an_state == AMD_XGBE_AN_ERROR) {
		netdev_err(phydev->attached_dev,
			   "error during auto-negotiation, state=%u\n",
			   cur_state);

		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	}

	if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
		priv->an_result = priv->an_state;
		priv->an_state = AMD_XGBE_AN_READY;
		priv->kr_state = AMD_XGBE_RX_BPA;
		priv->kx_state = AMD_XGBE_RX_BPA;
		priv->an_start = 0;
	}

	if (cur_state != priv->an_state)
		goto again;

	if (int_reg)
		goto next_int;

out:
	enable_irq(priv->an_irq);

	mutex_unlock(&priv->an_mutex);
}

static int amd_xgbe_an_init(struct phy_device *phydev)
{
	int ret;

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
	    (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->advertising & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	return 0;
}

static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
{
	int count, ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_RESET;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	count = 50;
	do {
		msleep(20);
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
		if (ret < 0)
			return ret;
	} while ((ret & MDIO_CTRL1_RESET) && --count);

	if (ret & MDIO_CTRL1_RESET)
		return -ETIMEDOUT;

	/* Disable auto-negotiation for now */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	return 0;
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!priv->an_irq_allocated) {
		/* Allocate the auto-negotiation workqueue and interrupt */
		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
			 "%s-pcs", netdev_name(netdev));

		priv->an_workqueue =
			create_singlethread_workqueue(priv->an_irq_name);
		if (!priv->an_workqueue) {
			netdev_err(netdev, "phy workqueue creation failed\n");
			return -ENOMEM;
		}

		ret = devm_request_irq(priv->dev, priv->an_irq,
				       amd_xgbe_an_isr, 0, priv->an_irq_name,
				       priv);
		if (ret) {
			netdev_err(netdev, "phy irq request failed\n");
			destroy_workqueue(priv->an_workqueue);
			return ret;
		}

		priv->an_irq_allocated = 1;
	}

	/* Set initial mode - call the mode setting routines
	 * directly to ensure we are properly configured
	 */
	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
		ret = amd_xgbe_phy_gmii_mode(phydev);
	else if (phydev->advertising & SUPPORTED_2500baseX_Full)
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	else
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);

	return 0;
}

static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
{
	int ret;

	/* Disable auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Validate/Set specified speed */
	switch (phydev->speed) {
	case SPEED_10000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
		break;

	case SPEED_2500:
	case SPEED_1000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
		break;

	default:
		ret = -EINVAL;
	}
	if (ret < 0)
		return ret;

	/* Validate duplex mode */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	phydev->pause = 0;
	phydev->asym_pause = 0;

	return 0;
}

static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Disable auto-negotiation interrupt */
	disable_irq(priv->an_irq);

	/* Start auto-negotiation in a supported mode */
	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
	else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
		 (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
	else
		ret = -EINVAL;
	if (ret < 0) {
		enable_irq(priv->an_irq);
		return ret;
	}

	/* Disable and stop any in progress auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear any auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_READY;
	priv->kr_state = AMD_XGBE_RX_BPA;
	priv->kx_state = AMD_XGBE_RX_BPA;

	/* Re-enable auto-negotiation interrupt */
	enable_irq(priv->an_irq);

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable and start auto-negotiation */
	return amd_xgbe_phy_restart_an(phydev);
}

static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&priv->an_mutex);

	ret = __amd_xgbe_phy_config_aneg(phydev);

	mutex_unlock(&priv->an_mutex);

	return ret;
}

static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}

static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	if (priv->an_state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	return 0;
}

static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    !priv->parallel_detect) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;
		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;
		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
			if (ret)
				return ret;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}

			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
			if (ret)
				return ret;
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		if (amd_xgbe_phy_in_kr_mode(phydev)) {
			phydev->speed = SPEED_10000;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}
		}

		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}

static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	priv->lpm_ctrl = ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	mutex_lock(&phydev->lock);

	priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);

	mutex_unlock(&phydev->lock);

	return 0;
}

static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
						unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
			count++;
	}

	return count;
}

static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *phy_pdev;
	struct device *dev, *phy_dev;
	unsigned int phy_resnum, phy_irqnum;
	int ret;

	if (!phydev->bus || !phydev->bus->parent)
		return -EINVAL;

	dev = phydev->bus->parent;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdev = to_platform_device(dev);
	priv->adev = ACPI_COMPANION(dev);
	priv->dev = dev;
	priv->phydev = phydev;
	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);

	if (!priv->adev || acpi_disabled) {
		struct device_node *bus_node;
		struct device_node *phy_node;

		bus_node = priv->dev->of_node;
		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
		if (!phy_node) {
			dev_err(dev, "unable to parse phy-handle\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
		if (!phy_pdev) {
			dev_err(dev, "unable to obtain phy device\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_resnum = 0;
		phy_irqnum = 0;
	} else {
		/* In ACPI, the XGBE and PHY resources are grouped
		 * together with the PHY resources at the end
		 */
		phy_pdev = priv->pdev;
		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_MEM) - 3;
		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_IRQ) - 1;
	}

	phy_dev = &phy_pdev->dev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_put;
	}

	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_sir1;
	}
	priv->an_irq = ret;

	/* Get the device speed set property */
	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
				       &priv->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		goto err_sir1;
	}

	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_sir1;
	}

	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_BLWC_PROPERTY,
						     priv->serdes_blwc,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_BLWC_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
		       sizeof(priv->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_CDR_RATE_PROPERTY,
						     priv->serdes_cdr_rate,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_CDR_RATE_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
		       sizeof(priv->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_PQ_SKEW_PROPERTY,
						     priv->serdes_pq_skew,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_PQ_SKEW_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
		       sizeof(priv->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_TX_AMP_PROPERTY,
						     priv->serdes_tx_amp,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_TX_AMP_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
		       sizeof(priv->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_CFG_PROPERTY,
						     priv->serdes_dfe_tap_cfg,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_CFG_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_cfg,
		       amd_xgbe_phy_serdes_dfe_tap_cfg,
		       sizeof(priv->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_ENA_PROPERTY,
						     priv->serdes_dfe_tap_ena,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_ENA_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_ena,
		       amd_xgbe_phy_serdes_dfe_tap_ena,
		       sizeof(priv->serdes_dfe_tap_ena));
	}

	/* Initialize supported features */
	phydev->supported = SUPPORTED_Autoneg;
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->supported |= SUPPORTED_Backplane;
	phydev->supported |= SUPPORTED_10000baseKR_Full;
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
		phydev->supported |= SUPPORTED_1000baseKX_Full;
		break;
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		phydev->supported |= SUPPORTED_2500baseX_Full;
		break;
	}

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
	if (ret < 0)
		return ret;
	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
		phydev->supported |= SUPPORTED_10000baseR_FEC;

	phydev->advertising = phydev->supported;

	phydev->priv = priv;

	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

	return 0;

err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_put:
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

err_priv:
	devm_kfree(dev, priv);

	return ret;
}

static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}

static int amd_xgbe_match_phy_device(struct phy_device *phydev)
{
	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
}

static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.flags			= PHY_IS_INTERNAL,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};

module_phy_driver(amd_xgbe_phy_driver);

static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};

MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);