amd-xgbe-phy.c

/*
 * AMD 10Gb Ethernet PHY driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/acpi.h>

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("1.0.0-a");
MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");

#define XGBE_PHY_ID	0x000162d0
#define XGBE_PHY_MASK	0xfffffff0

#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
#define XGBE_PHY_BLWC_PROPERTY		"amd,serdes-blwc"
#define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"

#define XGBE_PHY_SPEEDS		3
#define XGBE_PHY_SPEED_1000	0
#define XGBE_PHY_SPEED_2500	1
#define XGBE_PHY_SPEED_10000	2

#define XGBE_AN_INT_CMPLT	0x01
#define XGBE_AN_INC_LINK	0x02
#define XGBE_AN_PG_RCV		0x04
#define XGBE_AN_INT_MASK	0x07

#define XNP_MCF_NULL_MESSAGE	0x001
#define XNP_ACK_PROCESSED	BIT(12)
#define XNP_MP_FORMATTED	BIT(13)
#define XNP_NP_EXCHANGE		BIT(15)

#define XGBE_PHY_RATECHANGE_COUNT	500

#define XGBE_PHY_KR_TRAINING_START	0x01
#define XGBE_PHY_KR_TRAINING_ENABLE	0x02

#define XGBE_PHY_FEC_ENABLE	0x01
#define XGBE_PHY_FEC_FORWARD	0x02
#define XGBE_PHY_FEC_MASK	0x03

#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
#endif

#ifndef MDIO_PMA_10GBR_FEC_ABILITY
#define MDIO_PMA_10GBR_FEC_ABILITY	0x00aa
#endif

#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
#endif

#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP	0x0016
#endif

#ifndef MDIO_AN_LPX
#define MDIO_AN_LPX	0x0019
#endif

#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK	0x8001
#endif

#ifndef MDIO_AN_INT
#define MDIO_AN_INT	0x8002
#endif

#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G	(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif

/* SerDes integration register offsets */
#define SIR0_KR_RT_1	0x002c
#define SIR0_STATUS	0x0040
#define SIR1_SPEED	0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_CDR_RATE_INDEX	12
#define SIR1_SPEED_CDR_RATE_WIDTH	4
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

#define SPEED_10000_BLWC	0
#define SPEED_10000_CDR		0x7
#define SPEED_10000_PLL		0x1
#define SPEED_10000_PQ		0x1e
#define SPEED_10000_RATE	0x0
#define SPEED_10000_TXAMP	0xa
#define SPEED_10000_WORD	0x7

#define SPEED_2500_BLWC		1
#define SPEED_2500_CDR		0x2
#define SPEED_2500_PLL		0x0
#define SPEED_2500_PQ		0xa
#define SPEED_2500_RATE		0x1
#define SPEED_2500_TXAMP	0xf
#define SPEED_2500_WORD		0x1

#define SPEED_1000_BLWC		1
#define SPEED_1000_CDR		0x2
#define SPEED_1000_PLL		0x0
#define SPEED_1000_PQ		0xa
#define SPEED_1000_RATE		0x3
#define SPEED_1000_TXAMP	0xf
#define SPEED_1000_WORD		0x1

/* SerDes RxTx register offsets */
#define RXTX_REG20	0x0050
#define RXTX_REG114	0x01c8

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7

/* Bit setting and getting macros
 *  The get macro will extract the current bit field value from within
 *  the variable
 *
 *  The set macro will clear the current bit field value within the
 *  variable and then set the bit field of the variable to the
 *  specified value
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)
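
/* Usage sketch for the bit-field helpers above (values are illustrative
 * only, not additional driver logic):
 *
 *	u16 reg = 0;
 *
 *	SET_BITS(reg, 4, 2, 0x3);	// reg == 0x0030, bits [5:4] set
 *	GET_BITS(reg, 4, 2);		// == 0x3, extracts bits [5:4]
 *
 * SET_BITS masks _val to _width bits and clears the field first, so a
 * stale value can never leak into the register image.
 */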

#define XSIR_GET_BITS(_var, _prefix, _field)				\
	GET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
	SET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH, (_val))

/* Macros for reading or writing SerDes integration registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XSIR0_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

#define XSIR1_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

/* Macros for reading or writing SerDes RxTx registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XRXTX_IOREAD(_priv, _reg)					\
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
} while (0)
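
/* The XSIR0/XSIR1/XRXTX accessor families above all follow the same
 * pattern: each *_IOWRITE_BITS() is a read-modify-write built from the
 * register's _INDEX/_WIDTH definitions. Illustratively,
 *
 *	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, 1);
 *
 * expands to roughly:
 *
 *	u16 reg_val = ioread16(priv->sir1_regs + SIR1_SPEED);
 *	SET_BITS(reg_val, SIR1_SPEED_PLLSEL_INDEX,
 *		 SIR1_SPEED_PLLSEL_WIDTH, 1);
 *	iowrite16(reg_val, priv->sir1_regs + SIR1_SPEED);
 */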

static const u32 amd_xgbe_phy_serdes_blwc[] = {
	SPEED_1000_BLWC,
	SPEED_2500_BLWC,
	SPEED_10000_BLWC,
};

static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
	SPEED_1000_CDR,
	SPEED_2500_CDR,
	SPEED_10000_CDR,
};

static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
	SPEED_1000_PQ,
	SPEED_2500_PQ,
	SPEED_10000_PQ,
};

static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
	SPEED_1000_TXAMP,
	SPEED_2500_TXAMP,
	SPEED_10000_TXAMP,
};

enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_ERROR,
};

enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_BPA = 0,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
	AMD_XGBE_RX_ERROR,
};

enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};

struct amd_xgbe_phy_priv {
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct device *dev;

	struct phy_device *phydev;

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	int an_irq;
	char an_irq_name[IFNAMSIZ + 32];
	struct work_struct an_irq_work;
	unsigned int an_irq_allocated;

	unsigned int speed_set;

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings.  The values can be supplied as device
	 *   properties in array format.  The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	u32 serdes_blwc[XGBE_PHY_SPEEDS];
	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;
	enum amd_xgbe_phy_an an_result;
	enum amd_xgbe_phy_an an_state;
	enum amd_xgbe_phy_rx kr_state;
	enum amd_xgbe_phy_rx kx_state;
	struct work_struct an_work;
	struct workqueue_struct *an_workqueue;
	unsigned int an_supported;
	unsigned int parallel_detect;
	unsigned int fec_ability;

	unsigned int lpm_ctrl;		/* CTRL1 for resume */
};
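
/* A hypothetical device-tree override for the per-speed SerDes settings
 * held in the struct above; each property is a triplet ordered
 * <1GbE 2.5GbE 10GbE>, e.g. (using the default values defined earlier):
 *
 *	amd,serdes-blwc = <1 1 0>;
 *	amd,serdes-cdr-rate = <0x2 0x2 0x7>;
 *
 * When a property is absent, probe falls back to the corresponding
 * amd_xgbe_phy_serdes_* defaults.
 */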

static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret |= XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	usleep_range(75, 100);

	ret &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	return 0;
}

static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	/* Assert Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
}

static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	unsigned int wait;
	u16 status;

	/* Release Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);

	/* Wait for Rx and Tx ready */
	wait = XGBE_PHY_RATECHANGE_COUNT;
	while (wait--) {
		usleep_range(50, 75);

		status = XSIR0_IOREAD(priv, SIR0_STATUS);
		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
			return;
	}

	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
		   status);
}
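
/* The three mode routines below share one sequence: enable or disable KR
 * training, select the PCS type and speed via MDIO_CTRL2/MDIO_CTRL1,
 * power-cycle the PCS so the selection takes effect, then reprogram the
 * SerDes rate/word-mode/PLL and tuning values between a ratechange
 * assert and release.
 */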

static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode *mode)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		*mode = AMD_XGBE_MODE_KR;
	else
		*mode = AMD_XGBE_MODE_KX;

	return 0;
}

static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
{
	enum amd_xgbe_phy_mode mode;

	if (amd_xgbe_phy_cur_mode(phydev, &mode))
		return false;

	return (mode == AMD_XGBE_MODE_KR);
}

static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we are in KR switch to KX, and vice-versa */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
			ret = amd_xgbe_phy_gmii_mode(phydev);
		else
			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	} else {
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	}

	return ret;
}

static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode mode)
{
	enum amd_xgbe_phy_mode cur_mode;
	int ret;

	ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
	if (ret)
		return ret;

	if (mode != cur_mode)
		ret = amd_xgbe_phy_switch_mode(phydev);

	return ret;
}

static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
			       bool restart)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_AN_CTRL1_ENABLE;
	if (enable)
		ret |= MDIO_AN_CTRL1_ENABLE;

	if (restart)
		ret |= MDIO_AN_CTRL1_RESTART;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	return 0;
}

static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, true, true);
}

static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, false, false);
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	u16 msg;

	*state = AMD_XGBE_RX_XNP;

	msg = XNP_MCF_NULL_MESSAGE;
	msg |= XNP_MP_FORMATTED;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	unsigned int link_support;
	int ret, ad_reg, lp_reg;

	/* Read Base Ability register 2 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Check for a supported mode, otherwise restart in a different one */
	link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
	if (!(ret & link_support))
		return AMD_XGBE_AN_INCOMPAT_LINK;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	int ad_reg, lp_reg;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_rx *state;
	int ret;

	state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
						: &priv->kx_state;

	switch (*state) {
	case AMD_XGBE_RX_BPA:
		ret = amd_xgbe_an_rx_bpa(phydev, state);
		break;

	case AMD_XGBE_RX_XNP:
		ret = amd_xgbe_an_rx_xnp(phydev, state);
		break;

	default:
		ret = AMD_XGBE_AN_ERROR;
	}

	return ret;
}

static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Be sure we aren't looping trying to negotiate */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		priv->kr_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
		    !(phydev->supported & SUPPORTED_2500baseX_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kx_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	} else {
		priv->kx_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kr_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	}

	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_switch_mode(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_restart_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	return AMD_XGBE_AN_INCOMPAT_LINK;
}

static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
{
	struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;

	/* Interrupt reason must be read and cleared outside of IRQ context */
	disable_irq_nosync(priv->an_irq);

	queue_work(priv->an_workqueue, &priv->an_irq_work);

	return IRQ_HANDLED;
}

static void amd_xgbe_an_irq_work(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_irq_work);

	/* Avoid a race between enabling the IRQ and exiting the work by
	 * waiting for the work to finish and then queueing it
	 */
	flush_work(&priv->an_work);
	queue_work(priv->an_workqueue, &priv->an_work);
}
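
/* Interrupt handling is deliberately split: the hard ISR above only
 * masks the AN interrupt and queues an_irq_work, which flushes and then
 * requeues an_work, so the sleeping MDIO accesses that identify and
 * clear the interrupt reason run in the state machine below, in process
 * context.
 */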

static void amd_xgbe_an_state_machine(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_work);
	struct phy_device *phydev = priv->phydev;
	enum amd_xgbe_phy_an cur_state = priv->an_state;
	int int_reg, int_mask;

	mutex_lock(&priv->an_mutex);

	/* Read the interrupt */
	int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
	if (!int_reg)
		goto out;

next_int:
	if (int_reg < 0) {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = XGBE_AN_INT_MASK;
	} else if (int_reg & XGBE_AN_PG_RCV) {
		priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
		int_mask = XGBE_AN_PG_RCV;
	} else if (int_reg & XGBE_AN_INC_LINK) {
		priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
		int_mask = XGBE_AN_INC_LINK;
	} else if (int_reg & XGBE_AN_INT_CMPLT) {
		priv->an_state = AMD_XGBE_AN_COMPLETE;
		int_mask = XGBE_AN_INT_CMPLT;
	} else {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = 0;
	}

	/* Clear the interrupt to be processed */
	int_reg &= ~int_mask;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);

	priv->an_result = priv->an_state;

again:
	cur_state = priv->an_state;

	switch (priv->an_state) {
	case AMD_XGBE_AN_READY:
		priv->an_supported = 0;
		break;

	case AMD_XGBE_AN_PAGE_RECEIVED:
		priv->an_state = amd_xgbe_an_page_received(phydev);
		priv->an_supported++;
		break;

	case AMD_XGBE_AN_INCOMPAT_LINK:
		priv->an_supported = 0;
		priv->parallel_detect = 0;
		priv->an_state = amd_xgbe_an_incompat_link(phydev);
		break;

	case AMD_XGBE_AN_COMPLETE:
		priv->parallel_detect = priv->an_supported ? 0 : 1;
		netdev_dbg(phydev->attached_dev, "%s successful\n",
			   priv->an_supported ? "Auto negotiation"
					      : "Parallel detection");
		break;

	case AMD_XGBE_AN_NO_LINK:
		break;

	default:
		priv->an_state = AMD_XGBE_AN_ERROR;
	}

	if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	} else if (priv->an_state == AMD_XGBE_AN_ERROR) {
		netdev_err(phydev->attached_dev,
			   "error during auto-negotiation, state=%u\n",
			   cur_state);

		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	}

	if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
		priv->an_result = priv->an_state;
		priv->an_state = AMD_XGBE_AN_READY;
		priv->kr_state = AMD_XGBE_RX_BPA;
		priv->kx_state = AMD_XGBE_RX_BPA;
	}

	if (cur_state != priv->an_state)
		goto again;

	if (int_reg)
		goto next_int;

out:
	enable_irq(priv->an_irq);

	mutex_unlock(&priv->an_mutex);
}

static int amd_xgbe_an_init(struct phy_device *phydev)
{
	int ret;

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return ret;

	if (phydev->supported & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return ret;

	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
	    (phydev->supported & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return ret;

	if (phydev->supported & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->supported & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	return 0;
}
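
/* Note: the soft reset below polls MDIO_CTRL1_RESET up to 50 times with
 * a 20ms sleep between reads, i.e. roughly a one second timeout before
 * giving up with -ETIMEDOUT.
 */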

static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
{
	int count, ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_RESET;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	count = 50;
	do {
		msleep(20);
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
		if (ret < 0)
			return ret;
	} while ((ret & MDIO_CTRL1_RESET) && --count);

	if (ret & MDIO_CTRL1_RESET)
		return -ETIMEDOUT;

	/* Disable auto-negotiation for now */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	return 0;
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!priv->an_irq_allocated) {
		/* Allocate the auto-negotiation workqueue and interrupt */
		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
			 "%s-pcs", netdev_name(netdev));

		priv->an_workqueue =
			create_singlethread_workqueue(priv->an_irq_name);
		if (!priv->an_workqueue) {
			netdev_err(netdev, "phy workqueue creation failed\n");
			return -ENOMEM;
		}

		ret = devm_request_irq(priv->dev, priv->an_irq,
				       amd_xgbe_an_isr, 0, priv->an_irq_name,
				       priv);
		if (ret) {
			netdev_err(netdev, "phy irq request failed\n");
			destroy_workqueue(priv->an_workqueue);
			return ret;
		}

		priv->an_irq_allocated = 1;
	}

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
	if (ret < 0)
		return ret;
	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;

	/* Initialize supported features */
	phydev->supported = SUPPORTED_Autoneg;
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->supported |= SUPPORTED_Backplane;
	phydev->supported |= SUPPORTED_10000baseKR_Full;
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
		phydev->supported |= SUPPORTED_1000baseKX_Full;
		break;
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		phydev->supported |= SUPPORTED_2500baseX_Full;
		break;
	}

	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
		phydev->supported |= SUPPORTED_10000baseR_FEC;

	phydev->advertising = phydev->supported;

	/* Set initial mode - call the mode setting routines
	 * directly to ensure we are properly configured
	 */
	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	else if (phydev->supported & SUPPORTED_1000baseKX_Full)
		ret = amd_xgbe_phy_gmii_mode(phydev);
	else if (phydev->supported & SUPPORTED_2500baseX_Full)
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	else
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);

	return 0;
}

static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
{
	int ret;

	/* Disable auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Validate/Set specified speed */
	switch (phydev->speed) {
	case SPEED_10000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
		break;

	case SPEED_2500:
	case SPEED_1000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
		break;

	default:
		ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* Validate duplex mode */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	phydev->pause = 0;
	phydev->asym_pause = 0;

	return 0;
}

static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Disable auto-negotiation interrupt */
	disable_irq(priv->an_irq);

	/* Start auto-negotiation in a supported mode */
	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
	else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
		 (phydev->supported & SUPPORTED_2500baseX_Full))
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
	else
		ret = -EINVAL;
	if (ret < 0) {
		enable_irq(priv->an_irq);
		return ret;
	}

	/* Disable and stop any in progress auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear any auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_READY;
	priv->kr_state = AMD_XGBE_RX_BPA;
	priv->kx_state = AMD_XGBE_RX_BPA;

	/* Re-enable auto-negotiation interrupt */
	enable_irq(priv->an_irq);

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable and start auto-negotiation */
	return amd_xgbe_phy_restart_an(phydev);
}

static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&priv->an_mutex);

	ret = __amd_xgbe_phy_config_aneg(phydev);

	mutex_unlock(&priv->an_mutex);

	return ret;
}

static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}

static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	if (priv->an_state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	return 0;
}

static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    !priv->parallel_detect) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
			if (ret)
				return ret;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}

			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
			if (ret)
				return ret;
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		if (amd_xgbe_phy_in_kr_mode(phydev)) {
			phydev->speed = SPEED_10000;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}
		}

		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}

static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	priv->lpm_ctrl = ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	mutex_lock(&phydev->lock);

	priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);

	mutex_unlock(&phydev->lock);

	return 0;
}

static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
						unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
			count++;
	}

	return count;
}

static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *phy_pdev;
	struct device *dev, *phy_dev;
	unsigned int phy_resnum, phy_irqnum;
	int ret;

	if (!phydev->bus || !phydev->bus->parent)
		return -EINVAL;

	dev = phydev->bus->parent;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdev = to_platform_device(dev);
	priv->adev = ACPI_COMPANION(dev);
	priv->dev = dev;
	priv->phydev = phydev;
	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);

	if (!priv->adev || acpi_disabled) {
		struct device_node *bus_node;
		struct device_node *phy_node;

		bus_node = priv->dev->of_node;
		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
		if (!phy_node) {
			dev_err(dev, "unable to parse phy-handle\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
		if (!phy_pdev) {
			dev_err(dev, "unable to obtain phy device\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_resnum = 0;
		phy_irqnum = 0;
	} else {
		/* In ACPI, the XGBE and PHY resources are grouped
		 * together with the PHY resources at the end
		 */
		phy_pdev = priv->pdev;
		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_MEM) - 3;
		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_IRQ) - 1;
	}
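
	/* Example: with the ACPI layout above, a device exposing five MEM
	 * resources and two IRQs yields phy_resnum = 2 (rxtx/sir0/sir1 at
	 * MEM indices 2..4) and phy_irqnum = 1 (the last IRQ); the OF path
	 * instead starts both at index 0 on the separate PHY device.
	 */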

	phy_dev = &phy_pdev->dev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_put;
	}

	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_sir1;
	}
	priv->an_irq = ret;

	/* Get the device speed set property */
	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
				       &priv->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		goto err_sir1;
	}

	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_sir1;
	}

	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_BLWC_PROPERTY,
						     priv->serdes_blwc,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_BLWC_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
		       sizeof(priv->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_CDR_RATE_PROPERTY,
						     priv->serdes_cdr_rate,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_CDR_RATE_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
		       sizeof(priv->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_PQ_SKEW_PROPERTY,
						     priv->serdes_pq_skew,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_PQ_SKEW_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
		       sizeof(priv->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_TX_AMP_PROPERTY,
						     priv->serdes_tx_amp,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_TX_AMP_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
		       sizeof(priv->serdes_tx_amp));
	}

	phydev->priv = priv;

	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

	return 0;

err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_put:
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

err_priv:
	devm_kfree(dev, priv);

	return ret;
}

static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}

static int amd_xgbe_match_phy_device(struct phy_device *phydev)
{
	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
}

static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};

module_phy_driver(amd_xgbe_phy_driver);

static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);