/*
 * drivers/net/phy/phy.c
 *
 * Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link)
		pr_info("%s - Link is Up - %d/%s\n",
			dev_name(&phydev->dev),
			phydev->speed,
			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
	else
		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);

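/*
 * Example (illustrative note, not part of the original file): given the
 * format strings above, a MAC driver calling phy_print_status() from its
 * adjust_link handler produces log lines of the form
 *
 *	<phy-dev-name> - Link is Up - 100/Full
 *	<phy-dev-name> - Link is Down
 *
 * where <phy-dev-name> is whatever dev_name() reports for the PHY device,
 * prefixed by pr_fmt() with the module name.
 */
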
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	int err = 0;

	if (phydev->drv->ack_interrupt)
		err = phydev->drv->ack_interrupt(phydev);

	return err;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	int err = 0;

	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		err = phydev->drv->config_intr(phydev);

	return err;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Reads the status register and returns 0 if
 * auto-negotiation is incomplete, or if there was an error.
 * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	int retval;

	retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting. Returns the index of the last setting if
 * none of the others match.
 */
static inline int phy_find_setting(int speed, int duplex)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed ||
		settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 * than or equal to the one pointed to by idx, as determined by
 * the mask in features. Returns the index of the last setting
 * if nothing else matches.
 */
static inline int phy_find_valid(int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

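/*
 * Worked example (illustrative note, not part of the original file): assume
 * a forced configuration of SPEED_1000/DUPLEX_FULL on a PHY whose supported
 * mask only contains the 100baseT and 10baseT bits. phy_find_setting()
 * returns the index of the 1000/FULL entry, and phy_find_valid() then walks
 * forward through settings[] until it reaches 100/FULL, the first entry
 * whose SUPPORTED bit is present in the mask, so the PHY ends up forced to
 * 100/FULL.
 */
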
/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

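/*
 * Usage sketch (illustrative only; the foo_* names and struct foo_priv are
 * assumptions for a hypothetical MAC driver, not part of this file): a
 * driver that keeps its attached phy_device in netdev_priv() can delegate
 * its ethtool get/set_settings callbacks to the two helpers above.
 *
 *	static int foo_get_settings(struct net_device *dev,
 *				    struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int foo_set_settings(struct net_device *dev,
 *				    struct ethtool_cmd *cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 */
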
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		  struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	int ret = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
				    (val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
					 (val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			ret = phy_init_hw(phydev);
		break;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL(phy_mii_ioctl);

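/*
 * Usage sketch (illustrative only; foo_ioctl and struct foo_priv are
 * assumptions for a hypothetical MAC driver): drivers usually expose this
 * helper through their .ndo_do_ioctl callback so userspace MII tools can
 * reach the PHY registers.
 *
 *	static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!netif_running(dev) || !priv->phydev)
 *			return -EINVAL;
 *		return phy_mii_ioctl(priv->phydev, ifr, cmd);
 *	}
 */
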
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 * them), and then calls the driver's config_aneg function.
 * If the PHYCONTROL Layer is operating, we change the state to
 * reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	err = phydev->drv->config_aneg(phydev);

	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 * @handler: callback function for state change notifications
 *
 * Description: The PHY infrastructure can run a state machine
 * which tracks whether the PHY is starting up, negotiating,
 * etc. This function starts the timer which tracks the state
 * of the PHY. If you want to be notified when the state changes,
 * pass in the callback @handler, otherwise, pass NULL. If you
 * want to maintain your own state machine, do not call this
 * function.
 */
void phy_start_machine(struct phy_device *phydev,
		       void (*handler)(struct net_device *))
{
	phydev->adjust_state = handler;

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

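/*
 * Note (added for clarity, not in the original file): the delayed work
 * queued above runs phy_state_machine() after roughly one second (HZ
 * jiffies), and phy_state_machine() then requeues itself every
 * PHY_STATE_TIME seconds until phy_stop_machine() cancels it, so the
 * state machine keeps polling. MAC drivers normally do not call this
 * directly; it is started for them when they attach to the PHY (e.g.
 * via phy_connect()).
 */
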
/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 * (unless it wasn't up yet). This function must be called BEFORE
 * phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);

	phydev->adjust_state = NULL;
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;	/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here. A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	return err;
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);

	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 * If this fails, then we set irq to PHY_POLL.
 * Otherwise, we enable the interrupts in the PHY.
 * This should only be called with a valid IRQ number.
 * Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
			IRQF_SHARED,
			"phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
EXPORT_SYMBOL(phy_start_interrupts);

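/*
 * Usage sketch (illustrative only; priv->phydev is an assumed driver-private
 * pointer to the attached PHY): a MAC driver whose PHY has a usable
 * interrupt line typically calls this alongside phy_start() when bringing
 * the interface up. If the phy_device was registered with irq == PHY_POLL,
 * the state machine simply polls and this call is not needed.
 *
 *	phy_start(priv->phydev);
 *	if (priv->phydev->irq != PHY_POLL)
 *		phy_start_interrupts(priv->phydev);
 */
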
/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);

	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 * handle PHY-related work. Used during startup to start the
 * PHY, and after a call to phy_stop() to resume operation.
 * Also used to indicate the MDIO bus has cleared an error
 * condition.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);

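/*
 * Usage sketch (illustrative only; the foo_* names, struct foo_priv and
 * foo_adjust_link are assumptions for a hypothetical MAC driver): phy_start()
 * is normally called from the driver's ndo_open path once the PHY has been
 * attached, and phy_stop() from ndo_stop before the MAC is shut down.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		priv->phydev = phy_connect(dev, priv->phy_name, foo_adjust_link,
 *					   PHY_INTERFACE_MODE_RGMII);
 *		if (IS_ERR(priv->phydev))
 *			return PTR_ERR(priv->phydev);
 *
 *		phy_start(priv->phydev);
 *		return 0;
 *	}
 *
 *	static int foo_close(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		phy_stop(priv->phydev);
 *		phy_disconnect(priv->phydev);
 *		return 0;
 *	}
 */
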
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	int needs_aneg = 0, do_suspend = 0;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->adjust_state)
		phydev->adjust_state(phydev->attached_dev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = 1;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);

		if (err < 0)
			break;

		/* If the link is down, give up on
		 * negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done. Break
		 * if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--) {
			needs_aneg = 1;
			/* If we have the magic_aneg bit,
			 * we try again */
			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
				break;
		}
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = 1;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling or ignoring interrupts
		 */
		if (!phy_interrupt_is_valid(phydev))
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);

		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
					PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = 1;
		}
		break;
	case PHY_RESUMING:
		err = phy_clear_interrupt(phydev);

		if (err)
			break;

		err = phy_config_interrupt(phydev,
				PHY_INTERRUPT_ENABLED);

		if (err)
			break;

		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're
			 * still waiting for AN */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else
					phydev->state = PHY_NOLINK;
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else
				phydev->state = PHY_NOLINK;
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);

	if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			PHY_STATE_TIME * HZ);
}

void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	cancel_work_sync(&phydev->phy_queue);
	phydev->link = new_link;
	schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

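/*
 * Note (added for clarity, not in the original file): this helper is meant
 * for MAC drivers that learn about link changes in-band, for example from
 * their own link-status interrupt, rather than from the PHY's interrupt
 * pin. Such a driver can report the new state from its handler:
 *
 *	phy_mac_interrupt(priv->phydev, link_is_up ? 1 : 0);
 *
 * (priv->phydev and link_is_up are assumed names for the driver's attached
 * PHY pointer and its freshly read link status.)
 */
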
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: it reads data from the MMD registers (clause 22 access to
 * clause 45 registers) of the specified phy address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14 // Read MMD data
 */
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				 int addr)
{
	u32 ret;

	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Read the content of the MMD's selected register */
	ret = bus->read(bus, addr, MII_MMD_DATA);

	return ret;
}

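/*
 * Worked example (illustrative note, not part of the original file):
 * reading the EEE ability register 3.20 (MDIO_PCS_EEE_ABLE in MMD 3, the
 * PCS) through this clause-22 indirection boils down to four MDIO cycles
 * on registers 13 (MII_MMD_CTRL) and 14 (MII_MMD_DATA):
 *
 *	write 13 <- 3			// select DEVAD 3 (PCS), address mode
 *	write 14 <- 20			// register 3.20
 *	write 13 <- 3 | NOINCR		// switch to data mode, no post-inc
 *	read  14 -> EEE ability bits
 *
 * which is what phy_read_mmd_indirect(bus, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS,
 * phydev->addr) performs via mmd_phy_indirect() above.
 */
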
/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @bus: the target MII bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * phy address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
				   int addr, u32 data)
{
	mmd_phy_indirect(bus, prtad, devad, addr);

	/* Write the data into MMD's selected register */
	bus->write(bus, addr, MII_MMD_DATA, data);
}

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	int ret = -EPROTONOSUPPORT;

	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * Also, the EEE feature is active when the core is operating with
	 * MII, GMII or RGMII.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int idx, status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap < 0)
			return eee_cap;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp < 0)
			return eee_lp;

		eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv < 0)
			return eee_adv;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		idx = phy_find_setting(phydev->speed, phydev->duplex);
		if (!(lp & adv & settings[idx].setting))
			goto eee_exit;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr, val);
		}

		ret = 0; /* EEE supported */
	}

eee_exit:
	return ret;
}
EXPORT_SYMBOL(phy_init_eee);

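/*
 * Usage sketch (illustrative only; priv->phydev and priv->eee_active are
 * assumed driver-private names): a MAC driver that wants to enable its own
 * EEE support typically calls this after the link comes up, e.g. from its
 * adjust_link handler, and only treats EEE as usable when it returns 0:
 *
 *	if (phy_init_eee(priv->phydev, true) == 0)
 *		priv->eee_active = true;
 *
 * Passing true asks the PHY to stop the xMII clock during LPI, which the
 * MAC must be able to tolerate.
 */
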
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

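/*
 * Usage sketch (illustrative only; the foo_* names and struct foo_priv are
 * assumptions for a hypothetical MAC driver): the two helpers above map
 * naturally onto the ethtool_ops get_eee/set_eee callbacks.
 *
 *	static int foo_get_eee(struct net_device *dev, struct ethtool_eee *edata)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_get_eee(priv->phydev, edata);
 *	}
 *
 *	static int foo_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_set_eee(priv->phydev, edata);
 *	}
 */
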
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);