  1. /*
  2. * phy-core.c -- Generic Phy framework.
  3. *
  4. * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
  5. *
  6. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/export.h>
  15. #include <linux/module.h>
  16. #include <linux/err.h>
  17. #include <linux/device.h>
  18. #include <linux/slab.h>
  19. #include <linux/of.h>
  20. #include <linux/phy/phy.h>
  21. #include <linux/idr.h>
  22. #include <linux/pm_runtime.h>
/* The device class every created PHY is registered under. */
static struct class *phy_class;
/* Serialises all access to phy_provider_list. */
static DEFINE_MUTEX(phy_provider_mutex);
/* All registered DT PHY providers, looked up via their of_node. */
static LIST_HEAD(phy_provider_list);
/* Allocates the unique per-PHY ids used in device names. */
static DEFINE_IDA(phy_ida);
  27. static void devm_phy_release(struct device *dev, void *res)
  28. {
  29. struct phy *phy = *(struct phy **)res;
  30. phy_put(phy);
  31. }
  32. static void devm_phy_provider_release(struct device *dev, void *res)
  33. {
  34. struct phy_provider *phy_provider = *(struct phy_provider **)res;
  35. of_phy_provider_unregister(phy_provider);
  36. }
  37. static void devm_phy_consume(struct device *dev, void *res)
  38. {
  39. struct phy *phy = *(struct phy **)res;
  40. phy_destroy(phy);
  41. }
  42. static int devm_phy_match(struct device *dev, void *res, void *match_data)
  43. {
  44. return res == match_data;
  45. }
  46. static struct phy *phy_lookup(struct device *device, const char *port)
  47. {
  48. unsigned int count;
  49. struct phy *phy;
  50. struct device *dev;
  51. struct phy_consumer *consumers;
  52. struct class_dev_iter iter;
  53. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  54. while ((dev = class_dev_iter_next(&iter))) {
  55. phy = to_phy(dev);
  56. count = phy->init_data->num_consumers;
  57. consumers = phy->init_data->consumers;
  58. while (count--) {
  59. if (!strcmp(consumers->dev_name, dev_name(device)) &&
  60. !strcmp(consumers->port, port)) {
  61. class_dev_iter_exit(&iter);
  62. return phy;
  63. }
  64. consumers++;
  65. }
  66. }
  67. class_dev_iter_exit(&iter);
  68. return ERR_PTR(-ENODEV);
  69. }
  70. static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
  71. {
  72. struct phy_provider *phy_provider;
  73. list_for_each_entry(phy_provider, &phy_provider_list, list) {
  74. if (phy_provider->dev->of_node == node)
  75. return phy_provider;
  76. }
  77. return ERR_PTR(-EPROBE_DEFER);
  78. }
  79. int phy_pm_runtime_get(struct phy *phy)
  80. {
  81. int ret;
  82. if (!pm_runtime_enabled(&phy->dev))
  83. return -ENOTSUPP;
  84. ret = pm_runtime_get(&phy->dev);
  85. if (ret < 0 && ret != -EINPROGRESS)
  86. pm_runtime_put_noidle(&phy->dev);
  87. return ret;
  88. }
  89. EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
  90. int phy_pm_runtime_get_sync(struct phy *phy)
  91. {
  92. int ret;
  93. if (!pm_runtime_enabled(&phy->dev))
  94. return -ENOTSUPP;
  95. ret = pm_runtime_get_sync(&phy->dev);
  96. if (ret < 0)
  97. pm_runtime_put_sync(&phy->dev);
  98. return ret;
  99. }
  100. EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
  101. int phy_pm_runtime_put(struct phy *phy)
  102. {
  103. if (!pm_runtime_enabled(&phy->dev))
  104. return -ENOTSUPP;
  105. return pm_runtime_put(&phy->dev);
  106. }
  107. EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
  108. int phy_pm_runtime_put_sync(struct phy *phy)
  109. {
  110. if (!pm_runtime_enabled(&phy->dev))
  111. return -ENOTSUPP;
  112. return pm_runtime_put_sync(&phy->dev);
  113. }
  114. EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
  115. void phy_pm_runtime_allow(struct phy *phy)
  116. {
  117. if (!pm_runtime_enabled(&phy->dev))
  118. return;
  119. pm_runtime_allow(&phy->dev);
  120. }
  121. EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
  122. void phy_pm_runtime_forbid(struct phy *phy)
  123. {
  124. if (!pm_runtime_enabled(&phy->dev))
  125. return;
  126. pm_runtime_forbid(&phy->dev);
  127. }
  128. EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
  129. int phy_init(struct phy *phy)
  130. {
  131. int ret;
  132. if (!phy)
  133. return 0;
  134. ret = phy_pm_runtime_get_sync(phy);
  135. if (ret < 0 && ret != -ENOTSUPP)
  136. return ret;
  137. mutex_lock(&phy->mutex);
  138. if (phy->init_count == 0 && phy->ops->init) {
  139. ret = phy->ops->init(phy);
  140. if (ret < 0) {
  141. dev_err(&phy->dev, "phy init failed --> %d\n", ret);
  142. goto out;
  143. }
  144. }
  145. ++phy->init_count;
  146. out:
  147. mutex_unlock(&phy->mutex);
  148. phy_pm_runtime_put(phy);
  149. return ret;
  150. }
  151. EXPORT_SYMBOL_GPL(phy_init);
  152. int phy_exit(struct phy *phy)
  153. {
  154. int ret;
  155. if (!phy)
  156. return 0;
  157. ret = phy_pm_runtime_get_sync(phy);
  158. if (ret < 0 && ret != -ENOTSUPP)
  159. return ret;
  160. mutex_lock(&phy->mutex);
  161. if (phy->init_count == 1 && phy->ops->exit) {
  162. ret = phy->ops->exit(phy);
  163. if (ret < 0) {
  164. dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
  165. goto out;
  166. }
  167. }
  168. --phy->init_count;
  169. out:
  170. mutex_unlock(&phy->mutex);
  171. phy_pm_runtime_put(phy);
  172. return ret;
  173. }
  174. EXPORT_SYMBOL_GPL(phy_exit);
  175. int phy_power_on(struct phy *phy)
  176. {
  177. int ret;
  178. if (!phy)
  179. return 0;
  180. ret = phy_pm_runtime_get_sync(phy);
  181. if (ret < 0 && ret != -ENOTSUPP)
  182. return ret;
  183. mutex_lock(&phy->mutex);
  184. if (phy->power_count == 0 && phy->ops->power_on) {
  185. ret = phy->ops->power_on(phy);
  186. if (ret < 0) {
  187. dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
  188. goto out;
  189. }
  190. }
  191. ++phy->power_count;
  192. mutex_unlock(&phy->mutex);
  193. return 0;
  194. out:
  195. mutex_unlock(&phy->mutex);
  196. phy_pm_runtime_put_sync(phy);
  197. return ret;
  198. }
  199. EXPORT_SYMBOL_GPL(phy_power_on);
  200. int phy_power_off(struct phy *phy)
  201. {
  202. int ret;
  203. if (!phy)
  204. return 0;
  205. mutex_lock(&phy->mutex);
  206. if (phy->power_count == 1 && phy->ops->power_off) {
  207. ret = phy->ops->power_off(phy);
  208. if (ret < 0) {
  209. dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
  210. mutex_unlock(&phy->mutex);
  211. return ret;
  212. }
  213. }
  214. --phy->power_count;
  215. mutex_unlock(&phy->mutex);
  216. phy_pm_runtime_put(phy);
  217. return 0;
  218. }
  219. EXPORT_SYMBOL_GPL(phy_power_off);
  220. /**
  221. * of_phy_get() - lookup and obtain a reference to a phy by phandle
  222. * @dev: device that requests this phy
  223. * @index: the index of the phy
  224. *
  225. * Returns the phy associated with the given phandle value,
  226. * after getting a refcount to it or -ENODEV if there is no such phy or
  227. * -EPROBE_DEFER if there is a phandle to the phy, but the device is
  228. * not yet loaded. This function uses of_xlate call back function provided
  229. * while registering the phy_provider to find the phy instance.
  230. */
  231. static struct phy *of_phy_get(struct device *dev, int index)
  232. {
  233. int ret;
  234. struct phy_provider *phy_provider;
  235. struct phy *phy = NULL;
  236. struct of_phandle_args args;
  237. ret = of_parse_phandle_with_args(dev->of_node, "phys", "#phy-cells",
  238. index, &args);
  239. if (ret) {
  240. dev_dbg(dev, "failed to get phy in %s node\n",
  241. dev->of_node->full_name);
  242. return ERR_PTR(-ENODEV);
  243. }
  244. mutex_lock(&phy_provider_mutex);
  245. phy_provider = of_phy_provider_lookup(args.np);
  246. if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
  247. phy = ERR_PTR(-EPROBE_DEFER);
  248. goto err0;
  249. }
  250. phy = phy_provider->of_xlate(phy_provider->dev, &args);
  251. module_put(phy_provider->owner);
  252. err0:
  253. mutex_unlock(&phy_provider_mutex);
  254. of_node_put(args.np);
  255. return phy;
  256. }
  257. /**
  258. * phy_put() - release the PHY
  259. * @phy: the phy returned by phy_get()
  260. *
  261. * Releases a refcount the caller received from phy_get().
  262. */
  263. void phy_put(struct phy *phy)
  264. {
  265. if (!phy || IS_ERR(phy))
  266. return;
  267. module_put(phy->ops->owner);
  268. put_device(&phy->dev);
  269. }
  270. EXPORT_SYMBOL_GPL(phy_put);
  271. /**
  272. * devm_phy_put() - release the PHY
  273. * @dev: device that wants to release this phy
  274. * @phy: the phy returned by devm_phy_get()
  275. *
  276. * destroys the devres associated with this phy and invokes phy_put
  277. * to release the phy.
  278. */
  279. void devm_phy_put(struct device *dev, struct phy *phy)
  280. {
  281. int r;
  282. if (!phy)
  283. return;
  284. r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
  285. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  286. }
  287. EXPORT_SYMBOL_GPL(devm_phy_put);
  288. /**
  289. * of_phy_simple_xlate() - returns the phy instance from phy provider
  290. * @dev: the PHY provider device
  291. * @args: of_phandle_args (not used here)
  292. *
  293. * Intended to be used by phy provider for the common case where #phy-cells is
  294. * 0. For other cases where #phy-cells is greater than '0', the phy provider
  295. * should provide a custom of_xlate function that reads the *args* and returns
  296. * the appropriate phy.
  297. */
  298. struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
  299. *args)
  300. {
  301. struct phy *phy;
  302. struct class_dev_iter iter;
  303. struct device_node *node = dev->of_node;
  304. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  305. while ((dev = class_dev_iter_next(&iter))) {
  306. phy = to_phy(dev);
  307. if (node != phy->dev.of_node)
  308. continue;
  309. class_dev_iter_exit(&iter);
  310. return phy;
  311. }
  312. class_dev_iter_exit(&iter);
  313. return ERR_PTR(-ENODEV);
  314. }
  315. EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
  316. /**
  317. * phy_get() - lookup and obtain a reference to a phy.
  318. * @dev: device that requests this phy
  319. * @string: the phy name as given in the dt data or the name of the controller
  320. * port for non-dt case
  321. *
  322. * Returns the phy driver, after getting a refcount to it; or
  323. * -ENODEV if there is no such phy. The caller is responsible for
  324. * calling phy_put() to release that count.
  325. */
  326. struct phy *phy_get(struct device *dev, const char *string)
  327. {
  328. int index = 0;
  329. struct phy *phy;
  330. if (string == NULL) {
  331. dev_WARN(dev, "missing string\n");
  332. return ERR_PTR(-EINVAL);
  333. }
  334. if (dev->of_node) {
  335. index = of_property_match_string(dev->of_node, "phy-names",
  336. string);
  337. phy = of_phy_get(dev, index);
  338. } else {
  339. phy = phy_lookup(dev, string);
  340. }
  341. if (IS_ERR(phy))
  342. return phy;
  343. if (!try_module_get(phy->ops->owner))
  344. return ERR_PTR(-EPROBE_DEFER);
  345. get_device(&phy->dev);
  346. return phy;
  347. }
  348. EXPORT_SYMBOL_GPL(phy_get);
  349. /**
  350. * phy_optional_get() - lookup and obtain a reference to an optional phy.
  351. * @dev: device that requests this phy
  352. * @string: the phy name as given in the dt data or the name of the controller
  353. * port for non-dt case
  354. *
  355. * Returns the phy driver, after getting a refcount to it; or
  356. * NULL if there is no such phy. The caller is responsible for
  357. * calling phy_put() to release that count.
  358. */
  359. struct phy *phy_optional_get(struct device *dev, const char *string)
  360. {
  361. struct phy *phy = phy_get(dev, string);
  362. if (PTR_ERR(phy) == -ENODEV)
  363. phy = NULL;
  364. return phy;
  365. }
  366. EXPORT_SYMBOL_GPL(phy_optional_get);
  367. /**
  368. * devm_phy_get() - lookup and obtain a reference to a phy.
  369. * @dev: device that requests this phy
  370. * @string: the phy name as given in the dt data or phy device name
  371. * for non-dt case
  372. *
  373. * Gets the phy using phy_get(), and associates a device with it using
  374. * devres. On driver detach, release function is invoked on the devres data,
  375. * then, devres data is freed.
  376. */
  377. struct phy *devm_phy_get(struct device *dev, const char *string)
  378. {
  379. struct phy **ptr, *phy;
  380. ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
  381. if (!ptr)
  382. return ERR_PTR(-ENOMEM);
  383. phy = phy_get(dev, string);
  384. if (!IS_ERR(phy)) {
  385. *ptr = phy;
  386. devres_add(dev, ptr);
  387. } else {
  388. devres_free(ptr);
  389. }
  390. return phy;
  391. }
  392. EXPORT_SYMBOL_GPL(devm_phy_get);
  393. /**
  394. * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
  395. * @dev: device that requests this phy
  396. * @string: the phy name as given in the dt data or phy device name
  397. * for non-dt case
  398. *
  399. * Gets the phy using phy_get(), and associates a device with it using
  400. * devres. On driver detach, release function is invoked on the devres
  401. * data, then, devres data is freed. This differs to devm_phy_get() in
  402. * that if the phy does not exist, it is not considered an error and
  403. * -ENODEV will not be returned. Instead the NULL phy is returned,
  404. * which can be passed to all other phy consumer calls.
  405. */
  406. struct phy *devm_phy_optional_get(struct device *dev, const char *string)
  407. {
  408. struct phy *phy = devm_phy_get(dev, string);
  409. if (PTR_ERR(phy) == -ENODEV)
  410. phy = NULL;
  411. return phy;
  412. }
  413. EXPORT_SYMBOL_GPL(devm_phy_optional_get);
  414. /**
  415. * phy_create() - create a new phy
  416. * @dev: device that is creating the new phy
  417. * @ops: function pointers for performing phy operations
  418. * @init_data: contains the list of PHY consumers or NULL
  419. *
  420. * Called to create a phy using phy framework.
  421. */
  422. struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
  423. struct phy_init_data *init_data)
  424. {
  425. int ret;
  426. int id;
  427. struct phy *phy;
  428. if (WARN_ON(!dev))
  429. return ERR_PTR(-EINVAL);
  430. phy = kzalloc(sizeof(*phy), GFP_KERNEL);
  431. if (!phy)
  432. return ERR_PTR(-ENOMEM);
  433. id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
  434. if (id < 0) {
  435. dev_err(dev, "unable to get id\n");
  436. ret = id;
  437. goto free_phy;
  438. }
  439. device_initialize(&phy->dev);
  440. mutex_init(&phy->mutex);
  441. phy->dev.class = phy_class;
  442. phy->dev.parent = dev;
  443. phy->dev.of_node = dev->of_node;
  444. phy->id = id;
  445. phy->ops = ops;
  446. phy->init_data = init_data;
  447. ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
  448. if (ret)
  449. goto put_dev;
  450. ret = device_add(&phy->dev);
  451. if (ret)
  452. goto put_dev;
  453. if (pm_runtime_enabled(dev)) {
  454. pm_runtime_enable(&phy->dev);
  455. pm_runtime_no_callbacks(&phy->dev);
  456. }
  457. return phy;
  458. put_dev:
  459. put_device(&phy->dev);
  460. ida_remove(&phy_ida, phy->id);
  461. free_phy:
  462. kfree(phy);
  463. return ERR_PTR(ret);
  464. }
  465. EXPORT_SYMBOL_GPL(phy_create);
  466. /**
  467. * devm_phy_create() - create a new phy
  468. * @dev: device that is creating the new phy
  469. * @ops: function pointers for performing phy operations
  470. * @init_data: contains the list of PHY consumers or NULL
  471. *
  472. * Creates a new PHY device adding it to the PHY class.
  473. * While at that, it also associates the device with the phy using devres.
  474. * On driver detach, release function is invoked on the devres data,
  475. * then, devres data is freed.
  476. */
  477. struct phy *devm_phy_create(struct device *dev, const struct phy_ops *ops,
  478. struct phy_init_data *init_data)
  479. {
  480. struct phy **ptr, *phy;
  481. ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
  482. if (!ptr)
  483. return ERR_PTR(-ENOMEM);
  484. phy = phy_create(dev, ops, init_data);
  485. if (!IS_ERR(phy)) {
  486. *ptr = phy;
  487. devres_add(dev, ptr);
  488. } else {
  489. devres_free(ptr);
  490. }
  491. return phy;
  492. }
  493. EXPORT_SYMBOL_GPL(devm_phy_create);
  494. /**
  495. * phy_destroy() - destroy the phy
  496. * @phy: the phy to be destroyed
  497. *
  498. * Called to destroy the phy.
  499. */
  500. void phy_destroy(struct phy *phy)
  501. {
  502. pm_runtime_disable(&phy->dev);
  503. device_unregister(&phy->dev);
  504. }
  505. EXPORT_SYMBOL_GPL(phy_destroy);
  506. /**
  507. * devm_phy_destroy() - destroy the PHY
  508. * @dev: device that wants to release this phy
  509. * @phy: the phy returned by devm_phy_get()
  510. *
  511. * destroys the devres associated with this phy and invokes phy_destroy
  512. * to destroy the phy.
  513. */
  514. void devm_phy_destroy(struct device *dev, struct phy *phy)
  515. {
  516. int r;
  517. r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
  518. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  519. }
  520. EXPORT_SYMBOL_GPL(devm_phy_destroy);
  521. /**
  522. * __of_phy_provider_register() - create/register phy provider with the framework
  523. * @dev: struct device of the phy provider
  524. * @owner: the module owner containing of_xlate
  525. * @of_xlate: function pointer to obtain phy instance from phy provider
  526. *
  527. * Creates struct phy_provider from dev and of_xlate function pointer.
  528. * This is used in the case of dt boot for finding the phy instance from
  529. * phy provider.
  530. */
  531. struct phy_provider *__of_phy_provider_register(struct device *dev,
  532. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  533. struct of_phandle_args *args))
  534. {
  535. struct phy_provider *phy_provider;
  536. phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
  537. if (!phy_provider)
  538. return ERR_PTR(-ENOMEM);
  539. phy_provider->dev = dev;
  540. phy_provider->owner = owner;
  541. phy_provider->of_xlate = of_xlate;
  542. mutex_lock(&phy_provider_mutex);
  543. list_add_tail(&phy_provider->list, &phy_provider_list);
  544. mutex_unlock(&phy_provider_mutex);
  545. return phy_provider;
  546. }
  547. EXPORT_SYMBOL_GPL(__of_phy_provider_register);
  548. /**
  549. * __devm_of_phy_provider_register() - create/register phy provider with the
  550. * framework
  551. * @dev: struct device of the phy provider
  552. * @owner: the module owner containing of_xlate
  553. * @of_xlate: function pointer to obtain phy instance from phy provider
  554. *
  555. * Creates struct phy_provider from dev and of_xlate function pointer.
  556. * This is used in the case of dt boot for finding the phy instance from
  557. * phy provider. While at that, it also associates the device with the
  558. * phy provider using devres. On driver detach, release function is invoked
  559. * on the devres data, then, devres data is freed.
  560. */
  561. struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
  562. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  563. struct of_phandle_args *args))
  564. {
  565. struct phy_provider **ptr, *phy_provider;
  566. ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
  567. if (!ptr)
  568. return ERR_PTR(-ENOMEM);
  569. phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
  570. if (!IS_ERR(phy_provider)) {
  571. *ptr = phy_provider;
  572. devres_add(dev, ptr);
  573. } else {
  574. devres_free(ptr);
  575. }
  576. return phy_provider;
  577. }
  578. EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
  579. /**
  580. * of_phy_provider_unregister() - unregister phy provider from the framework
  581. * @phy_provider: phy provider returned by of_phy_provider_register()
  582. *
  583. * Removes the phy_provider created using of_phy_provider_register().
  584. */
  585. void of_phy_provider_unregister(struct phy_provider *phy_provider)
  586. {
  587. if (IS_ERR(phy_provider))
  588. return;
  589. mutex_lock(&phy_provider_mutex);
  590. list_del(&phy_provider->list);
  591. kfree(phy_provider);
  592. mutex_unlock(&phy_provider_mutex);
  593. }
  594. EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
  595. /**
  596. * devm_of_phy_provider_unregister() - remove phy provider from the framework
  597. * @dev: struct device of the phy provider
  598. *
  599. * destroys the devres associated with this phy provider and invokes
  600. * of_phy_provider_unregister to unregister the phy provider.
  601. */
  602. void devm_of_phy_provider_unregister(struct device *dev,
  603. struct phy_provider *phy_provider) {
  604. int r;
  605. r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
  606. phy_provider);
  607. dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
  608. }
  609. EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
  610. /**
  611. * phy_release() - release the phy
  612. * @dev: the dev member within phy
  613. *
  614. * When the last reference to the device is removed, it is called
  615. * from the embedded kobject as release method.
  616. */
  617. static void phy_release(struct device *dev)
  618. {
  619. struct phy *phy;
  620. phy = to_phy(dev);
  621. dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
  622. ida_remove(&phy_ida, phy->id);
  623. kfree(phy);
  624. }
  625. static int __init phy_core_init(void)
  626. {
  627. phy_class = class_create(THIS_MODULE, "phy");
  628. if (IS_ERR(phy_class)) {
  629. pr_err("failed to create phy class --> %ld\n",
  630. PTR_ERR(phy_class));
  631. return PTR_ERR(phy_class);
  632. }
  633. phy_class->dev_release = phy_release;
  634. return 0;
  635. }
  636. module_init(phy_core_init);
/* Module teardown: remove the PHY class created in phy_core_init(). */
static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");