phy-core.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872
  1. /*
  2. * phy-core.c -- Generic Phy framework.
  3. *
  4. * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
  5. *
  6. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/export.h>
  15. #include <linux/module.h>
  16. #include <linux/err.h>
  17. #include <linux/device.h>
  18. #include <linux/slab.h>
  19. #include <linux/of.h>
  20. #include <linux/phy/phy.h>
  21. #include <linux/idr.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/regulator/consumer.h>
/* device class that every created PHY is registered under */
static struct class *phy_class;
/* serializes all access to phy_provider_list */
static DEFINE_MUTEX(phy_provider_mutex);
/* list of all registered struct phy_provider instances */
static LIST_HEAD(phy_provider_list);
/* allocator for the unique per-PHY id used in the device name */
static DEFINE_IDA(phy_ida);
  28. static void devm_phy_release(struct device *dev, void *res)
  29. {
  30. struct phy *phy = *(struct phy **)res;
  31. phy_put(phy);
  32. }
  33. static void devm_phy_provider_release(struct device *dev, void *res)
  34. {
  35. struct phy_provider *phy_provider = *(struct phy_provider **)res;
  36. of_phy_provider_unregister(phy_provider);
  37. }
  38. static void devm_phy_consume(struct device *dev, void *res)
  39. {
  40. struct phy *phy = *(struct phy **)res;
  41. phy_destroy(phy);
  42. }
  43. static int devm_phy_match(struct device *dev, void *res, void *match_data)
  44. {
  45. return res == match_data;
  46. }
  47. static struct phy *phy_lookup(struct device *device, const char *port)
  48. {
  49. unsigned int count;
  50. struct phy *phy;
  51. struct device *dev;
  52. struct phy_consumer *consumers;
  53. struct class_dev_iter iter;
  54. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  55. while ((dev = class_dev_iter_next(&iter))) {
  56. phy = to_phy(dev);
  57. if (!phy->init_data)
  58. continue;
  59. count = phy->init_data->num_consumers;
  60. consumers = phy->init_data->consumers;
  61. while (count--) {
  62. if (!strcmp(consumers->dev_name, dev_name(device)) &&
  63. !strcmp(consumers->port, port)) {
  64. class_dev_iter_exit(&iter);
  65. return phy;
  66. }
  67. consumers++;
  68. }
  69. }
  70. class_dev_iter_exit(&iter);
  71. return ERR_PTR(-ENODEV);
  72. }
  73. static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
  74. {
  75. struct phy_provider *phy_provider;
  76. struct device_node *child;
  77. list_for_each_entry(phy_provider, &phy_provider_list, list) {
  78. if (phy_provider->dev->of_node == node)
  79. return phy_provider;
  80. for_each_child_of_node(phy_provider->dev->of_node, child)
  81. if (child == node)
  82. return phy_provider;
  83. }
  84. return ERR_PTR(-EPROBE_DEFER);
  85. }
  86. int phy_pm_runtime_get(struct phy *phy)
  87. {
  88. int ret;
  89. if (!pm_runtime_enabled(&phy->dev))
  90. return -ENOTSUPP;
  91. ret = pm_runtime_get(&phy->dev);
  92. if (ret < 0 && ret != -EINPROGRESS)
  93. pm_runtime_put_noidle(&phy->dev);
  94. return ret;
  95. }
  96. EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
  97. int phy_pm_runtime_get_sync(struct phy *phy)
  98. {
  99. int ret;
  100. if (!pm_runtime_enabled(&phy->dev))
  101. return -ENOTSUPP;
  102. ret = pm_runtime_get_sync(&phy->dev);
  103. if (ret < 0)
  104. pm_runtime_put_sync(&phy->dev);
  105. return ret;
  106. }
  107. EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
  108. int phy_pm_runtime_put(struct phy *phy)
  109. {
  110. if (!pm_runtime_enabled(&phy->dev))
  111. return -ENOTSUPP;
  112. return pm_runtime_put(&phy->dev);
  113. }
  114. EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
  115. int phy_pm_runtime_put_sync(struct phy *phy)
  116. {
  117. if (!pm_runtime_enabled(&phy->dev))
  118. return -ENOTSUPP;
  119. return pm_runtime_put_sync(&phy->dev);
  120. }
  121. EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
  122. void phy_pm_runtime_allow(struct phy *phy)
  123. {
  124. if (!pm_runtime_enabled(&phy->dev))
  125. return;
  126. pm_runtime_allow(&phy->dev);
  127. }
  128. EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
  129. void phy_pm_runtime_forbid(struct phy *phy)
  130. {
  131. if (!pm_runtime_enabled(&phy->dev))
  132. return;
  133. pm_runtime_forbid(&phy->dev);
  134. }
  135. EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
  136. int phy_init(struct phy *phy)
  137. {
  138. int ret;
  139. if (!phy)
  140. return 0;
  141. ret = phy_pm_runtime_get_sync(phy);
  142. if (ret < 0 && ret != -ENOTSUPP)
  143. return ret;
  144. mutex_lock(&phy->mutex);
  145. if (phy->init_count == 0 && phy->ops->init) {
  146. ret = phy->ops->init(phy);
  147. if (ret < 0) {
  148. dev_err(&phy->dev, "phy init failed --> %d\n", ret);
  149. goto out;
  150. }
  151. } else {
  152. ret = 0; /* Override possible ret == -ENOTSUPP */
  153. }
  154. ++phy->init_count;
  155. out:
  156. mutex_unlock(&phy->mutex);
  157. phy_pm_runtime_put(phy);
  158. return ret;
  159. }
  160. EXPORT_SYMBOL_GPL(phy_init);
  161. int phy_exit(struct phy *phy)
  162. {
  163. int ret;
  164. if (!phy)
  165. return 0;
  166. ret = phy_pm_runtime_get_sync(phy);
  167. if (ret < 0 && ret != -ENOTSUPP)
  168. return ret;
  169. mutex_lock(&phy->mutex);
  170. if (phy->init_count == 1 && phy->ops->exit) {
  171. ret = phy->ops->exit(phy);
  172. if (ret < 0) {
  173. dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
  174. goto out;
  175. }
  176. }
  177. --phy->init_count;
  178. out:
  179. mutex_unlock(&phy->mutex);
  180. phy_pm_runtime_put(phy);
  181. return ret;
  182. }
  183. EXPORT_SYMBOL_GPL(phy_exit);
  184. int phy_power_on(struct phy *phy)
  185. {
  186. int ret;
  187. if (!phy)
  188. return 0;
  189. if (phy->pwr) {
  190. ret = regulator_enable(phy->pwr);
  191. if (ret)
  192. return ret;
  193. }
  194. ret = phy_pm_runtime_get_sync(phy);
  195. if (ret < 0 && ret != -ENOTSUPP)
  196. return ret;
  197. mutex_lock(&phy->mutex);
  198. if (phy->power_count == 0 && phy->ops->power_on) {
  199. ret = phy->ops->power_on(phy);
  200. if (ret < 0) {
  201. dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
  202. goto out;
  203. }
  204. } else {
  205. ret = 0; /* Override possible ret == -ENOTSUPP */
  206. }
  207. ++phy->power_count;
  208. mutex_unlock(&phy->mutex);
  209. return 0;
  210. out:
  211. mutex_unlock(&phy->mutex);
  212. phy_pm_runtime_put_sync(phy);
  213. if (phy->pwr)
  214. regulator_disable(phy->pwr);
  215. return ret;
  216. }
  217. EXPORT_SYMBOL_GPL(phy_power_on);
  218. int phy_power_off(struct phy *phy)
  219. {
  220. int ret;
  221. if (!phy)
  222. return 0;
  223. mutex_lock(&phy->mutex);
  224. if (phy->power_count == 1 && phy->ops->power_off) {
  225. ret = phy->ops->power_off(phy);
  226. if (ret < 0) {
  227. dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
  228. mutex_unlock(&phy->mutex);
  229. return ret;
  230. }
  231. }
  232. --phy->power_count;
  233. mutex_unlock(&phy->mutex);
  234. phy_pm_runtime_put(phy);
  235. if (phy->pwr)
  236. regulator_disable(phy->pwr);
  237. return 0;
  238. }
  239. EXPORT_SYMBOL_GPL(phy_power_off);
  240. /**
  241. * _of_phy_get() - lookup and obtain a reference to a phy by phandle
  242. * @np: device_node for which to get the phy
  243. * @index: the index of the phy
  244. *
  245. * Returns the phy associated with the given phandle value,
  246. * after getting a refcount to it or -ENODEV if there is no such phy or
  247. * -EPROBE_DEFER if there is a phandle to the phy, but the device is
  248. * not yet loaded. This function uses of_xlate call back function provided
  249. * while registering the phy_provider to find the phy instance.
  250. */
  251. static struct phy *_of_phy_get(struct device_node *np, int index)
  252. {
  253. int ret;
  254. struct phy_provider *phy_provider;
  255. struct phy *phy = NULL;
  256. struct of_phandle_args args;
  257. ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
  258. index, &args);
  259. if (ret)
  260. return ERR_PTR(-ENODEV);
  261. mutex_lock(&phy_provider_mutex);
  262. phy_provider = of_phy_provider_lookup(args.np);
  263. if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
  264. phy = ERR_PTR(-EPROBE_DEFER);
  265. goto err0;
  266. }
  267. phy = phy_provider->of_xlate(phy_provider->dev, &args);
  268. module_put(phy_provider->owner);
  269. err0:
  270. mutex_unlock(&phy_provider_mutex);
  271. of_node_put(args.np);
  272. return phy;
  273. }
  274. /**
  275. * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
  276. * @np: device_node for which to get the phy
  277. * @con_id: name of the phy from device's point of view
  278. *
  279. * Returns the phy driver, after getting a refcount to it; or
  280. * -ENODEV if there is no such phy. The caller is responsible for
  281. * calling phy_put() to release that count.
  282. */
  283. struct phy *of_phy_get(struct device_node *np, const char *con_id)
  284. {
  285. struct phy *phy = NULL;
  286. int index = 0;
  287. if (con_id)
  288. index = of_property_match_string(np, "phy-names", con_id);
  289. phy = _of_phy_get(np, index);
  290. if (IS_ERR(phy))
  291. return phy;
  292. if (!try_module_get(phy->ops->owner))
  293. return ERR_PTR(-EPROBE_DEFER);
  294. get_device(&phy->dev);
  295. return phy;
  296. }
  297. EXPORT_SYMBOL_GPL(of_phy_get);
  298. /**
  299. * phy_put() - release the PHY
  300. * @phy: the phy returned by phy_get()
  301. *
  302. * Releases a refcount the caller received from phy_get().
  303. */
  304. void phy_put(struct phy *phy)
  305. {
  306. if (!phy || IS_ERR(phy))
  307. return;
  308. module_put(phy->ops->owner);
  309. put_device(&phy->dev);
  310. }
  311. EXPORT_SYMBOL_GPL(phy_put);
  312. /**
  313. * devm_phy_put() - release the PHY
  314. * @dev: device that wants to release this phy
  315. * @phy: the phy returned by devm_phy_get()
  316. *
  317. * destroys the devres associated with this phy and invokes phy_put
  318. * to release the phy.
  319. */
  320. void devm_phy_put(struct device *dev, struct phy *phy)
  321. {
  322. int r;
  323. if (!phy)
  324. return;
  325. r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
  326. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  327. }
  328. EXPORT_SYMBOL_GPL(devm_phy_put);
  329. /**
  330. * of_phy_simple_xlate() - returns the phy instance from phy provider
  331. * @dev: the PHY provider device
  332. * @args: of_phandle_args (not used here)
  333. *
  334. * Intended to be used by phy provider for the common case where #phy-cells is
  335. * 0. For other cases where #phy-cells is greater than '0', the phy provider
  336. * should provide a custom of_xlate function that reads the *args* and returns
  337. * the appropriate phy.
  338. */
  339. struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
  340. *args)
  341. {
  342. struct phy *phy;
  343. struct class_dev_iter iter;
  344. struct device_node *node = dev->of_node;
  345. struct device_node *child;
  346. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  347. while ((dev = class_dev_iter_next(&iter))) {
  348. phy = to_phy(dev);
  349. if (node != phy->dev.of_node) {
  350. for_each_child_of_node(node, child) {
  351. if (child == phy->dev.of_node)
  352. goto phy_found;
  353. }
  354. continue;
  355. }
  356. phy_found:
  357. class_dev_iter_exit(&iter);
  358. return phy;
  359. }
  360. class_dev_iter_exit(&iter);
  361. return ERR_PTR(-ENODEV);
  362. }
  363. EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
  364. /**
  365. * phy_get() - lookup and obtain a reference to a phy.
  366. * @dev: device that requests this phy
  367. * @string: the phy name as given in the dt data or the name of the controller
  368. * port for non-dt case
  369. *
  370. * Returns the phy driver, after getting a refcount to it; or
  371. * -ENODEV if there is no such phy. The caller is responsible for
  372. * calling phy_put() to release that count.
  373. */
  374. struct phy *phy_get(struct device *dev, const char *string)
  375. {
  376. int index = 0;
  377. struct phy *phy;
  378. if (string == NULL) {
  379. dev_WARN(dev, "missing string\n");
  380. return ERR_PTR(-EINVAL);
  381. }
  382. if (dev->of_node) {
  383. index = of_property_match_string(dev->of_node, "phy-names",
  384. string);
  385. phy = _of_phy_get(dev->of_node, index);
  386. } else {
  387. phy = phy_lookup(dev, string);
  388. }
  389. if (IS_ERR(phy))
  390. return phy;
  391. if (!try_module_get(phy->ops->owner))
  392. return ERR_PTR(-EPROBE_DEFER);
  393. get_device(&phy->dev);
  394. return phy;
  395. }
  396. EXPORT_SYMBOL_GPL(phy_get);
  397. /**
  398. * phy_optional_get() - lookup and obtain a reference to an optional phy.
  399. * @dev: device that requests this phy
  400. * @string: the phy name as given in the dt data or the name of the controller
  401. * port for non-dt case
  402. *
  403. * Returns the phy driver, after getting a refcount to it; or
  404. * NULL if there is no such phy. The caller is responsible for
  405. * calling phy_put() to release that count.
  406. */
  407. struct phy *phy_optional_get(struct device *dev, const char *string)
  408. {
  409. struct phy *phy = phy_get(dev, string);
  410. if (PTR_ERR(phy) == -ENODEV)
  411. phy = NULL;
  412. return phy;
  413. }
  414. EXPORT_SYMBOL_GPL(phy_optional_get);
  415. /**
  416. * devm_phy_get() - lookup and obtain a reference to a phy.
  417. * @dev: device that requests this phy
  418. * @string: the phy name as given in the dt data or phy device name
  419. * for non-dt case
  420. *
  421. * Gets the phy using phy_get(), and associates a device with it using
  422. * devres. On driver detach, release function is invoked on the devres data,
  423. * then, devres data is freed.
  424. */
  425. struct phy *devm_phy_get(struct device *dev, const char *string)
  426. {
  427. struct phy **ptr, *phy;
  428. ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
  429. if (!ptr)
  430. return ERR_PTR(-ENOMEM);
  431. phy = phy_get(dev, string);
  432. if (!IS_ERR(phy)) {
  433. *ptr = phy;
  434. devres_add(dev, ptr);
  435. } else {
  436. devres_free(ptr);
  437. }
  438. return phy;
  439. }
  440. EXPORT_SYMBOL_GPL(devm_phy_get);
  441. /**
  442. * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
  443. * @dev: device that requests this phy
  444. * @string: the phy name as given in the dt data or phy device name
  445. * for non-dt case
  446. *
  447. * Gets the phy using phy_get(), and associates a device with it using
  448. * devres. On driver detach, release function is invoked on the devres
  449. * data, then, devres data is freed. This differs to devm_phy_get() in
  450. * that if the phy does not exist, it is not considered an error and
  451. * -ENODEV will not be returned. Instead the NULL phy is returned,
  452. * which can be passed to all other phy consumer calls.
  453. */
  454. struct phy *devm_phy_optional_get(struct device *dev, const char *string)
  455. {
  456. struct phy *phy = devm_phy_get(dev, string);
  457. if (PTR_ERR(phy) == -ENODEV)
  458. phy = NULL;
  459. return phy;
  460. }
  461. EXPORT_SYMBOL_GPL(devm_phy_optional_get);
  462. /**
  463. * devm_of_phy_get() - lookup and obtain a reference to a phy.
  464. * @dev: device that requests this phy
  465. * @np: node containing the phy
  466. * @con_id: name of the phy from device's point of view
  467. *
  468. * Gets the phy using of_phy_get(), and associates a device with it using
  469. * devres. On driver detach, release function is invoked on the devres data,
  470. * then, devres data is freed.
  471. */
  472. struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
  473. const char *con_id)
  474. {
  475. struct phy **ptr, *phy;
  476. ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
  477. if (!ptr)
  478. return ERR_PTR(-ENOMEM);
  479. phy = of_phy_get(np, con_id);
  480. if (!IS_ERR(phy)) {
  481. *ptr = phy;
  482. devres_add(dev, ptr);
  483. } else {
  484. devres_free(ptr);
  485. }
  486. return phy;
  487. }
  488. EXPORT_SYMBOL_GPL(devm_of_phy_get);
  489. /**
  490. * phy_create() - create a new phy
  491. * @dev: device that is creating the new phy
  492. * @node: device node of the phy
  493. * @ops: function pointers for performing phy operations
  494. * @init_data: contains the list of PHY consumers or NULL
  495. *
  496. * Called to create a phy using phy framework.
  497. */
  498. struct phy *phy_create(struct device *dev, struct device_node *node,
  499. const struct phy_ops *ops,
  500. struct phy_init_data *init_data)
  501. {
  502. int ret;
  503. int id;
  504. struct phy *phy;
  505. if (WARN_ON(!dev))
  506. return ERR_PTR(-EINVAL);
  507. phy = kzalloc(sizeof(*phy), GFP_KERNEL);
  508. if (!phy)
  509. return ERR_PTR(-ENOMEM);
  510. id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
  511. if (id < 0) {
  512. dev_err(dev, "unable to get id\n");
  513. ret = id;
  514. goto free_phy;
  515. }
  516. /* phy-supply */
  517. phy->pwr = regulator_get_optional(dev, "phy");
  518. if (IS_ERR(phy->pwr)) {
  519. if (PTR_ERR(phy->pwr) == -EPROBE_DEFER) {
  520. ret = -EPROBE_DEFER;
  521. goto free_ida;
  522. }
  523. phy->pwr = NULL;
  524. }
  525. device_initialize(&phy->dev);
  526. mutex_init(&phy->mutex);
  527. phy->dev.class = phy_class;
  528. phy->dev.parent = dev;
  529. phy->dev.of_node = node ?: dev->of_node;
  530. phy->id = id;
  531. phy->ops = ops;
  532. phy->init_data = init_data;
  533. ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
  534. if (ret)
  535. goto put_dev;
  536. ret = device_add(&phy->dev);
  537. if (ret)
  538. goto put_dev;
  539. if (pm_runtime_enabled(dev)) {
  540. pm_runtime_enable(&phy->dev);
  541. pm_runtime_no_callbacks(&phy->dev);
  542. }
  543. return phy;
  544. put_dev:
  545. put_device(&phy->dev); /* calls phy_release() which frees resources */
  546. return ERR_PTR(ret);
  547. free_ida:
  548. ida_simple_remove(&phy_ida, phy->id);
  549. free_phy:
  550. kfree(phy);
  551. return ERR_PTR(ret);
  552. }
  553. EXPORT_SYMBOL_GPL(phy_create);
  554. /**
  555. * devm_phy_create() - create a new phy
  556. * @dev: device that is creating the new phy
  557. * @node: device node of the phy
  558. * @ops: function pointers for performing phy operations
  559. * @init_data: contains the list of PHY consumers or NULL
  560. *
  561. * Creates a new PHY device adding it to the PHY class.
  562. * While at that, it also associates the device with the phy using devres.
  563. * On driver detach, release function is invoked on the devres data,
  564. * then, devres data is freed.
  565. */
  566. struct phy *devm_phy_create(struct device *dev, struct device_node *node,
  567. const struct phy_ops *ops,
  568. struct phy_init_data *init_data)
  569. {
  570. struct phy **ptr, *phy;
  571. ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
  572. if (!ptr)
  573. return ERR_PTR(-ENOMEM);
  574. phy = phy_create(dev, node, ops, init_data);
  575. if (!IS_ERR(phy)) {
  576. *ptr = phy;
  577. devres_add(dev, ptr);
  578. } else {
  579. devres_free(ptr);
  580. }
  581. return phy;
  582. }
  583. EXPORT_SYMBOL_GPL(devm_phy_create);
/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Disables runtime PM on the phy device and unregisters it; the final
 * put inside device_unregister() ends up in phy_release(), which frees
 * the regulator, the ida id and the phy itself.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
  596. /**
  597. * devm_phy_destroy() - destroy the PHY
  598. * @dev: device that wants to release this phy
  599. * @phy: the phy returned by devm_phy_get()
  600. *
  601. * destroys the devres associated with this phy and invokes phy_destroy
  602. * to destroy the phy.
  603. */
  604. void devm_phy_destroy(struct device *dev, struct phy *phy)
  605. {
  606. int r;
  607. r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
  608. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  609. }
  610. EXPORT_SYMBOL_GPL(devm_phy_destroy);
  611. /**
  612. * __of_phy_provider_register() - create/register phy provider with the framework
  613. * @dev: struct device of the phy provider
  614. * @owner: the module owner containing of_xlate
  615. * @of_xlate: function pointer to obtain phy instance from phy provider
  616. *
  617. * Creates struct phy_provider from dev and of_xlate function pointer.
  618. * This is used in the case of dt boot for finding the phy instance from
  619. * phy provider.
  620. */
  621. struct phy_provider *__of_phy_provider_register(struct device *dev,
  622. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  623. struct of_phandle_args *args))
  624. {
  625. struct phy_provider *phy_provider;
  626. phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
  627. if (!phy_provider)
  628. return ERR_PTR(-ENOMEM);
  629. phy_provider->dev = dev;
  630. phy_provider->owner = owner;
  631. phy_provider->of_xlate = of_xlate;
  632. mutex_lock(&phy_provider_mutex);
  633. list_add_tail(&phy_provider->list, &phy_provider_list);
  634. mutex_unlock(&phy_provider_mutex);
  635. return phy_provider;
  636. }
  637. EXPORT_SYMBOL_GPL(__of_phy_provider_register);
  638. /**
  639. * __devm_of_phy_provider_register() - create/register phy provider with the
  640. * framework
  641. * @dev: struct device of the phy provider
  642. * @owner: the module owner containing of_xlate
  643. * @of_xlate: function pointer to obtain phy instance from phy provider
  644. *
  645. * Creates struct phy_provider from dev and of_xlate function pointer.
  646. * This is used in the case of dt boot for finding the phy instance from
  647. * phy provider. While at that, it also associates the device with the
  648. * phy provider using devres. On driver detach, release function is invoked
  649. * on the devres data, then, devres data is freed.
  650. */
  651. struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
  652. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  653. struct of_phandle_args *args))
  654. {
  655. struct phy_provider **ptr, *phy_provider;
  656. ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
  657. if (!ptr)
  658. return ERR_PTR(-ENOMEM);
  659. phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
  660. if (!IS_ERR(phy_provider)) {
  661. *ptr = phy_provider;
  662. devres_add(dev, ptr);
  663. } else {
  664. devres_free(ptr);
  665. }
  666. return phy_provider;
  667. }
  668. EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
  669. /**
  670. * of_phy_provider_unregister() - unregister phy provider from the framework
  671. * @phy_provider: phy provider returned by of_phy_provider_register()
  672. *
  673. * Removes the phy_provider created using of_phy_provider_register().
  674. */
  675. void of_phy_provider_unregister(struct phy_provider *phy_provider)
  676. {
  677. if (IS_ERR(phy_provider))
  678. return;
  679. mutex_lock(&phy_provider_mutex);
  680. list_del(&phy_provider->list);
  681. kfree(phy_provider);
  682. mutex_unlock(&phy_provider_mutex);
  683. }
  684. EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
  685. /**
  686. * devm_of_phy_provider_unregister() - remove phy provider from the framework
  687. * @dev: struct device of the phy provider
  688. *
  689. * destroys the devres associated with this phy provider and invokes
  690. * of_phy_provider_unregister to unregister the phy provider.
  691. */
  692. void devm_of_phy_provider_unregister(struct device *dev,
  693. struct phy_provider *phy_provider) {
  694. int r;
  695. r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
  696. phy_provider);
  697. dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
  698. }
  699. EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
  700. /**
  701. * phy_release() - release the phy
  702. * @dev: the dev member within phy
  703. *
  704. * When the last reference to the device is removed, it is called
  705. * from the embedded kobject as release method.
  706. */
  707. static void phy_release(struct device *dev)
  708. {
  709. struct phy *phy;
  710. phy = to_phy(dev);
  711. dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
  712. regulator_put(phy->pwr);
  713. ida_simple_remove(&phy_ida, phy->id);
  714. kfree(phy);
  715. }
  716. static int __init phy_core_init(void)
  717. {
  718. phy_class = class_create(THIS_MODULE, "phy");
  719. if (IS_ERR(phy_class)) {
  720. pr_err("failed to create phy class --> %ld\n",
  721. PTR_ERR(phy_class));
  722. return PTR_ERR(phy_class);
  723. }
  724. phy_class->dev_release = phy_release;
  725. return 0;
  726. }
  727. module_init(phy_core_init);
  728. static void __exit phy_core_exit(void)
  729. {
  730. class_destroy(phy_class);
  731. }
  732. module_exit(phy_core_exit);
  733. MODULE_DESCRIPTION("Generic PHY Framework");
  734. MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
  735. MODULE_LICENSE("GPL v2");