/*
 * phy-core.c -- Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
  13. #include <linux/kernel.h>
  14. #include <linux/export.h>
  15. #include <linux/module.h>
  16. #include <linux/err.h>
  17. #include <linux/device.h>
  18. #include <linux/slab.h>
  19. #include <linux/of.h>
  20. #include <linux/phy/phy.h>
  21. #include <linux/idr.h>
  22. #include <linux/pm_runtime.h>
/* Device class every created phy is registered under. */
static struct class *phy_class;
/* Serializes all access to phy_provider_list. */
static DEFINE_MUTEX(phy_provider_mutex);
/* All currently registered DT phy providers. */
static LIST_HEAD(phy_provider_list);
/* Allocator for the unique per-phy device id used in the device name. */
static DEFINE_IDA(phy_ida);
  27. static void devm_phy_release(struct device *dev, void *res)
  28. {
  29. struct phy *phy = *(struct phy **)res;
  30. phy_put(phy);
  31. }
  32. static void devm_phy_provider_release(struct device *dev, void *res)
  33. {
  34. struct phy_provider *phy_provider = *(struct phy_provider **)res;
  35. of_phy_provider_unregister(phy_provider);
  36. }
  37. static void devm_phy_consume(struct device *dev, void *res)
  38. {
  39. struct phy *phy = *(struct phy **)res;
  40. phy_destroy(phy);
  41. }
  42. static int devm_phy_match(struct device *dev, void *res, void *match_data)
  43. {
  44. return res == match_data;
  45. }
  46. static struct phy *phy_lookup(struct device *device, const char *port)
  47. {
  48. unsigned int count;
  49. struct phy *phy;
  50. struct device *dev;
  51. struct phy_consumer *consumers;
  52. struct class_dev_iter iter;
  53. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  54. while ((dev = class_dev_iter_next(&iter))) {
  55. phy = to_phy(dev);
  56. count = phy->init_data->num_consumers;
  57. consumers = phy->init_data->consumers;
  58. while (count--) {
  59. if (!strcmp(consumers->dev_name, dev_name(device)) &&
  60. !strcmp(consumers->port, port)) {
  61. class_dev_iter_exit(&iter);
  62. return phy;
  63. }
  64. consumers++;
  65. }
  66. }
  67. class_dev_iter_exit(&iter);
  68. return ERR_PTR(-ENODEV);
  69. }
/**
 * of_phy_provider_lookup() - find the registered provider for a DT node
 * @node: device_node of the phy provider to look for
 *
 * Caller must hold phy_provider_mutex.  Returns ERR_PTR(-EPROBE_DEFER)
 * when no provider has been registered for @node, since the provider's
 * driver may simply not have probed yet.
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
  79. int phy_pm_runtime_get(struct phy *phy)
  80. {
  81. int ret;
  82. if (!pm_runtime_enabled(&phy->dev))
  83. return -ENOTSUPP;
  84. ret = pm_runtime_get(&phy->dev);
  85. if (ret < 0 && ret != -EINPROGRESS)
  86. pm_runtime_put_noidle(&phy->dev);
  87. return ret;
  88. }
  89. EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
  90. int phy_pm_runtime_get_sync(struct phy *phy)
  91. {
  92. int ret;
  93. if (!pm_runtime_enabled(&phy->dev))
  94. return -ENOTSUPP;
  95. ret = pm_runtime_get_sync(&phy->dev);
  96. if (ret < 0)
  97. pm_runtime_put_sync(&phy->dev);
  98. return ret;
  99. }
  100. EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
  101. int phy_pm_runtime_put(struct phy *phy)
  102. {
  103. if (!pm_runtime_enabled(&phy->dev))
  104. return -ENOTSUPP;
  105. return pm_runtime_put(&phy->dev);
  106. }
  107. EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
  108. int phy_pm_runtime_put_sync(struct phy *phy)
  109. {
  110. if (!pm_runtime_enabled(&phy->dev))
  111. return -ENOTSUPP;
  112. return pm_runtime_put_sync(&phy->dev);
  113. }
  114. EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
  115. void phy_pm_runtime_allow(struct phy *phy)
  116. {
  117. if (!pm_runtime_enabled(&phy->dev))
  118. return;
  119. pm_runtime_allow(&phy->dev);
  120. }
  121. EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
  122. void phy_pm_runtime_forbid(struct phy *phy)
  123. {
  124. if (!pm_runtime_enabled(&phy->dev))
  125. return;
  126. pm_runtime_forbid(&phy->dev);
  127. }
  128. EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
/**
 * phy_init() - initialize the phy
 * @phy: the phy to initialize (NULL is a valid no-op and returns 0)
 *
 * Reference counted: ops->init runs only on the first call, later calls
 * just bump init_count.  The phy is runtime-resumed around the
 * operation when runtime PM is enabled.  Returns 0 on success or a
 * negative error code.
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* -ENOTSUPP just means runtime PM is not enabled; carry on */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0; /* Override possible ret == -ENOTSUPP */
	}
	/* count even when there is no init op, so phy_exit() balances */
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);
  154. int phy_exit(struct phy *phy)
  155. {
  156. int ret;
  157. if (!phy)
  158. return 0;
  159. ret = phy_pm_runtime_get_sync(phy);
  160. if (ret < 0 && ret != -ENOTSUPP)
  161. return ret;
  162. mutex_lock(&phy->mutex);
  163. if (phy->init_count == 1 && phy->ops->exit) {
  164. ret = phy->ops->exit(phy);
  165. if (ret < 0) {
  166. dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
  167. goto out;
  168. }
  169. }
  170. --phy->init_count;
  171. out:
  172. mutex_unlock(&phy->mutex);
  173. phy_pm_runtime_put(phy);
  174. return ret;
  175. }
  176. EXPORT_SYMBOL_GPL(phy_exit);
/**
 * phy_power_on() - power on the phy
 * @phy: the phy to power on (NULL is a valid no-op and returns 0)
 *
 * Reference counted: ops->power_on runs only on the 0 -> 1 transition.
 * On success the runtime PM reference taken here is deliberately kept
 * (no put on the success path) and is released by phy_power_off(); on
 * failure it is dropped before returning.
 */
int phy_power_on(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* -ENOTSUPP just means runtime PM is not enabled; carry on */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto out;
		}
	} else {
		ret = 0; /* Override possible ret == -ENOTSUPP */
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	/* keep the runtime PM reference until phy_power_off() */
	return 0;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
/**
 * phy_power_off() - power off the phy
 * @phy: the phy to power off (NULL is a valid no-op and returns 0)
 *
 * Reference counted: ops->power_off runs only on the 1 -> 0 transition.
 * The trailing phy_pm_runtime_put() releases the runtime PM reference
 * that phy_power_on() kept on its success path.
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			/* leave power_count untouched so a retry is possible */
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
		index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* phy_provider_mutex protects both the list and the lookup */
	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		/* provider not registered yet, or its module is unloading */
		phy = ERR_PTR(-EPROBE_DEFER);
		goto err0;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);
	/* balances the try_module_get() above; of_xlate has completed */
	module_put(phy_provider->owner);

err0:
	mutex_unlock(&phy_provider_mutex);
	/* drop the node reference taken by of_parse_phandle_with_args() */
	of_node_put(args.np);

	return phy;
}
  258. /**
  259. * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
  260. * @np: device_node for which to get the phy
  261. * @con_id: name of the phy from device's point of view
  262. *
  263. * Returns the phy driver, after getting a refcount to it; or
  264. * -ENODEV if there is no such phy. The caller is responsible for
  265. * calling phy_put() to release that count.
  266. */
  267. struct phy *of_phy_get(struct device_node *np, const char *con_id)
  268. {
  269. struct phy *phy = NULL;
  270. int index = 0;
  271. if (con_id)
  272. index = of_property_match_string(np, "phy-names", con_id);
  273. phy = _of_phy_get(np, index);
  274. if (IS_ERR(phy))
  275. return phy;
  276. if (!try_module_get(phy->ops->owner))
  277. return ERR_PTR(-EPROBE_DEFER);
  278. get_device(&phy->dev);
  279. return phy;
  280. }
  281. EXPORT_SYMBOL_GPL(of_phy_get);
  282. /**
  283. * phy_put() - release the PHY
  284. * @phy: the phy returned by phy_get()
  285. *
  286. * Releases a refcount the caller received from phy_get().
  287. */
  288. void phy_put(struct phy *phy)
  289. {
  290. if (!phy || IS_ERR(phy))
  291. return;
  292. module_put(phy->ops->owner);
  293. put_device(&phy->dev);
  294. }
  295. EXPORT_SYMBOL_GPL(phy_put);
  296. /**
  297. * devm_phy_put() - release the PHY
  298. * @dev: device that wants to release this phy
  299. * @phy: the phy returned by devm_phy_get()
  300. *
  301. * destroys the devres associated with this phy and invokes phy_put
  302. * to release the phy.
  303. */
  304. void devm_phy_put(struct device *dev, struct phy *phy)
  305. {
  306. int r;
  307. if (!phy)
  308. return;
  309. r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
  310. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  311. }
  312. EXPORT_SYMBOL_GPL(devm_phy_put);
  313. /**
  314. * of_phy_simple_xlate() - returns the phy instance from phy provider
  315. * @dev: the PHY provider device
  316. * @args: of_phandle_args (not used here)
  317. *
  318. * Intended to be used by phy provider for the common case where #phy-cells is
  319. * 0. For other cases where #phy-cells is greater than '0', the phy provider
  320. * should provide a custom of_xlate function that reads the *args* and returns
  321. * the appropriate phy.
  322. */
  323. struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
  324. *args)
  325. {
  326. struct phy *phy;
  327. struct class_dev_iter iter;
  328. struct device_node *node = dev->of_node;
  329. class_dev_iter_init(&iter, phy_class, NULL, NULL);
  330. while ((dev = class_dev_iter_next(&iter))) {
  331. phy = to_phy(dev);
  332. if (node != phy->dev.of_node)
  333. continue;
  334. class_dev_iter_exit(&iter);
  335. return phy;
  336. }
  337. class_dev_iter_exit(&iter);
  338. return ERR_PTR(-ENODEV);
  339. }
  340. EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
  341. /**
  342. * phy_get() - lookup and obtain a reference to a phy.
  343. * @dev: device that requests this phy
  344. * @string: the phy name as given in the dt data or the name of the controller
  345. * port for non-dt case
  346. *
  347. * Returns the phy driver, after getting a refcount to it; or
  348. * -ENODEV if there is no such phy. The caller is responsible for
  349. * calling phy_put() to release that count.
  350. */
  351. struct phy *phy_get(struct device *dev, const char *string)
  352. {
  353. int index = 0;
  354. struct phy *phy;
  355. if (string == NULL) {
  356. dev_WARN(dev, "missing string\n");
  357. return ERR_PTR(-EINVAL);
  358. }
  359. if (dev->of_node) {
  360. index = of_property_match_string(dev->of_node, "phy-names",
  361. string);
  362. phy = _of_phy_get(dev->of_node, index);
  363. } else {
  364. phy = phy_lookup(dev, string);
  365. }
  366. if (IS_ERR(phy))
  367. return phy;
  368. if (!try_module_get(phy->ops->owner))
  369. return ERR_PTR(-EPROBE_DEFER);
  370. get_device(&phy->dev);
  371. return phy;
  372. }
  373. EXPORT_SYMBOL_GPL(phy_get);
  374. /**
  375. * phy_optional_get() - lookup and obtain a reference to an optional phy.
  376. * @dev: device that requests this phy
  377. * @string: the phy name as given in the dt data or the name of the controller
  378. * port for non-dt case
  379. *
  380. * Returns the phy driver, after getting a refcount to it; or
  381. * NULL if there is no such phy. The caller is responsible for
  382. * calling phy_put() to release that count.
  383. */
  384. struct phy *phy_optional_get(struct device *dev, const char *string)
  385. {
  386. struct phy *phy = phy_get(dev, string);
  387. if (PTR_ERR(phy) == -ENODEV)
  388. phy = NULL;
  389. return phy;
  390. }
  391. EXPORT_SYMBOL_GPL(phy_optional_get);
  392. /**
  393. * devm_phy_get() - lookup and obtain a reference to a phy.
  394. * @dev: device that requests this phy
  395. * @string: the phy name as given in the dt data or phy device name
  396. * for non-dt case
  397. *
  398. * Gets the phy using phy_get(), and associates a device with it using
  399. * devres. On driver detach, release function is invoked on the devres data,
  400. * then, devres data is freed.
  401. */
  402. struct phy *devm_phy_get(struct device *dev, const char *string)
  403. {
  404. struct phy **ptr, *phy;
  405. ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
  406. if (!ptr)
  407. return ERR_PTR(-ENOMEM);
  408. phy = phy_get(dev, string);
  409. if (!IS_ERR(phy)) {
  410. *ptr = phy;
  411. devres_add(dev, ptr);
  412. } else {
  413. devres_free(ptr);
  414. }
  415. return phy;
  416. }
  417. EXPORT_SYMBOL_GPL(devm_phy_get);
  418. /**
  419. * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
  420. * @dev: device that requests this phy
  421. * @string: the phy name as given in the dt data or phy device name
  422. * for non-dt case
  423. *
  424. * Gets the phy using phy_get(), and associates a device with it using
  425. * devres. On driver detach, release function is invoked on the devres
  426. * data, then, devres data is freed. This differs to devm_phy_get() in
  427. * that if the phy does not exist, it is not considered an error and
  428. * -ENODEV will not be returned. Instead the NULL phy is returned,
  429. * which can be passed to all other phy consumer calls.
  430. */
  431. struct phy *devm_phy_optional_get(struct device *dev, const char *string)
  432. {
  433. struct phy *phy = devm_phy_get(dev, string);
  434. if (PTR_ERR(phy) == -ENODEV)
  435. phy = NULL;
  436. return phy;
  437. }
  438. EXPORT_SYMBOL_GPL(devm_phy_optional_get);
  439. /**
  440. * devm_of_phy_get() - lookup and obtain a reference to a phy.
  441. * @dev: device that requests this phy
  442. * @np: node containing the phy
  443. * @con_id: name of the phy from device's point of view
  444. *
  445. * Gets the phy using of_phy_get(), and associates a device with it using
  446. * devres. On driver detach, release function is invoked on the devres data,
  447. * then, devres data is freed.
  448. */
  449. struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
  450. const char *con_id)
  451. {
  452. struct phy **ptr, *phy;
  453. ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
  454. if (!ptr)
  455. return ERR_PTR(-ENOMEM);
  456. phy = of_phy_get(np, con_id);
  457. if (!IS_ERR(phy)) {
  458. *ptr = phy;
  459. devres_add(dev, ptr);
  460. } else {
  461. devres_free(ptr);
  462. }
  463. return phy;
  464. }
  465. EXPORT_SYMBOL_GPL(devm_of_phy_get);
  466. /**
  467. * phy_create() - create a new phy
  468. * @dev: device that is creating the new phy
  469. * @ops: function pointers for performing phy operations
  470. * @init_data: contains the list of PHY consumers or NULL
  471. *
  472. * Called to create a phy using phy framework.
  473. */
  474. struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
  475. struct phy_init_data *init_data)
  476. {
  477. int ret;
  478. int id;
  479. struct phy *phy;
  480. if (WARN_ON(!dev))
  481. return ERR_PTR(-EINVAL);
  482. phy = kzalloc(sizeof(*phy), GFP_KERNEL);
  483. if (!phy)
  484. return ERR_PTR(-ENOMEM);
  485. id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
  486. if (id < 0) {
  487. dev_err(dev, "unable to get id\n");
  488. ret = id;
  489. goto free_phy;
  490. }
  491. device_initialize(&phy->dev);
  492. mutex_init(&phy->mutex);
  493. phy->dev.class = phy_class;
  494. phy->dev.parent = dev;
  495. phy->dev.of_node = dev->of_node;
  496. phy->id = id;
  497. phy->ops = ops;
  498. phy->init_data = init_data;
  499. ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
  500. if (ret)
  501. goto put_dev;
  502. ret = device_add(&phy->dev);
  503. if (ret)
  504. goto put_dev;
  505. if (pm_runtime_enabled(dev)) {
  506. pm_runtime_enable(&phy->dev);
  507. pm_runtime_no_callbacks(&phy->dev);
  508. }
  509. return phy;
  510. put_dev:
  511. put_device(&phy->dev);
  512. ida_remove(&phy_ida, phy->id);
  513. free_phy:
  514. kfree(phy);
  515. return ERR_PTR(ret);
  516. }
  517. EXPORT_SYMBOL_GPL(phy_create);
  518. /**
  519. * devm_phy_create() - create a new phy
  520. * @dev: device that is creating the new phy
  521. * @ops: function pointers for performing phy operations
  522. * @init_data: contains the list of PHY consumers or NULL
  523. *
  524. * Creates a new PHY device adding it to the PHY class.
  525. * While at that, it also associates the device with the phy using devres.
  526. * On driver detach, release function is invoked on the devres data,
  527. * then, devres data is freed.
  528. */
  529. struct phy *devm_phy_create(struct device *dev, const struct phy_ops *ops,
  530. struct phy_init_data *init_data)
  531. {
  532. struct phy **ptr, *phy;
  533. ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
  534. if (!ptr)
  535. return ERR_PTR(-ENOMEM);
  536. phy = phy_create(dev, ops, init_data);
  537. if (!IS_ERR(phy)) {
  538. *ptr = phy;
  539. devres_add(dev, ptr);
  540. } else {
  541. devres_free(ptr);
  542. }
  543. return phy;
  544. }
  545. EXPORT_SYMBOL_GPL(devm_phy_create);
/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Disables runtime PM and unregisters the phy device.  The memory
 * itself is freed by phy_release() once the last reference is dropped.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
  558. /**
  559. * devm_phy_destroy() - destroy the PHY
  560. * @dev: device that wants to release this phy
  561. * @phy: the phy returned by devm_phy_get()
  562. *
  563. * destroys the devres associated with this phy and invokes phy_destroy
  564. * to destroy the phy.
  565. */
  566. void devm_phy_destroy(struct device *dev, struct phy *phy)
  567. {
  568. int r;
  569. r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
  570. dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
  571. }
  572. EXPORT_SYMBOL_GPL(devm_phy_destroy);
  573. /**
  574. * __of_phy_provider_register() - create/register phy provider with the framework
  575. * @dev: struct device of the phy provider
  576. * @owner: the module owner containing of_xlate
  577. * @of_xlate: function pointer to obtain phy instance from phy provider
  578. *
  579. * Creates struct phy_provider from dev and of_xlate function pointer.
  580. * This is used in the case of dt boot for finding the phy instance from
  581. * phy provider.
  582. */
  583. struct phy_provider *__of_phy_provider_register(struct device *dev,
  584. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  585. struct of_phandle_args *args))
  586. {
  587. struct phy_provider *phy_provider;
  588. phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
  589. if (!phy_provider)
  590. return ERR_PTR(-ENOMEM);
  591. phy_provider->dev = dev;
  592. phy_provider->owner = owner;
  593. phy_provider->of_xlate = of_xlate;
  594. mutex_lock(&phy_provider_mutex);
  595. list_add_tail(&phy_provider->list, &phy_provider_list);
  596. mutex_unlock(&phy_provider_mutex);
  597. return phy_provider;
  598. }
  599. EXPORT_SYMBOL_GPL(__of_phy_provider_register);
  600. /**
  601. * __devm_of_phy_provider_register() - create/register phy provider with the
  602. * framework
  603. * @dev: struct device of the phy provider
  604. * @owner: the module owner containing of_xlate
  605. * @of_xlate: function pointer to obtain phy instance from phy provider
  606. *
  607. * Creates struct phy_provider from dev and of_xlate function pointer.
  608. * This is used in the case of dt boot for finding the phy instance from
  609. * phy provider. While at that, it also associates the device with the
  610. * phy provider using devres. On driver detach, release function is invoked
  611. * on the devres data, then, devres data is freed.
  612. */
  613. struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
  614. struct module *owner, struct phy * (*of_xlate)(struct device *dev,
  615. struct of_phandle_args *args))
  616. {
  617. struct phy_provider **ptr, *phy_provider;
  618. ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
  619. if (!ptr)
  620. return ERR_PTR(-ENOMEM);
  621. phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
  622. if (!IS_ERR(phy_provider)) {
  623. *ptr = phy_provider;
  624. devres_add(dev, ptr);
  625. } else {
  626. devres_free(ptr);
  627. }
  628. return phy_provider;
  629. }
  630. EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
/**
 * of_phy_provider_unregister() - unregister phy provider from the framework
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * Removes the phy_provider created using of_phy_provider_register().
 * An ERR_PTR value (e.g. from a failed registration) is silently
 * ignored, so callers may pass the registration result unchecked.
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	if (IS_ERR(phy_provider))
		return;

	/* unlink and free under the mutex so lookups never see a freed entry */
	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
  647. /**
  648. * devm_of_phy_provider_unregister() - remove phy provider from the framework
  649. * @dev: struct device of the phy provider
  650. *
  651. * destroys the devres associated with this phy provider and invokes
  652. * of_phy_provider_unregister to unregister the phy provider.
  653. */
  654. void devm_of_phy_provider_unregister(struct device *dev,
  655. struct phy_provider *phy_provider) {
  656. int r;
  657. r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
  658. phy_provider);
  659. dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
  660. }
  661. EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.  Returns the phy's id to
 * the ida and frees the allocation made in phy_create().
 */
static void phy_release(struct device *dev)
{
	struct phy *phy;

	phy = to_phy(dev);
	dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
	ida_remove(&phy_ida, phy->id);
	kfree(phy);
}
/* Module init: create the "phy" device class and install phy_release()
 * as the class-wide release handler for phy devices. */
static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
			PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	phy_class->dev_release = phy_release;

	return 0;
}
module_init(phy_core_init);
/* Module exit: destroy the "phy" class created in phy_core_init(). */
static void __exit phy_core_exit(void)
{
	class_destroy(phy_class);
}
module_exit(phy_core_exit);

MODULE_DESCRIPTION("Generic PHY Framework");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");