devres.c

// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct devres_node {
	struct list_head	entry;
	dr_release_t		release;
#ifdef CONFIG_DEBUG_DEVRES
	const char		*name;
	size_t			size;
#endif
};

struct devres {
	struct devres_node	node;
	/* -- 3 pointers */
	unsigned long long	data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node	node[2];
	void			*id;
	int			color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
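
/*
 * Usage sketch (illustrative, not part of the original file): how a driver
 * typically pairs devres_alloc() with devres_add(). The foo_* names and
 * struct foo_ctx are hypothetical.
 *
 *	static void foo_ctx_release(struct device *dev, void *res)
 *	{
 *		struct foo_ctx *ctx = res;
 *
 *		foo_hw_shutdown(ctx);	// hypothetical teardown helper
 *	}
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_ctx *ctx;
 *
 *		ctx = devres_alloc(foo_ctx_release, sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *		// ... initialize ctx ...
 *		devres_add(dev, ctx);	// released automatically on detach
 *		return 0;
 *	}
 */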

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
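
/*
 * Usage sketch (illustrative, not part of the original file): devres_get()
 * as a find-or-add helper. The caller allocates a candidate entry with
 * devres_alloc(); devres_get() either registers it or frees it and returns
 * the entry registered earlier. foo_cache_release and struct foo_cache are
 * hypothetical.
 *
 *	static struct foo_cache *foo_get_cache(struct device *dev)
 *	{
 *		struct foo_cache *new;
 *
 *		new = devres_alloc(foo_cache_release, sizeof(*new), GFP_KERNEL);
 *		if (!new)
 *			return NULL;
 *		// If a foo_cache was added earlier, it is returned and @new
 *		// is freed; otherwise @new is added and returned.
 *		return devres_get(dev, new, NULL, NULL);
 *	}
 */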

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
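
/*
 * Illustrative note (not part of the original file): devres_destroy() only
 * drops the bookkeeping entry, leaving the caller to undo the underlying
 * resource, while devres_release() also invokes the release callback. A
 * hypothetical managed-enable helper might undo itself early like this:
 *
 *	static void foo_disable(struct device *dev, void *res)
 *	{
 *		foo_hw_disable(dev);	// hypothetical helper
 *	}
 *
 *	static void foo_early_disable(struct device *dev)
 *	{
 *		// Runs foo_disable() now and frees the devres entry,
 *		// instead of waiting for driver detach.
 *		WARN_ON(devres_release(dev, foo_disable, NULL, NULL));
 *	}
 */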

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
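
/*
 * Usage sketch (illustrative, not part of the original file): grouping a
 * multi-step initialization so it can be rolled back as one unit. The
 * foo_setup_* helpers are hypothetical and assumed to register their own
 * devm resources.
 *
 *	static int foo_init_extras(struct device *dev)
 *	{
 *		void *group;
 *		int err;
 *
 *		group = devres_open_group(dev, NULL, GFP_KERNEL);
 *		if (!group)
 *			return -ENOMEM;
 *
 *		err = foo_setup_dma(dev);
 *		if (!err)
 *			err = foo_setup_irqs(dev);
 *		if (err) {
 *			// Releases everything added since the group opened.
 *			devres_release_group(dev, group);
 *			return err;
 *		}
 *
 *		devres_close_group(dev, group);
 *		return 0;
 *	}
 */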

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
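
/*
 * Usage sketch (illustrative, not part of the original file): registering
 * a one-shot cleanup with devm_add_action(). The foo_* names are
 * hypothetical; the regulator calls assume <linux/regulator/consumer.h>.
 *
 *	static void foo_regulator_disable(void *data)
 *	{
 *		regulator_disable(data);
 *	}
 *
 *	static int foo_enable_supply(struct device *dev, struct regulator *vdd)
 *	{
 *		int err;
 *
 *		err = regulator_enable(vdd);
 *		if (err)
 *			return err;
 *		// Disable the supply automatically on driver detach; if the
 *		// registration itself fails, undo the enable by hand.
 *		err = devm_add_action(dev, foo_regulator_disable, vdd);
 *		if (err)
 *			regulator_disable(vdd);
 *		return err;
 *	}
 */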

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
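
/*
 * Usage sketch (illustrative, not part of the original file): the common
 * probe-time pattern. struct foo_priv and foo_probe() are hypothetical;
 * devm_kzalloc() is the zeroing wrapper around devm_kmalloc() provided by
 * <linux/device.h>.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		dev_set_drvdata(dev, priv);
 *		// No kfree() in the error or remove paths: the allocation is
 *		// released automatically when the driver detaches.
 *		return 0;
 *	}
 */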

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *                   into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *                  into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
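
/*
 * Usage sketch (illustrative, not part of the original file): building a
 * device-lifetime string without an explicit kfree(). foo_channel_label()
 * and the label format are hypothetical.
 *
 *	static char *foo_channel_label(struct device *dev, int index)
 *	{
 *		// Freed automatically on driver detach, like devm_kmalloc().
 *		return devm_kasprintf(dev, GFP_KERNEL, "%s-ch%d",
 *				      dev_name(dev), index);
 *	}
 */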

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, void *p)
{
	int rc;

	rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages. Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
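
/*
 * Usage sketch (illustrative, not part of the original file): a managed
 * page-order allocation for a hypothetical ring buffer. struct foo_priv
 * and its unsigned long ring member are assumptions.
 *
 *	static int foo_alloc_ring(struct device *dev, struct foo_priv *priv)
 *	{
 *		// Two pages (order 1), zeroed; freed via free_pages() on detach.
 *		priv->ring = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 1);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */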

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
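
/*
 * Usage sketch (illustrative, not part of the original file): per-cpu
 * counters tied to device lifetime. devm_alloc_percpu() is the
 * type-checking wrapper around __devm_alloc_percpu() from
 * <linux/device.h>; struct foo_stats and struct foo_priv are hypothetical.
 *
 *	static int foo_init_stats(struct device *dev, struct foo_priv *priv)
 *	{
 *		priv->stats = devm_alloc_percpu(dev, struct foo_stats);
 *		if (!priv->stats)
 *			return -ENOMEM;
 *		// free_percpu() happens automatically on driver detach.
 *		return 0;
 *	}
 */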

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);