v4l2-async.c

/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

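/* Call the notifier operations only if the driver has supplied them. */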
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

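/* Match helpers: return true if sub-device @sd satisfies descriptor @asd. */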
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->fwnode == asd->match.fwnode;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

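/*
 * Sub-devices that are not yet bound to any notifier sit on subdev_list;
 * registered notifiers sit on notifier_list. Both are protected by list_lock.
 */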
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

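/* Find the first descriptor on the notifier's waiting list that matches @sd. */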
static struct v4l2_async_subdev *v4l2_async_find_match(
	struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
	struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
	struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool v4l2_async_notifier_can_complete(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int v4l2_async_notifier_try_complete(
	struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int v4l2_async_notifier_try_all_subdevs(
	struct v4l2_async_notifier *notifier);

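/*
 * Bind a matched sub-device: register it with the v4l2_device, call the
 * notifier's .bound() callback, move the descriptor and sub-device to the
 * done list, and recurse into the sub-device's own notifier if it has one.
 */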
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int v4l2_async_notifier_try_all_subdevs(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void v4l2_async_notifier_unbind_all_subdevs(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     unsigned int this_index)
{
	struct v4l2_async_subdev *asd_y;
	unsigned int j;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	if (notifier->subdevs) {
		for (j = 0; j < this_index; j++) {
			asd_y = notifier->subdevs[j];
			if (asd_equal(asd, asd_y))
				return true;
		}
	} else {
		j = 0;

		list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
			if (j++ >= this_index)
				break;

			if (asd_equal(asd, asd_y))
				return true;
		}
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

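/* Validate a descriptor's match type and reject duplicates before adding it. */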
static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 unsigned int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_CUSTOM:
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&notifier->asd_list);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

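/*
 * Common part of notifier registration: validate the descriptors, move them
 * to the waiting list, try matching already-registered sub-devices, attempt
 * completion and finally add the notifier to the global notifier list.
 */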
static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret;
	int i;

	if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	if (notifier->subdevs) {
		for (i = 0; i < notifier->num_subdevs; i++) {
			asd = notifier->subdevs[i];

			ret = v4l2_async_notifier_asd_valid(notifier, asd, i);
			if (ret)
				goto err_unlock;

			list_add_tail(&asd->list, &notifier->waiting);
		}
	} else {
		i = 0;

		list_for_each_entry(asd, &notifier->asd_list, asd_list) {
			ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
			if (ret)
				goto err_unlock;

			list_add_tail(&asd->list, &notifier->waiting);
		}
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void __v4l2_async_notifier_unregister(
	struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;
	unsigned int i;

	if (!notifier)
		return;

	if (notifier->subdevs) {
		for (i = 0; i < notifier->num_subdevs; i++) {
			asd = notifier->subdevs[i];

			switch (asd->match_type) {
			case V4L2_ASYNC_MATCH_FWNODE:
				fwnode_handle_put(asd->match.fwnode);
				break;
			default:
				break;
			}

			kfree(asd);
		}

		kvfree(notifier->subdevs);
		notifier->subdevs = NULL;
	} else {
		list_for_each_entry_safe(asd, tmp,
					 &notifier->asd_list, asd_list) {
			switch (asd->match_type) {
			case V4L2_ASYNC_MATCH_FWNODE:
				fwnode_handle_put(asd->match.fwnode);
				break;
			default:
				break;
			}

			list_del(&asd->asd_list);
			kfree(asd);
		}
	}

	notifier->num_subdevs = 0;
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

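/*
 * Add a caller-allocated descriptor to the notifier's asd_list. Ownership of
 * @asd passes to the notifier; it is freed by v4l2_async_notifier_cleanup().
 */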
int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	if (notifier->num_subdevs >= V4L2_MAX_SUBDEVS) {
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * If caller uses this function, it cannot also allocate and
	 * place asd's in the notifier->subdevs array.
	 */
	if (WARN_ON(notifier->subdevs)) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = v4l2_async_notifier_asd_valid(notifier, asd,
					    notifier->num_subdevs);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);
	notifier->num_subdevs++;

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);

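/*
 * Illustrative sketch only: one way a bridge driver might build and register
 * a notifier using the helpers above. The names my_bridge, my_notifier_ops
 * and the endpoint handling are hypothetical; error unwinding is shortened.
 * On success the notifier consumes the fwnode reference (it is put by
 * v4l2_async_notifier_cleanup()); on failure the caller puts it.
 *
 *	static int my_bridge_parse_sensor(struct my_bridge *bridge,
 *					  struct fwnode_handle *ep)
 *	{
 *		struct fwnode_handle *remote;
 *		struct v4l2_async_subdev *asd;
 *
 *		v4l2_async_notifier_init(&bridge->notifier);
 *
 *		remote = fwnode_graph_get_remote_port_parent(ep);
 *		asd = v4l2_async_notifier_add_fwnode_subdev(&bridge->notifier,
 *							    remote,
 *							    sizeof(*asd));
 *		if (IS_ERR(asd)) {
 *			fwnode_handle_put(remote);
 *			return PTR_ERR(asd);
 *		}
 *
 *		bridge->notifier.ops = &my_notifier_ops;
 *		return v4l2_async_notifier_register(&bridge->v4l2_dev,
 *						    &bridge->notifier);
 *	}
 */
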
struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

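/*
 * Illustrative sketch only: the sensor side typically initializes its
 * struct v4l2_subdev in probe() and then hands it over for asynchronous
 * matching. The my_sensor names are hypothetical.
 *
 *	static int my_sensor_probe(struct i2c_client *client)
 *	{
 *		struct my_sensor *sensor;
 *
 *		sensor = devm_kzalloc(&client->dev, sizeof(*sensor),
 *				      GFP_KERNEL);
 *		if (!sensor)
 *			return -ENOMEM;
 *
 *		v4l2_i2c_subdev_init(&sensor->sd, client, &my_sensor_ops);
 *		sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 *
 *		// Matched by fwnode or I2C address against waiting notifiers
 *		return v4l2_async_register_subdev(&sensor->sd);
 *	}
 */
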
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);