industrialio-trigger.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770
  1. /* The industrial I/O core, trigger handling functions
  2. *
  3. * Copyright (c) 2008 Jonathan Cameron
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/idr.h>
  11. #include <linux/err.h>
  12. #include <linux/device.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/list.h>
  15. #include <linux/slab.h>
  16. #include <linux/iio/iio.h>
  17. #include <linux/iio/trigger.h>
  18. #include "iio_core.h"
  19. #include "iio_core_trigger.h"
  20. #include <linux/iio/trigger_consumer.h>
  21. /* RFC - Question of approach
  22. * Make the common case (single sensor single trigger)
  23. * simple by starting trigger capture from when first sensors
  24. * is added.
  25. *
  26. * Complex simultaneous start requires use of 'hold' functionality
  27. * of the trigger. (not implemented)
  28. *
  29. * Any other suggestions?
  30. */
/* Allocator for the unique numeric ids used in trigger sysfs names */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
/* Protects iio_trigger_list against concurrent register/lookup/unregister */
static DEFINE_MUTEX(iio_trigger_list_lock);
  35. /**
  36. * iio_trigger_read_name() - retrieve useful identifying name
  37. * @dev: device associated with the iio_trigger
  38. * @attr: pointer to the device_attribute structure that is
  39. * being processed
  40. * @buf: buffer to print the name into
  41. *
  42. * Return: a negative number on failure or the number of written
  43. * characters on success.
  44. */
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	/* Emit the trigger's human-readable name for the sysfs "name" file */
	return sprintf(buf, "%s\n", trig->name);
}

/* Read-only "name" attribute present on every trigger device */
static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);
  58. static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
  59. int iio_trigger_register(struct iio_trigger *trig_info)
  60. {
  61. int ret;
  62. /* trig_info->ops is required for the module member */
  63. if (!trig_info->ops)
  64. return -EINVAL;
  65. trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
  66. if (trig_info->id < 0)
  67. return trig_info->id;
  68. /* Set the name used for the sysfs directory etc */
  69. dev_set_name(&trig_info->dev, "trigger%ld",
  70. (unsigned long) trig_info->id);
  71. ret = device_add(&trig_info->dev);
  72. if (ret)
  73. goto error_unregister_id;
  74. /* Add to list of available triggers held by the IIO core */
  75. mutex_lock(&iio_trigger_list_lock);
  76. if (__iio_trigger_find_by_name(trig_info->name)) {
  77. pr_err("Duplicate trigger name '%s'\n", trig_info->name);
  78. ret = -EEXIST;
  79. goto error_device_del;
  80. }
  81. list_add_tail(&trig_info->list, &iio_trigger_list);
  82. mutex_unlock(&iio_trigger_list_lock);
  83. return 0;
  84. error_device_del:
  85. mutex_unlock(&iio_trigger_list_lock);
  86. device_del(&trig_info->dev);
  87. error_unregister_id:
  88. ida_simple_remove(&iio_trigger_ida, trig_info->id);
  89. return ret;
  90. }
  91. EXPORT_SYMBOL(iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	/* Remove from the global list so no new consumer can find it */
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

/*
 * iio_trigger_set_immutable() - permanently bind @indio_dev to @trig.
 * Takes a reference on the trigger and marks the device's current_trigger
 * read-only, after which iio_trigger_write_current() fails with -EPERM.
 */
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	if (!indio_dev || !trig)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	/* Expected to be called at most once per device */
	WARN_ON(indio_dev->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	indio_dev->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
  114. /* Search for trigger by name, assuming iio_trigger_list_lock held */
  115. static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
  116. {
  117. struct iio_trigger *iter;
  118. list_for_each_entry(iter, &iio_trigger_list, list)
  119. if (!strcmp(iter->name, name))
  120. return iter;
  121. return NULL;
  122. }
  123. static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
  124. {
  125. struct iio_trigger *trig = NULL, *iter;
  126. mutex_lock(&iio_trigger_list_lock);
  127. list_for_each_entry(iter, &iio_trigger_list, list)
  128. if (sysfs_streq(iter->name, name)) {
  129. trig = iter;
  130. iio_trigger_get(trig);
  131. break;
  132. }
  133. mutex_unlock(&iio_trigger_list_lock);
  134. return trig;
  135. }
/*
 * iio_trigger_poll() - called when the trigger fires (hard-irq context).
 * A new poll cycle is only started if the previous one has completed
 * (use_count back at zero).  use_count is primed to the number of
 * consumer slots; each slot either has its sub-irq dispatched or is
 * immediately "notified done" so the count drains back to zero.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

/* Generic top-half handler: simply kick off a poll of the trigger */
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);

/*
 * As iio_trigger_poll() but for use from thread context: sub-irqs are
 * dispatched with handle_nested_irq() rather than generic_handle_irq().
 */
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

/*
 * Called by each consumer once it has finished handling an event.  When
 * the last consumer finishes, optionally re-enable the trigger via
 * try_reenable(); a non-zero return there means an event was missed
 * while disabled, so poll again immediately.
 */
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */

/*
 * Allocate one consumer slot (a single bit: region order ilog2(1) == 0)
 * from the trigger's pool and translate it to a sub-irq number.
 * Returns a negative value when the pool is exhausted.
 */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

/* Return a consumer slot previously handed out by iio_trigger_get_irq() */
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}
  197. /* Complexity in here. With certain triggers (datardy) an acknowledgement
  198. * may be needed if the pollfuncs do not include the data read for the
  199. * triggering device.
  200. * This is not currently handled. Alternative of not enabling trigger unless
  201. * the relevant function is in there may be the best option.
  202. */
  203. /* Worth protecting against double additions? */
  204. static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
  205. struct iio_poll_func *pf)
  206. {
  207. int ret = 0;
  208. bool notinuse
  209. = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
  210. /* Prevent the module from being removed whilst attached to a trigger */
  211. __module_get(pf->indio_dev->info->driver_module);
  212. /* Get irq number */
  213. pf->irq = iio_trigger_get_irq(trig);
  214. if (pf->irq < 0)
  215. goto out_put_module;
  216. /* Request irq */
  217. ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
  218. pf->type, pf->name,
  219. pf);
  220. if (ret < 0)
  221. goto out_put_irq;
  222. /* Enable trigger in driver */
  223. if (trig->ops->set_trigger_state && notinuse) {
  224. ret = trig->ops->set_trigger_state(trig, true);
  225. if (ret < 0)
  226. goto out_free_irq;
  227. }
  228. /*
  229. * Check if we just registered to our own trigger: we determine that
  230. * this is the case if the IIO device and the trigger device share the
  231. * same parent device.
  232. */
  233. if (pf->indio_dev->dev.parent == trig->dev.parent)
  234. trig->attached_own_device = true;
  235. return ret;
  236. out_free_irq:
  237. free_irq(pf->irq, pf);
  238. out_put_irq:
  239. iio_trigger_put_irq(trig, pf->irq);
  240. out_put_module:
  241. module_put(pf->indio_dev->info->driver_module);
  242. return ret;
  243. }
/*
 * Detach a poll function from a trigger: disable the trigger in the
 * driver if this was its last user, clear own-device bookkeeping, release
 * the sub-irq and consumer slot, and drop the module reference taken at
 * attach time.
 */
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	/* "Weight == 1" means pf holds the only remaining slot */
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);

	if (trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);

	return ret;
}
/*
 * Generic top-half for triggered capture: record the timestamp as close
 * to the trigger event as possible, then wake the threaded handler.
 */
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
  271. struct iio_poll_func
  272. *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
  273. irqreturn_t (*thread)(int irq, void *p),
  274. int type,
  275. struct iio_dev *indio_dev,
  276. const char *fmt,
  277. ...)
  278. {
  279. va_list vargs;
  280. struct iio_poll_func *pf;
  281. pf = kmalloc(sizeof *pf, GFP_KERNEL);
  282. if (pf == NULL)
  283. return NULL;
  284. va_start(vargs, fmt);
  285. pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
  286. va_end(vargs);
  287. if (pf->name == NULL) {
  288. kfree(pf);
  289. return NULL;
  290. }
  291. pf->h = h;
  292. pf->thread = thread;
  293. pf->type = type;
  294. pf->indio_dev = indio_dev;
  295. return pf;
  296. }
  297. EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
  298. void iio_dealloc_pollfunc(struct iio_poll_func *pf)
  299. {
  300. kfree(pf->name);
  301. kfree(pf);
  302. }
  303. EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
  304. /**
  305. * iio_trigger_read_current() - trigger consumer sysfs query current trigger
  306. * @dev: device associated with an industrial I/O device
  307. * @attr: pointer to the device_attribute structure that
  308. * is being processed
  309. * @buf: buffer where the current trigger name will be printed into
  310. *
  311. * For trigger consumers the current_trigger interface allows the trigger
  312. * used by the device to be queried.
  313. *
  314. * Return: a negative number on failure, the number of characters written
  315. * on success or 0 if no trigger is available
  316. */
  317. static ssize_t iio_trigger_read_current(struct device *dev,
  318. struct device_attribute *attr,
  319. char *buf)
  320. {
  321. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  322. if (indio_dev->trig)
  323. return sprintf(buf, "%s\n", indio_dev->trig->name);
  324. return 0;
  325. }
  326. /**
  327. * iio_trigger_write_current() - trigger consumer sysfs set current trigger
  328. * @dev: device associated with an industrial I/O device
  329. * @attr: device attribute that is being processed
  330. * @buf: string buffer that holds the name of the trigger
  331. * @len: length of the trigger name held by buf
  332. *
  333. * For trigger consumers the current_trigger interface allows the trigger
  334. * used for this device to be specified at run time based on the trigger's
  335. * name.
  336. *
  337. * Return: negative error code on failure or length of the buffer
  338. * on success
  339. */
  340. static ssize_t iio_trigger_write_current(struct device *dev,
  341. struct device_attribute *attr,
  342. const char *buf,
  343. size_t len)
  344. {
  345. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  346. struct iio_trigger *oldtrig = indio_dev->trig;
  347. struct iio_trigger *trig;
  348. int ret;
  349. mutex_lock(&indio_dev->mlock);
  350. if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
  351. mutex_unlock(&indio_dev->mlock);
  352. return -EBUSY;
  353. }
  354. if (indio_dev->trig_readonly) {
  355. mutex_unlock(&indio_dev->mlock);
  356. return -EPERM;
  357. }
  358. mutex_unlock(&indio_dev->mlock);
  359. trig = iio_trigger_acquire_by_name(buf);
  360. if (oldtrig == trig) {
  361. ret = len;
  362. goto out_trigger_put;
  363. }
  364. if (trig && indio_dev->info->validate_trigger) {
  365. ret = indio_dev->info->validate_trigger(indio_dev, trig);
  366. if (ret)
  367. goto out_trigger_put;
  368. }
  369. if (trig && trig->ops->validate_device) {
  370. ret = trig->ops->validate_device(trig, indio_dev);
  371. if (ret)
  372. goto out_trigger_put;
  373. }
  374. indio_dev->trig = trig;
  375. if (oldtrig) {
  376. if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
  377. iio_trigger_detach_poll_func(oldtrig,
  378. indio_dev->pollfunc_event);
  379. iio_trigger_put(oldtrig);
  380. }
  381. if (indio_dev->trig) {
  382. if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
  383. iio_trigger_attach_poll_func(indio_dev->trig,
  384. indio_dev->pollfunc_event);
  385. }
  386. return len;
  387. out_trigger_put:
  388. iio_trigger_put(trig);
  389. return ret;
  390. }
/* Read/write "current_trigger" attribute within the "trigger" sysfs group */
static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

/*
 * Device release callback, invoked when the last reference to the
 * trigger's embedded device is dropped: tear down the sub-irq
 * descriptors set up in viio_trigger_alloc() and free the trigger.
 */
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};
  426. static void iio_trig_subirqmask(struct irq_data *d)
  427. {
  428. struct irq_chip *chip = irq_data_get_irq_chip(d);
  429. struct iio_trigger *trig
  430. = container_of(chip,
  431. struct iio_trigger, subirq_chip);
  432. trig->subirqs[d->irq - trig->subirq_base].enabled = false;
  433. }
  434. static void iio_trig_subirqunmask(struct irq_data *d)
  435. {
  436. struct irq_chip *chip = irq_data_get_irq_chip(d);
  437. struct iio_trigger *trig
  438. = container_of(chip,
  439. struct iio_trigger, subirq_chip);
  440. trig->subirqs[d->irq - trig->subirq_base].enabled = true;
  441. }
/*
 * Allocate and initialise a trigger: embedded device, per-consumer
 * sub-irq descriptors with their irq_chip, and the kvasprintf()'d name.
 * Returns the trigger with an extra device reference held (dropped by
 * iio_trigger_free()), or NULL on failure.
 */
static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		/* Reserved for IIO's internal use; start masked */
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}
	get_device(&trig->dev);

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}
/*
 * iio_trigger_alloc() - allocate a trigger with a printf-style name.
 * Varargs wrapper around viio_trigger_alloc(); free the result with
 * iio_trigger_free().
 */
struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

/* Drop the reference taken in viio_trigger_alloc(); NULL is a no-op */
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
  494. static void devm_iio_trigger_release(struct device *dev, void *res)
  495. {
  496. iio_trigger_free(*(struct iio_trigger **)res);
  497. }
  498. static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
  499. {
  500. struct iio_trigger **r = res;
  501. if (!r || !*r) {
  502. WARN_ON(!r || !*r);
  503. return 0;
  504. }
  505. return *r == data;
  506. }
/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);

/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_trig:	the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_release,
			    devm_iio_trigger_match, iio_trig);
	/* Firing means @iio_trig was not devm-allocated against @dev */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

/* devres release callback for triggers from devm_iio_trigger_register() */
static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
	iio_trigger_unregister(*(struct iio_trigger **)res);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_trigger registered with this function needs to be unregistered
 * separately, devm_iio_trigger_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
{
	struct iio_trigger **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = trig_info;
	ret = iio_trigger_register(trig_info);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);

/**
 * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
 * @dev:	device this iio_trigger belongs to
 * @trig_info:	the trigger associated with the device
 *
 * Unregister trigger registered with devm_iio_trigger_register().
 */
void devm_iio_trigger_unregister(struct device *dev,
				 struct iio_trigger *trig_info)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
			    trig_info);
	/* Firing means @trig_info was not devm-registered against @dev */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
/*
 * iio_trigger_using_own() - report whether the device's current trigger
 * was registered by the same (parent) device.
 *
 * NOTE(review): dereferences indio_dev->trig unconditionally — presumably
 * callers only invoke this while a trigger is attached; confirm at call
 * sites.
 */
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);
  618. /**
  619. * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
  620. * the same device
  621. * @trig: The IIO trigger to check
  622. * @indio_dev: the IIO device to check
  623. *
  624. * This function can be used as the validate_device callback for triggers that
  625. * can only be attached to their own device.
  626. *
  627. * Return: 0 if both the trigger and the IIO device belong to the same
  628. * device, -EINVAL otherwise.
  629. */
  630. int iio_trigger_validate_own_device(struct iio_trigger *trig,
  631. struct iio_dev *indio_dev)
  632. {
  633. if (indio_dev->dev.parent != trig->dev.parent)
  634. return -EINVAL;
  635. return 0;
  636. }
  637. EXPORT_SYMBOL(iio_trigger_validate_own_device);
/* Expose the "trigger" sysfs group (current_trigger) on a consumer device */
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

/* Buffer setup-op helper: attach the device pollfunc when buffering starts */
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

/* Buffer setup-op helper: detach the device pollfunc when buffering stops */
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);