industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
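
/*
 * Example: a minimal buffer implementation would typically embed a struct
 * iio_buffer and call iio_buffer_init() on it from its allocation routine
 * before filling in its access functions. A hedged sketch; the names
 * my_buffer, my_access_funcs and iio_my_buffer_allocate are illustrative
 * only and are not part of this file:
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;
 *		// implementation specific state goes here
 *	};
 *
 *	struct iio_buffer *iio_my_buffer_allocate(void)
 *	{
 *		struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *		if (!mb)
 *			return NULL;
 *		iio_buffer_init(&mb->buffer);
 *		mb->buffer.access = &my_access_funcs;
 *		return &mb->buffer;
 *	}
 */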
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
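
/*
 * Example: a driver usually attaches its buffer and then registers the scan
 * element attributes from probe(). A hedged sketch only; the allocation
 * helper and error label below are illustrative, not defined in this file:
 *
 *	indio_dev->buffer = iio_my_buffer_allocate();
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *
 *	ret = iio_buffer_register(indio_dev,
 *				  indio_dev->channels,
 *				  indio_dev->num_channels);
 *	if (ret)
 *		goto error_free_buffer;
 *
 * with a matching iio_buffer_unregister(indio_dev) on the remove path.
 */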
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			}
			else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
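
/*
 * Example: iio_update_buffers() is how an in-kernel consumer attaches or
 * detaches its own buffer without disturbing any others already on
 * indio_dev->buffer_list. A hedged sketch only; cb_buffer stands in for an
 * illustrative consumer-owned buffer and is not defined in this file:
 *
 *	// start streaming into the consumer's buffer
 *	ret = iio_update_buffers(indio_dev, cb_buffer, NULL);
 *
 *	// ... later, stop streaming
 *	ret = iio_update_buffers(indio_dev, NULL, cb_buffer);
 */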
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
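
/*
 * Example: buffer implementations typically expose the helpers above as
 * their sysfs attributes, so buffer/length and buffer/enable behave the same
 * way regardless of the backing implementation. A hedged sketch of how such
 * attributes might be declared (the declarations follow the usual
 * DEVICE_ATTR convention but are illustrative here):
 *
 *	static DEVICE_ATTR(length, S_IRUGO | S_IWUSR,
 *			   iio_buffer_read_length,
 *			   iio_buffer_write_length);
 *
 *	static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
 *			   iio_buffer_show_enable,
 *			   iio_buffer_store_enable);
 *
 * These would then be collected into the attribute group pointed to by
 * buffer->attrs, which iio_buffer_register() above installs on the device.
 */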
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
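
/*
 * Example: drivers whose hardware can only stream one channel at a time plug
 * this helper into their buffer setup ops; iio_validate_scan_mask() below
 * will then reject any trial scan mask with more than one bit set. The ops
 * structure and callback names other than validate_scan_mask are
 * illustrative:
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.preenable		= my_buffer_preenable,
 *		.postenable		= my_buffer_postenable,
 *		.predisable		= my_buffer_predisable,
 *		.validate_scan_mask	= iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_buffer_setup_ops;
 */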
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN_ON("Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
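
/*
 * Example: iio_push_to_buffers() is typically called from a driver's trigger
 * handler once a full scan has been assembled. A hedged sketch of such a
 * handler; the handler name, data layout and locally declared scan array are
 * illustrative only:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];	// must be large enough for indio_dev->scan_bytes
 *
 *		// read the enabled channels into scan[] here, packed
 *		// according to indio_dev->active_scan_mask
 *
 *		iio_push_to_buffers(indio_dev, scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */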
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
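
/*
 * Example: code that stores a long-lived pointer to a buffer takes a
 * reference with iio_buffer_get() and drops it with iio_buffer_put() when the
 * pointer is discarded, just as iio_buffer_activate()/iio_buffer_deactivate()
 * above do for entries on buffer_list. An illustrative consumer-side pairing
 * (cb is a hypothetical consumer structure, not defined in this file):
 *
 *	cb->buffer = iio_buffer_get(indio_dev->buffer);
 *	// ... use cb->buffer ...
 *	iio_buffer_put(cb->buffer);
 */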