industrialio-buffer.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109
  1. /* The industrial I/O core
  2. *
  3. * Copyright (c) 2008 Jonathan Cameron
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * Handling of buffer allocation / resizing.
  10. *
  11. *
  12. * Things to look at here.
  13. * - Better memory allocation techniques?
  14. * - Alternative access techniques?
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/export.h>
  18. #include <linux/device.h>
  19. #include <linux/fs.h>
  20. #include <linux/cdev.h>
  21. #include <linux/slab.h>
  22. #include <linux/poll.h>
  23. #include <linux/sched.h>
  24. #include <linux/iio/iio.h>
  25. #include "iio_core.h"
  26. #include <linux/iio/sysfs.h>
  27. #include <linux/iio/buffer.h>
/* Sysfs strings for scan element endianness, indexed by enum iio_endian. */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
  32. static bool iio_buffer_is_active(struct iio_buffer *buf)
  33. {
  34. return !list_empty(&buf->buffer_list);
  35. }
  36. static bool iio_buffer_data_available(struct iio_buffer *buf)
  37. {
  38. return buf->access->data_available(buf);
  39. }
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	file structure for the opened chrdev
 * @buf:	userspace destination buffer
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Returns the number of bytes read, or a negative errno.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	/* info == NULL marks a device that has been unregistered. */
	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			/*
			 * Sleep until data shows up or the device goes
			 * away (unregister wakes pollq after clearing
			 * info).
			 */
			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		/*
		 * Data raced away between the availability check and
		 * the read; a non-blocking caller gets -EAGAIN instead
		 * of spinning in this loop.
		 */
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}
  74. /**
  75. * iio_buffer_poll() - poll the buffer to find out if it has data
  76. */
  77. unsigned int iio_buffer_poll(struct file *filp,
  78. struct poll_table_struct *wait)
  79. {
  80. struct iio_dev *indio_dev = filp->private_data;
  81. struct iio_buffer *rb = indio_dev->buffer;
  82. if (!indio_dev->info)
  83. return -ENODEV;
  84. poll_wait(filp, &rb->pollq, wait);
  85. if (iio_buffer_data_available(rb))
  86. return POLLIN | POLLRDNORM;
  87. /* need a way of knowing if there may be enough data... */
  88. return 0;
  89. }
  90. /**
  91. * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
  92. * @indio_dev: The IIO device
  93. *
  94. * Wakes up the event waitqueue used for poll(). Should usually
  95. * be called when the device is unregistered.
  96. */
  97. void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
  98. {
  99. if (!indio_dev->buffer)
  100. return;
  101. wake_up(&indio_dev->buffer->pollq);
  102. }
/* Initialise the fields common to every iio_buffer implementation. */
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);	/* refcount starts at 1 */
}
EXPORT_SYMBOL(iio_buffer_init);
  111. static ssize_t iio_show_scan_index(struct device *dev,
  112. struct device_attribute *attr,
  113. char *buf)
  114. {
  115. return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
  116. }
  117. static ssize_t iio_show_fixed_type(struct device *dev,
  118. struct device_attribute *attr,
  119. char *buf)
  120. {
  121. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  122. u8 type = this_attr->c->scan_type.endianness;
  123. if (type == IIO_CPU) {
  124. #ifdef __LITTLE_ENDIAN
  125. type = IIO_LE;
  126. #else
  127. type = IIO_BE;
  128. #endif
  129. }
  130. if (this_attr->c->scan_type.repeat > 1)
  131. return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
  132. iio_endian_prefix[type],
  133. this_attr->c->scan_type.sign,
  134. this_attr->c->scan_type.realbits,
  135. this_attr->c->scan_type.storagebits,
  136. this_attr->c->scan_type.repeat,
  137. this_attr->c->scan_type.shift);
  138. else
  139. return sprintf(buf, "%s:%c%d/%d>>%u\n",
  140. iio_endian_prefix[type],
  141. this_attr->c->scan_type.sign,
  142. this_attr->c->scan_type.realbits,
  143. this_attr->c->scan_type.storagebits,
  144. this_attr->c->scan_type.shift);
  145. }
  146. static ssize_t iio_scan_el_show(struct device *dev,
  147. struct device_attribute *attr,
  148. char *buf)
  149. {
  150. int ret;
  151. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  152. /* Ensure ret is 0 or 1. */
  153. ret = !!test_bit(to_iio_dev_attr(attr)->address,
  154. indio_dev->buffer->scan_mask);
  155. return sprintf(buf, "%d\n", ret);
  156. }
/* Drop @bit from the buffer's scan mask; always succeeds. */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
/*
 * Sysfs store for a scan element "en" attribute: enable or disable a
 * channel in the buffer's scan mask.  Rejected with -EBUSY while the
 * buffer is actively capturing.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	/* The scan mask must not change under a running capture. */
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	/* Current state of this bit: 0, 1 or a negative errno. */
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	/* Only touch the mask when the requested state differs. */
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
  196. static ssize_t iio_scan_el_ts_show(struct device *dev,
  197. struct device_attribute *attr,
  198. char *buf)
  199. {
  200. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  201. return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
  202. }
/*
 * Sysfs store for the timestamp "en" attribute: toggle capture of the
 * timestamp scan element.  Rejected with -EBUSY while capturing.
 */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	/* Cannot change the scan layout under a running capture. */
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
  224. static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
  225. const struct iio_chan_spec *chan)
  226. {
  227. int ret, attrcount = 0;
  228. struct iio_buffer *buffer = indio_dev->buffer;
  229. ret = __iio_add_chan_devattr("index",
  230. chan,
  231. &iio_show_scan_index,
  232. NULL,
  233. 0,
  234. IIO_SEPARATE,
  235. &indio_dev->dev,
  236. &buffer->scan_el_dev_attr_list);
  237. if (ret)
  238. return ret;
  239. attrcount++;
  240. ret = __iio_add_chan_devattr("type",
  241. chan,
  242. &iio_show_fixed_type,
  243. NULL,
  244. 0,
  245. 0,
  246. &indio_dev->dev,
  247. &buffer->scan_el_dev_attr_list);
  248. if (ret)
  249. return ret;
  250. attrcount++;
  251. if (chan->type != IIO_TIMESTAMP)
  252. ret = __iio_add_chan_devattr("en",
  253. chan,
  254. &iio_scan_el_show,
  255. &iio_scan_el_store,
  256. chan->scan_index,
  257. 0,
  258. &indio_dev->dev,
  259. &buffer->scan_el_dev_attr_list);
  260. else
  261. ret = __iio_add_chan_devattr("en",
  262. chan,
  263. &iio_scan_el_ts_show,
  264. &iio_scan_el_ts_store,
  265. chan->scan_index,
  266. 0,
  267. &indio_dev->dev,
  268. &buffer->scan_el_dev_attr_list);
  269. if (ret)
  270. return ret;
  271. attrcount++;
  272. ret = attrcount;
  273. return ret;
  274. }
/* Name of the sysfs directory that groups the scan element attributes. */
static const char * const iio_scan_elements_group_name = "scan_elements";
  276. int iio_buffer_register(struct iio_dev *indio_dev,
  277. const struct iio_chan_spec *channels,
  278. int num_channels)
  279. {
  280. struct iio_dev_attr *p;
  281. struct attribute **attr;
  282. struct iio_buffer *buffer = indio_dev->buffer;
  283. int ret, i, attrn, attrcount, attrcount_orig = 0;
  284. if (buffer->attrs)
  285. indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
  286. if (buffer->scan_el_attrs != NULL) {
  287. attr = buffer->scan_el_attrs->attrs;
  288. while (*attr++ != NULL)
  289. attrcount_orig++;
  290. }
  291. attrcount = attrcount_orig;
  292. INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
  293. if (channels) {
  294. /* new magic */
  295. for (i = 0; i < num_channels; i++) {
  296. if (channels[i].scan_index < 0)
  297. continue;
  298. /* Establish necessary mask length */
  299. if (channels[i].scan_index >
  300. (int)indio_dev->masklength - 1)
  301. indio_dev->masklength
  302. = channels[i].scan_index + 1;
  303. ret = iio_buffer_add_channel_sysfs(indio_dev,
  304. &channels[i]);
  305. if (ret < 0)
  306. goto error_cleanup_dynamic;
  307. attrcount += ret;
  308. if (channels[i].type == IIO_TIMESTAMP)
  309. indio_dev->scan_index_timestamp =
  310. channels[i].scan_index;
  311. }
  312. if (indio_dev->masklength && buffer->scan_mask == NULL) {
  313. buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
  314. sizeof(*buffer->scan_mask),
  315. GFP_KERNEL);
  316. if (buffer->scan_mask == NULL) {
  317. ret = -ENOMEM;
  318. goto error_cleanup_dynamic;
  319. }
  320. }
  321. }
  322. buffer->scan_el_group.name = iio_scan_elements_group_name;
  323. buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
  324. sizeof(buffer->scan_el_group.attrs[0]),
  325. GFP_KERNEL);
  326. if (buffer->scan_el_group.attrs == NULL) {
  327. ret = -ENOMEM;
  328. goto error_free_scan_mask;
  329. }
  330. if (buffer->scan_el_attrs)
  331. memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
  332. sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
  333. attrn = attrcount_orig;
  334. list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
  335. buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
  336. indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
  337. return 0;
  338. error_free_scan_mask:
  339. kfree(buffer->scan_mask);
  340. error_cleanup_dynamic:
  341. iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
  342. return ret;
  343. }
  344. EXPORT_SYMBOL(iio_buffer_register);
  345. void iio_buffer_unregister(struct iio_dev *indio_dev)
  346. {
  347. kfree(indio_dev->buffer->scan_mask);
  348. kfree(indio_dev->buffer->scan_el_group.attrs);
  349. iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
  350. }
  351. EXPORT_SYMBOL(iio_buffer_unregister);
  352. ssize_t iio_buffer_read_length(struct device *dev,
  353. struct device_attribute *attr,
  354. char *buf)
  355. {
  356. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  357. struct iio_buffer *buffer = indio_dev->buffer;
  358. if (buffer->access->get_length)
  359. return sprintf(buf, "%d\n",
  360. buffer->access->get_length(buffer));
  361. return 0;
  362. }
  363. EXPORT_SYMBOL(iio_buffer_read_length);
  364. ssize_t iio_buffer_write_length(struct device *dev,
  365. struct device_attribute *attr,
  366. const char *buf,
  367. size_t len)
  368. {
  369. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  370. struct iio_buffer *buffer = indio_dev->buffer;
  371. unsigned int val;
  372. int ret;
  373. ret = kstrtouint(buf, 10, &val);
  374. if (ret)
  375. return ret;
  376. if (buffer->access->get_length)
  377. if (val == buffer->access->get_length(buffer))
  378. return len;
  379. mutex_lock(&indio_dev->mlock);
  380. if (iio_buffer_is_active(indio_dev->buffer)) {
  381. ret = -EBUSY;
  382. } else {
  383. if (buffer->access->set_length)
  384. buffer->access->set_length(buffer, val);
  385. ret = 0;
  386. }
  387. mutex_unlock(&indio_dev->mlock);
  388. return ret ? ret : len;
  389. }
  390. EXPORT_SYMBOL(iio_buffer_write_length);
  391. ssize_t iio_buffer_show_enable(struct device *dev,
  392. struct device_attribute *attr,
  393. char *buf)
  394. {
  395. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  396. return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
  397. }
  398. EXPORT_SYMBOL(iio_buffer_show_enable);
  399. /* Note NULL used as error indicator as it doesn't make sense. */
  400. static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
  401. unsigned int masklength,
  402. const unsigned long *mask)
  403. {
  404. if (bitmap_empty(mask, masklength))
  405. return NULL;
  406. while (*av_masks) {
  407. if (bitmap_subset(mask, av_masks, masklength))
  408. return av_masks;
  409. av_masks += BITS_TO_LONGS(masklength);
  410. }
  411. return NULL;
  412. }
  413. static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
  414. const unsigned long *mask, bool timestamp)
  415. {
  416. const struct iio_chan_spec *ch;
  417. unsigned bytes = 0;
  418. int length, i;
  419. /* How much space will the demuxed element take? */
  420. for_each_set_bit(i, mask,
  421. indio_dev->masklength) {
  422. ch = iio_find_channel_from_si(indio_dev, i);
  423. if (ch->scan_type.repeat > 1)
  424. length = ch->scan_type.storagebits / 8 *
  425. ch->scan_type.repeat;
  426. else
  427. length = ch->scan_type.storagebits / 8;
  428. bytes = ALIGN(bytes, length);
  429. bytes += length;
  430. }
  431. if (timestamp) {
  432. ch = iio_find_channel_from_si(indio_dev,
  433. indio_dev->scan_index_timestamp);
  434. if (ch->scan_type.repeat > 1)
  435. length = ch->scan_type.storagebits / 8 *
  436. ch->scan_type.repeat;
  437. else
  438. length = ch->scan_type.storagebits / 8;
  439. bytes = ALIGN(bytes, length);
  440. bytes += length;
  441. }
  442. return bytes;
  443. }
/*
 * Add @buffer to the device's list of active buffers, taking a
 * reference that iio_buffer_deactivate() drops again.
 */
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}
/* Remove @buffer from its device's active list and drop the reference. */
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	/* list_del_init keeps buffer_list valid for iio_buffer_is_active() */
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}
/*
 * Tear down all active buffers (used on device unregister): run the
 * predisable/postdisable hooks once around dropping every buffer from
 * the list, and return the device to direct access mode.
 */
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	/* _safe variant: deactivation unlinks entries while we walk. */
	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	/* Without available_scan_masks the active mask was kcalloc'd. */
	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
/*
 * Recompute and push the per-datum size for @buffer's own scan mask
 * and timestamp setting; no-op for backends without the callback.
 */
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
/*
 * Core buffer (de)activation: optionally remove @remove_buffer and/or
 * insert @insert_buffer, then recompute the compound scan mask over
 * all remaining active buffers and restart capture.
 *
 * Caller must hold indio_dev->mlock.  On failure the inserted buffer
 * is rolled back out; a return of -EINVAL via 'success' means the new
 * mask was rejected but the previous configuration was restored.
 */
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		/* old_mask was dynamically allocated in this case */
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	/* OR together the masks of every remaining active buffer. */
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				/* Restore and restart with old config. */
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			}
			else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		/* Device accepts any mask; own the compound mask. */
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	/* Free whichever mask copy is no longer referenced. */
	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}
/*
 * Locked wrapper around __iio_update_buffers(): takes info_exist_lock
 * then mlock, filters out requests that are already satisfied and
 * refuses to act on an unregistered device.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	/* Inserting an already-active buffer is a no-op... */
	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	/* ...as is removing an inactive one. */
	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	/* info == NULL marks a device that has been unregistered. */
	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
  662. ssize_t iio_buffer_store_enable(struct device *dev,
  663. struct device_attribute *attr,
  664. const char *buf,
  665. size_t len)
  666. {
  667. int ret;
  668. bool requested_state;
  669. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  670. bool inlist;
  671. ret = strtobool(buf, &requested_state);
  672. if (ret < 0)
  673. return ret;
  674. mutex_lock(&indio_dev->mlock);
  675. /* Find out if it is in the list */
  676. inlist = iio_buffer_is_active(indio_dev->buffer);
  677. /* Already in desired state */
  678. if (inlist == requested_state)
  679. goto done;
  680. if (requested_state)
  681. ret = __iio_update_buffers(indio_dev,
  682. indio_dev->buffer, NULL);
  683. else
  684. ret = __iio_update_buffers(indio_dev,
  685. NULL, indio_dev->buffer);
  686. if (ret < 0)
  687. goto done;
  688. done:
  689. mutex_unlock(&indio_dev->mlock);
  690. return (ret < 0) ? ret : len;
  691. }
  692. EXPORT_SYMBOL(iio_buffer_store_enable);
  693. /**
  694. * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
  695. * @indio_dev: the iio device
  696. * @mask: scan mask to be checked
  697. *
  698. * Return true if exactly one bit is set in the scan mask, false otherwise. It
  699. * can be used for devices where only one channel can be active for sampling at
  700. * a time.
  701. */
  702. bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
  703. const unsigned long *mask)
  704. {
  705. return bitmap_weight(mask, indio_dev->masklength) == 1;
  706. }
  707. EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
  708. static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  709. const unsigned long *mask)
  710. {
  711. if (!indio_dev->setup_ops->validate_scan_mask)
  712. return true;
  713. return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
  714. }
  715. /**
  716. * iio_scan_mask_set() - set particular bit in the scan mask
  717. * @indio_dev: the iio device
  718. * @buffer: the buffer whose scan mask we are interested in
  719. * @bit: the bit to be set.
  720. *
  721. * Note that at this point we have no way of knowing what other
  722. * buffers might request, hence this code only verifies that the
  723. * individual buffers request is plausible.
  724. */
  725. int iio_scan_mask_set(struct iio_dev *indio_dev,
  726. struct iio_buffer *buffer, int bit)
  727. {
  728. const unsigned long *mask;
  729. unsigned long *trialmask;
  730. trialmask = kmalloc(sizeof(*trialmask)*
  731. BITS_TO_LONGS(indio_dev->masklength),
  732. GFP_KERNEL);
  733. if (trialmask == NULL)
  734. return -ENOMEM;
  735. if (!indio_dev->masklength) {
  736. WARN_ON("Trying to set scanmask prior to registering buffer\n");
  737. goto err_invalid_mask;
  738. }
  739. bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
  740. set_bit(bit, trialmask);
  741. if (!iio_validate_scan_mask(indio_dev, trialmask))
  742. goto err_invalid_mask;
  743. if (indio_dev->available_scan_masks) {
  744. mask = iio_scan_mask_match(indio_dev->available_scan_masks,
  745. indio_dev->masklength,
  746. trialmask);
  747. if (!mask)
  748. goto err_invalid_mask;
  749. }
  750. bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
  751. kfree(trialmask);
  752. return 0;
  753. err_invalid_mask:
  754. kfree(trialmask);
  755. return -EINVAL;
  756. }
  757. EXPORT_SYMBOL_GPL(iio_scan_mask_set);
  758. int iio_scan_mask_query(struct iio_dev *indio_dev,
  759. struct iio_buffer *buffer, int bit)
  760. {
  761. if (bit > indio_dev->masklength)
  762. return -EINVAL;
  763. if (!buffer->scan_mask)
  764. return 0;
  765. /* Ensure return value is 0 or 1. */
  766. return !!test_bit(bit, buffer->scan_mask);
  767. };
  768. EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
/*
 * Apply the buffer's demux table to one scan of raw data, gathering
 * the selected regions into the bounce buffer.  With an empty table
 * the data is passed through untouched.
 */
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
  793. static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
  794. {
  795. const void *dataout = iio_demux(buffer, data);
  796. return buffer->access->store_to(buffer, dataout);
  797. }
  798. static void iio_buffer_demux_free(struct iio_buffer *buffer)
  799. {
  800. struct iio_demux_table *p, *q;
  801. list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
  802. list_del(&p->l);
  803. kfree(p);
  804. }
  805. }
  806. int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
  807. {
  808. int ret;
  809. struct iio_buffer *buf;
  810. list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
  811. ret = iio_push_to_buffer(buf, data);
  812. if (ret < 0)
  813. return ret;
  814. }
  815. return 0;
  816. }
  817. EXPORT_SYMBOL_GPL(iio_push_to_buffers);
  818. static int iio_buffer_add_demux(struct iio_buffer *buffer,
  819. struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
  820. unsigned int length)
  821. {
  822. if (*p && (*p)->from + (*p)->length == in_loc &&
  823. (*p)->to + (*p)->length == out_loc) {
  824. (*p)->length += length;
  825. } else {
  826. *p = kmalloc(sizeof(**p), GFP_KERNEL);
  827. if (*p == NULL)
  828. return -ENOMEM;
  829. (*p)->from = in_loc;
  830. (*p)->to = out_loc;
  831. (*p)->length = length;
  832. list_add_tail(&(*p)->l, &buffer->demux_list);
  833. }
  834. return 0;
  835. }
  836. static int iio_buffer_update_demux(struct iio_dev *indio_dev,
  837. struct iio_buffer *buffer)
  838. {
  839. const struct iio_chan_spec *ch;
  840. int ret, in_ind = -1, out_ind, length;
  841. unsigned in_loc = 0, out_loc = 0;
  842. struct iio_demux_table *p = NULL;
  843. /* Clear out any old demux */
  844. iio_buffer_demux_free(buffer);
  845. kfree(buffer->demux_bounce);
  846. buffer->demux_bounce = NULL;
  847. /* First work out which scan mode we will actually have */
  848. if (bitmap_equal(indio_dev->active_scan_mask,
  849. buffer->scan_mask,
  850. indio_dev->masklength))
  851. return 0;
  852. /* Now we have the two masks, work from least sig and build up sizes */
  853. for_each_set_bit(out_ind,
  854. buffer->scan_mask,
  855. indio_dev->masklength) {
  856. in_ind = find_next_bit(indio_dev->active_scan_mask,
  857. indio_dev->masklength,
  858. in_ind + 1);
  859. while (in_ind != out_ind) {
  860. in_ind = find_next_bit(indio_dev->active_scan_mask,
  861. indio_dev->masklength,
  862. in_ind + 1);
  863. ch = iio_find_channel_from_si(indio_dev, in_ind);
  864. if (ch->scan_type.repeat > 1)
  865. length = ch->scan_type.storagebits / 8 *
  866. ch->scan_type.repeat;
  867. else
  868. length = ch->scan_type.storagebits / 8;
  869. /* Make sure we are aligned */
  870. in_loc = roundup(in_loc, length) + length;
  871. }
  872. ch = iio_find_channel_from_si(indio_dev, in_ind);
  873. if (ch->scan_type.repeat > 1)
  874. length = ch->scan_type.storagebits / 8 *
  875. ch->scan_type.repeat;
  876. else
  877. length = ch->scan_type.storagebits / 8;
  878. out_loc = roundup(out_loc, length);
  879. in_loc = roundup(in_loc, length);
  880. ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  881. if (ret)
  882. goto error_clear_mux_table;
  883. out_loc += length;
  884. in_loc += length;
  885. }
  886. /* Relies on scan_timestamp being last */
  887. if (buffer->scan_timestamp) {
  888. ch = iio_find_channel_from_si(indio_dev,
  889. indio_dev->scan_index_timestamp);
  890. if (ch->scan_type.repeat > 1)
  891. length = ch->scan_type.storagebits / 8 *
  892. ch->scan_type.repeat;
  893. else
  894. length = ch->scan_type.storagebits / 8;
  895. out_loc = roundup(out_loc, length);
  896. in_loc = roundup(in_loc, length);
  897. ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  898. if (ret)
  899. goto error_clear_mux_table;
  900. out_loc += length;
  901. in_loc += length;
  902. }
  903. buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
  904. if (buffer->demux_bounce == NULL) {
  905. ret = -ENOMEM;
  906. goto error_clear_mux_table;
  907. }
  908. return 0;
  909. error_clear_mux_table:
  910. iio_buffer_demux_free(buffer);
  911. return ret;
  912. }
  913. int iio_update_demux(struct iio_dev *indio_dev)
  914. {
  915. struct iio_buffer *buffer;
  916. int ret;
  917. list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  918. ret = iio_buffer_update_demux(indio_dev, buffer);
  919. if (ret < 0)
  920. goto error_clear_mux_table;
  921. }
  922. return 0;
  923. error_clear_mux_table:
  924. list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
  925. iio_buffer_demux_free(buffer);
  926. return ret;
  927. }
  928. EXPORT_SYMBOL_GPL(iio_update_demux);
  929. /**
  930. * iio_buffer_release() - Free a buffer's resources
  931. * @ref: Pointer to the kref embedded in the iio_buffer struct
  932. *
  933. * This function is called when the last reference to the buffer has been
  934. * dropped. It will typically free all resources allocated by the buffer. Do not
  935. * call this function manually, always use iio_buffer_put() when done using a
  936. * buffer.
  937. */
  938. static void iio_buffer_release(struct kref *ref)
  939. {
  940. struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
  941. buffer->access->release(buffer);
  942. }
  943. /**
  944. * iio_buffer_get() - Grab a reference to the buffer
  945. * @buffer: The buffer to grab a reference for, may be NULL
  946. *
  947. * Returns the pointer to the buffer that was passed into the function.
  948. */
  949. struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
  950. {
  951. if (buffer)
  952. kref_get(&buffer->ref);
  953. return buffer;
  954. }
  955. EXPORT_SYMBOL_GPL(iio_buffer_get);
  956. /**
  957. * iio_buffer_put() - Release the reference to the buffer
  958. * @buffer: The buffer to release the reference for, may be NULL
  959. */
  960. void iio_buffer_put(struct iio_buffer *buffer)
  961. {
  962. if (buffer)
  963. kref_put(&buffer->ref, iio_buffer_release);
  964. }
  965. EXPORT_SYMBOL_GPL(iio_buffer_put);