industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
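
/*
 * Returns true when a read of @to_wait scan elements can complete: either
 * enough data is already available in the buffer (topping it up from the
 * hardware FIFO when @to_flush is non-zero), or the device has been
 * unregistered and any waiting reader must be woken up.
 */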
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && !avail && to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait = 0;
	size_t to_read;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	to_read = min_t(size_t, n / datum_size, rb->watermark);

	if (!(filp->f_flags & O_NONBLOCK))
		to_wait = to_read;

	do {
		ret = wait_event_interruptible(rb->pollq,
			iio_buffer_ready(indio_dev, rb, to_wait, to_read));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
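
/*
 * Walk the driver-supplied list of available scan masks and return the
 * first one that is a superset of the requested mask, so the device can
 * be programmed with a mode it actually supports.
 */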
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
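
/*
 * Compute how many bytes one demuxed scan occupies for the given channel
 * mask: each enabled channel is padded so that it starts on a multiple of
 * its own storage size.  For example, a single 16-bit channel plus the
 * 64-bit timestamp takes 2 data bytes, 6 bytes of padding and 8 bytes of
 * timestamp, i.e. 16 bytes per scan.
 */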
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
	} else { /* Should never be reached */
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			pr_info("Buffer not started: no trigger\n");
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}
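
/*
 * Add @insert_buffer to and/or remove @remove_buffer from the device's set
 * of active buffers, tearing the buffering down and bringing it back up
 * with the combined scan mask.  Both info_exist_lock and mlock are held
 * across the update.
 */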
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
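
/*
 * Copy only the scan elements this buffer asked for out of a full scan,
 * using the precomputed demux table.  When the buffer's mask matches the
 * device's active mask the demux list is empty and the original data is
 * returned untouched.
 */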
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
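
/*
 * Record one copy region in the demux table.  When the new region follows
 * the previous entry contiguously in both source and destination, the
 * previous entry is simply extended, so adjacent channels that keep their
 * relative position collapse into a single memcpy.
 */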
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);