industrialio-buffer.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp: File structure pointer for the char device
 * @buf: Destination buffer for iio buffer read
 * @n: First n bytes to read
 * @f_ps: Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	do {
		ret = wait_event_interruptible(rb->pollq,
		      iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

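/*
 * Usage sketch (illustrative only, not part of the original file): the read
 * path above is normally driven from userspace by poll()ing the character
 * device and then reading whole scans.  Assuming a hypothetical device node
 * /dev/iio:device0 and a 16-byte scan, a minimal consumer could look like:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char scan[16];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, scan, sizeof(scan));
 *		if (n < 0 && errno != EAGAIN)
 *			break;
 *		process_scan(scan, n);	(hypothetical helper)
 *	}
 */
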
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp: File structure pointer for device access
 * @wait: Poll table structure pointer for which the driver adds
 *	  a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

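/*
 * Usage sketch (illustrative, names are hypothetical): a buffer
 * implementation embeds struct iio_buffer as its first member, calls
 * iio_buffer_init() on it and wires up its access functions, e.g.:
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;
 *		struct kfifo kf;
 *	};
 *
 *	static struct iio_buffer *my_buffer_allocate(void)
 *	{
 *		struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *		if (!mb)
 *			return NULL;
 *		iio_buffer_init(&mb->buffer);
 *		mb->buffer.access = &my_buffer_access_funcs;
 *		return &mb->buffer;
 *	}
 */
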
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

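/*
 * For reference, the two format strings above are what userspace sees in
 * scan_elements/<chan>_type, e.g. "le:s12/16>>0" for a little-endian signed
 * 12-bit value stored in 16 bits with no shift, or "le:s12/16X2>>0" when the
 * scan_type carries a repeat count of 2 (example values only).
 */
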
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

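/*
 * Illustrative sketch (hypothetical device): iio_scan_mask_match() walks
 * indio_dev->available_scan_masks, which a driver supplies as an array of
 * masks terminated by an all-zero entry, e.g. for hardware that can only
 * capture channels 0+1 or 2+3 together:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),
 *		BIT(2) | BIT(3),
 *		0,
 *	};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * This layout assumes masklength fits within a single unsigned long, which
 * is the common case for such tables.
 */
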
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

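/*
 * Worked example (hypothetical numbers): with two enabled channels of
 * storagebits = 16 and a 64-bit timestamp, the loop above packs 2 + 2 = 4
 * bytes of sample data; the timestamp is then aligned up to offset 8 and
 * adds 8 more bytes, so the scan is 16 bytes long with 4 bytes of padding
 * between the samples and the timestamp.
 */
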
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}

struct iio_device_config {
	unsigned int mode;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
	}

	if (insert_buffer)
		modes &= insert_buffer->access->modes;

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_run_postdisable;
		}
	}

	return 0;

err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

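/*
 * Usage sketch (illustrative): an in-kernel consumer that has allocated its
 * own struct iio_buffer attaches and later detaches it through this helper:
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	(attach)
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);		(detach)
 *
 * my_buffer here is hypothetical and must have been set up by the caller
 * (scan_mask allocated, access functions assigned) before attaching.
 */
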
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

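/*
 * Usage sketch (illustrative, names are hypothetical): a driver whose
 * hardware can only sample one channel at a time plugs this helper into its
 * buffer setup ops:
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.postenable = my_buffer_postenable,
 *		.predisable = my_buffer_predisable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_buffer_setup_ops;
 */
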
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

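/*
 * Usage sketch (illustrative): a triggered-buffer handler typically fills
 * one scan worth of data and pushes it through this helper, e.g.:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];
 *
 *		my_read_scan(indio_dev, scan);		(hypothetical)
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 *
 * The data handed over must be laid out to match indio_dev->scan_bytes for
 * the currently active scan mask; the 32-byte buffer is an example size.
 */
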
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

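/*
 * Usage sketch (illustrative): code that keeps a buffer pointer beyond the
 * current call chain takes its own reference and drops it when finished:
 *
 *	state->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(state->buffer);
 *
 * state is hypothetical; both helpers accept NULL, so callers need no extra
 * checks on error paths.
 */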