/* buffer_cb.c */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

/*
 * A callback buffer wraps a software IIO buffer and hands every scan pushed
 * into it to a consumer-supplied callback instead of exposing the data
 * through a character device.
 */
struct iio_cb_buffer {
	struct iio_buffer buffer;	/* embedded buffer, recovered via container_of() */
	int (*cb)(const void *data, void *private);
	void *private;			/* opaque pointer passed back to cb() */
	struct iio_channel *channels;	/* channel map array from iio_channel_get_all() */
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

/* Called for each scan written to the buffer; forward it to the consumer. */
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	return cb_buff->cb(data, cb_buff->private);
}

/* Called when the last reference to the embedded buffer is dropped. */
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,

	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};

struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL)
		return ERR_PTR(-ENOMEM);

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}

	/*
	 * Every channel in the map must belong to the same IIO device;
	 * set the scan mask bit for each one.
	 */
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

/* Attach the callback buffer to its device and start data flow. */
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

/* Detach the callback buffer from its device and stop data flow. */
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL,
			   &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

/*
 * Release the channel map and drop the buffer reference; the final put
 * invokes iio_buffer_cb_release(), which frees the wrapper.
 */
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
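
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * consumer that registers a callback over all of its mapped channels,
 * starts capture, and later tears everything down. The names example_cb
 * and example_run are hypothetical; only the iio_channel_*_cb calls are
 * provided above.
 */
#if 0
static int example_cb(const void *data, void *private)
{
	/* One demuxed scan per invocation; process or copy it here. */
	return 0;
}

static int example_run(struct device *dev)
{
	struct iio_cb_buffer *cb_buff;
	int ret;

	cb_buff = iio_channel_get_all_cb(dev, example_cb, NULL);
	if (IS_ERR(cb_buff))
		return PTR_ERR(cb_buff);

	ret = iio_channel_start_all_cb(cb_buff);	/* scans now reach example_cb() */
	if (ret)
		goto out_release;

	/* ... capture runs until the consumer decides to stop ... */

	iio_channel_stop_all_cb(cb_buff);
out_release:
	iio_channel_release_all_cb(cb_buff);
	return ret;
}
#endif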