  1. /* rc-ir-raw.c - handle IR pulse/space events
  2. *
  3. * Copyright (C) 2010 by Mauro Carvalho Chehab
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation version 2 of the License.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/export.h>
  15. #include <linux/kthread.h>
  16. #include <linux/mutex.h>
  17. #include <linux/kmod.h>
  18. #include <linux/sched.h>
  19. #include <linux/freezer.h>
  20. #include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);

/* Bitmask (u64) of the protocols all registered handlers can decode together;
 * updated atomically in handler (un)register, read lock-free via sysfs. */
static atomic64_t available_protocols = ATOMIC64_INIT(0);
/*
 * ir_raw_event_thread() - per-device kthread feeding queued events to decoders
 * @data: the struct ir_raw_event_ctrl of one raw rc device
 *
 * Sleeps while the event kfifo is empty; ir_raw_event_handle() wakes it.
 * Each drained event is offered to every registered handler whose protocol
 * mask overlaps dev->enabled_protocols; a handler with an empty protocol
 * mask receives all events.
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);

		if (!kfifo_len(&raw->kfifo)) {
			/* Nothing queued: go to sleep until woken. The state
			 * is set under raw->lock and kthread_should_stop() is
			 * re-checked afterwards so a stop request arriving
			 * here cannot leave us sleeping forever.
			 */
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		/* kfifo_len() was non-zero, so this should never fail */
		if(!kfifo_out(&raw->kfifo, &ev, 1))
			dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
		spin_unlock_irq(&raw->lock);

		/* hold the handler lock so decoders cannot unregister mid-event */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			if (raw->dev->enabled_protocols & handler->protocols ||
			    !handler->protocols)
				handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
  55. /**
  56. * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
  57. * @dev: the struct rc_dev device descriptor
  58. * @ev: the struct ir_raw_event descriptor of the pulse/space
  59. *
  60. * This routine (which may be called from an interrupt context) stores a
  61. * pulse/space duration for the raw ir decoding state machines. Pulses are
  62. * signalled as positive values and spaces as negative values. A zero value
  63. * will reset the decoding state machines.
  64. */
  65. int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
  66. {
  67. if (!dev->raw)
  68. return -EINVAL;
  69. IR_dprintk(2, "sample: (%05dus %s)\n",
  70. TO_US(ev->duration), TO_STR(ev->pulse));
  71. if (!kfifo_put(&dev->raw->kfifo, *ev)) {
  72. dev_err(&dev->dev, "IR event FIFO is full!\n");
  73. return -ENOSPC;
  74. }
  75. return 0;
  76. }
  77. EXPORT_SYMBOL_GPL(ir_raw_event_store);
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev: the struct rc_dev device descriptor
 * @type: the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t now;
	s64 delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;
	int delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	/* the input layer's key-repeat delay doubles as the gap threshold */
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		/* the interval just measured was a space */
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		/* the interval just measured was a pulse */
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		/* a pulse arrived: leave idle mode before processing it */
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		/* no sample being accumulated yet; start with this one */
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		/* same polarity as the pending sample: merge durations */
		dev->raw->this_ev.duration += ev->duration;
	else {
		/* polarity flipped: flush the pending sample, start anew */
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
  160. /**
  161. * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
  162. * @dev: the struct rc_dev device descriptor
  163. * @idle: whether the device is idle or not
  164. */
  165. void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
  166. {
  167. if (!dev->raw)
  168. return;
  169. IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
  170. if (idle) {
  171. dev->raw->this_ev.timeout = true;
  172. ir_raw_event_store(dev, &dev->raw->this_ev);
  173. init_ir_raw_event(&dev->raw->this_ev);
  174. }
  175. if (dev->s_idle)
  176. dev->s_idle(dev, idle);
  177. dev->idle = idle;
  178. }
  179. EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  180. /**
  181. * ir_raw_event_handle() - schedules the decoding of stored ir data
  182. * @dev: the struct rc_dev device descriptor
  183. *
  184. * This routine will tell rc-core to start decoding stored ir data.
  185. */
  186. void ir_raw_event_handle(struct rc_dev *dev)
  187. {
  188. unsigned long flags;
  189. if (!dev->raw)
  190. return;
  191. spin_lock_irqsave(&dev->raw->lock, flags);
  192. wake_up_process(dev->raw->thread);
  193. spin_unlock_irqrestore(&dev->raw->lock, flags);
  194. }
  195. EXPORT_SYMBOL_GPL(ir_raw_event_handle);
  196. /* used internally by the sysfs interface */
  197. u64
  198. ir_raw_get_allowed_protocols(void)
  199. {
  200. return atomic64_read(&available_protocols);
  201. }
/*
 * No-op rc_dev->change_protocol for raw devices: protocol selection happens
 * per event (the decode loop matches dev->enabled_protocols), so there is
 * no hardware state to program here.
 */
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}
  207. static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
  208. {
  209. mutex_lock(&dev->lock);
  210. dev->enabled_protocols &= ~protocols;
  211. dev->enabled_wakeup_protocols &= ~protocols;
  212. mutex_unlock(&dev->lock);
  213. }
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	INIT_KFIFO(dev->raw->kfifo);
	spin_lock_init(&dev->raw->lock);
	/* one decoding kthread per raw device, named after its minor */
	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
				       "rc%u", dev->minor);

	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		goto out;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	/* give each decoder a chance to set up per-device state */
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}
/* Tear down the raw client: stop its kthread, detach decoders, free state. */
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	/* stop the decoding thread before freeing what it reads */
	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	/* let each decoder drop its per-device state, if it kept any */
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfree(dev->raw);
	dev->raw = NULL;
}
  263. /*
  264. * Extension interface - used to register the IR decoders
  265. */
  266. int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
  267. {
  268. struct ir_raw_event_ctrl *raw;
  269. mutex_lock(&ir_raw_handler_lock);
  270. list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
  271. if (ir_raw_handler->raw_register)
  272. list_for_each_entry(raw, &ir_raw_client_list, list)
  273. ir_raw_handler->raw_register(raw->dev);
  274. atomic64_or(ir_raw_handler->protocols, &available_protocols);
  275. mutex_unlock(&ir_raw_handler_lock);
  276. return 0;
  277. }
  278. EXPORT_SYMBOL(ir_raw_handler_register);
  279. void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
  280. {
  281. struct ir_raw_event_ctrl *raw;
  282. u64 protocols = ir_raw_handler->protocols;
  283. mutex_lock(&ir_raw_handler_lock);
  284. list_del(&ir_raw_handler->list);
  285. list_for_each_entry(raw, &ir_raw_client_list, list) {
  286. ir_raw_disable_protocols(raw->dev, protocols);
  287. if (ir_raw_handler->raw_unregister)
  288. ir_raw_handler->raw_unregister(raw->dev);
  289. }
  290. atomic64_andnot(protocols, &available_protocols);
  291. mutex_unlock(&ir_raw_handler_lock);
  292. }
  293. EXPORT_SYMBOL(ir_raw_handler_unregister);