// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);
static int ir_raw_event_thread(void *data)
{
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

        while (1) {
                mutex_lock(&ir_raw_handler_lock);
                while (kfifo_out(&raw->kfifo, &ev, 1)) {
                        list_for_each_entry(handler, &ir_raw_handler_list, list)
                                if (raw->dev->enabled_protocols &
                                    handler->protocols || !handler->protocols)
                                        handler->decode(raw->dev, ev);
                        ir_lirc_raw_event(raw->dev, ev);
                        raw->prev_ev = ev;
                }
                mutex_unlock(&ir_raw_handler_lock);

                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                } else if (!kfifo_is_empty(&raw->kfifo))
                        set_current_state(TASK_RUNNING);

                schedule();
        }

        return 0;
}
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
                TO_US(ev->duration), TO_STR(ev->pulse));

        if (!kfifo_put(&dev->raw->kfifo, *ev)) {
                dev_err(&dev->dev, "IR event FIFO is full!\n");
                return -ENOSPC;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
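
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver whose
 * hardware reports complete durations can feed them to the decoders like
 * this. Durations are in nanoseconds in this kernel version, so the
 * US_TO_NS() helper from rc-core.h converts from microseconds.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = true;
 *	ev.duration = US_TO_NS(900);	// a 900us mark
 *	ir_raw_event_store(dev, &ev);
 *	ir_raw_event_handle(dev);	// wake the decoder thread
 */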
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev: the struct rc_dev device descriptor
 * @pulse: true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
        ktime_t now;
        DEFINE_IR_RAW_EVENT(ev);

        if (!dev->raw)
                return -EINVAL;

        now = ktime_get();
        ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
        /* the new edge ends the previous period, which has the opposite type */
        ev.pulse = !pulse;

        return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
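
/*
 * Usage sketch (hypothetical, not part of this file): hardware that only
 * raises an interrupt on each level change can report bare edges and let
 * rc-core compute the durations. "my_hw_level_is_high" stands in for a
 * driver-specific register read.
 *
 *	static irqreturn_t my_ir_isr(int irq, void *data)
 *	{
 *		struct rc_dev *dev = data;
 *
 *		ir_raw_event_store_edge(dev, my_hw_level_is_high());
 *		return IRQ_HANDLED;
 *	}
 */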
/*
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *                                     ir decoders, schedule decoding and
 *                                     timeout
 * @dev: the struct rc_dev device descriptor
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
        ktime_t now;
        int rc = 0;

        if (!dev->raw)
                return -EINVAL;

        now = ktime_get();

        spin_lock(&dev->raw->edge_spinlock);
        rc = ir_raw_event_store(dev, ev);

        dev->raw->last_event = now;

        /* timer could be set to timeout (125ms by default) */
        if (!timer_pending(&dev->raw->edge_handle) ||
            time_after(dev->raw->edge_handle.expires,
                       jiffies + msecs_to_jiffies(15))) {
                mod_timer(&dev->raw->edge_handle,
                          jiffies + msecs_to_jiffies(15));
        }
        spin_unlock(&dev->raw->edge_spinlock);

        return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev: the struct rc_dev device descriptor
 * @ev: the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffers.
 * It automerges samples of the same type, and handles timeouts. Returns
 * non-zero if the event was added, and zero if the event was ignored due
 * to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
        if (!dev->raw)
                return -EINVAL;

        /* Ignore spaces in idle mode */
        if (dev->idle && !ev->pulse)
                return 0;
        else if (dev->idle)
                ir_raw_event_set_idle(dev, false);

        if (!dev->raw->this_ev.duration)
                dev->raw->this_ev = *ev;
        else if (ev->pulse == dev->raw->this_ev.pulse)
                dev->raw->this_ev.duration += ev->duration;
        else {
                ir_raw_event_store(dev, &dev->raw->this_ev);
                dev->raw->this_ev = *ev;
        }

        /* Enter idle mode if necessary */
        if (!ev->pulse && dev->timeout &&
            dev->raw->this_ev.duration >= dev->timeout)
                ir_raw_event_set_idle(dev, true);

        return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
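
/*
 * Behaviour sketch (illustrative only): feeding pulse(300us), pulse(200us),
 * space(150us) results in a single merged 500us pulse being queued when the
 * space arrives; the 150us space stays buffered in this_ev until a pulse, or
 * a timeout via ir_raw_event_set_idle(), flushes it.
 */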
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev: the struct rc_dev device descriptor
 * @idle: whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
        if (!dev->raw)
                return;

        dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

        if (idle) {
                dev->raw->this_ev.timeout = true;
                ir_raw_event_store(dev, &dev->raw->this_ev);
                init_ir_raw_event(&dev->raw->this_ev);
        }

        if (dev->s_idle)
                dev->s_idle(dev, idle);

        dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev: the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
        if (!dev->raw || !dev->raw->thread)
                return;

        wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
        return atomic64_read(&available_protocols);
}
static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
        struct ir_raw_handler *handler;
        u32 timeout = 0;

        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (!(dev->enabled_protocols & handler->protocols) &&
                    (*rc_proto & handler->protocols) && handler->raw_register)
                        handler->raw_register(dev);

                if ((dev->enabled_protocols & handler->protocols) &&
                    !(*rc_proto & handler->protocols) &&
                    handler->raw_unregister)
                        handler->raw_unregister(dev);
        }
        mutex_unlock(&ir_raw_handler_lock);

        if (!dev->max_timeout)
                return 0;

        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (handler->protocols & *rc_proto) {
                        if (timeout < handler->min_timeout)
                                timeout = handler->min_timeout;
                }
        }
        mutex_unlock(&ir_raw_handler_lock);

        if (timeout == 0)
                timeout = IR_DEFAULT_TIMEOUT;
        else
                timeout += MS_TO_NS(10);

        if (timeout < dev->min_timeout)
                timeout = dev->min_timeout;
        else if (timeout > dev->max_timeout)
                timeout = dev->max_timeout;

        if (dev->s_timeout)
                dev->s_timeout(dev, timeout);
        else
                dev->timeout = timeout;

        return 0;
}
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
        mutex_lock(&dev->lock);
        dev->enabled_protocols &= ~protocols;
        mutex_unlock(&dev->lock);
}
/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Manchester modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
                          const struct ir_raw_timings_manchester *timings,
                          unsigned int n, u64 data)
{
        bool need_pulse;
        u64 i;
        int ret = -ENOBUFS;

        i = BIT_ULL(n - 1);

        if (timings->leader_pulse) {
                if (!max--)
                        return ret;
                init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
                if (timings->leader_space) {
                        if (!max--)
                                return ret;
                        init_ir_raw_event_duration(++(*ev), 0,
                                                   timings->leader_space);
                }
        } else {
                /* continue existing signal */
                --(*ev);
        }
        /* from here on *ev will point to the last event rather than the next */

        while (n && i > 0) {
                need_pulse = !(data & i);
                if (timings->invert)
                        need_pulse = !need_pulse;
                if (need_pulse == !!(*ev)->pulse) {
                        (*ev)->duration += timings->clock;
                } else {
                        if (!max--)
                                goto nobufs;
                        init_ir_raw_event_duration(++(*ev), need_pulse,
                                                   timings->clock);
                }

                if (!max--)
                        goto nobufs;
                init_ir_raw_event_duration(++(*ev), !need_pulse,
                                           timings->clock);
                i >>= 1;
        }

        if (timings->trailer_space) {
                if (!(*ev)->pulse)
                        (*ev)->duration += timings->trailer_space;
                else if (!max--)
                        goto nobufs;
                else
                        init_ir_raw_event_duration(++(*ev), 0,
                                                   timings->trailer_space);
        }

        ret = 0;
nobufs:
        /* point to the next event rather than last event before returning */
        ++(*ev);
        return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
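
/*
 * Usage sketch (illustrative, not part of this file): encoding the 14 bits
 * of an RC-5 frame, which uses one Manchester clock of roughly 889us. The
 * timing values here are approximate and the variable names hypothetical;
 * only the struct fields match ir_raw_timings_manchester.
 *
 *	static const struct ir_raw_timings_manchester sketch_rc5_timings = {
 *		.leader_pulse  = US_TO_NS(889),
 *		.clock         = US_TO_NS(889),
 *		.trailer_space = US_TO_NS(889),
 *	};
 *
 *	struct ir_raw_event buf[32], *e = buf;
 *	int ret = ir_raw_gen_manchester(&e, ARRAY_SIZE(buf),
 *					&sketch_rc5_timings, 14, raw_bits);
 *	// on success, e - buf is the number of events written
 */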
/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Pulse distance modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
                  const struct ir_raw_timings_pd *timings,
                  unsigned int n, u64 data)
{
        int i;
        int ret;
        unsigned int space;

        if (timings->header_pulse) {
                ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
                                             timings->header_space);
                if (ret)
                        return ret;
        }

        if (timings->msb_first) {
                for (i = n - 1; i >= 0; --i) {
                        space = timings->bit_space[(data >> i) & 1];
                        ret = ir_raw_gen_pulse_space(ev, &max,
                                                     timings->bit_pulse,
                                                     space);
                        if (ret)
                                return ret;
                }
        } else {
                for (i = 0; i < n; ++i, data >>= 1) {
                        space = timings->bit_space[data & 1];
                        ret = ir_raw_gen_pulse_space(ev, &max,
                                                     timings->bit_pulse,
                                                     space);
                        if (ret)
                                return ret;
                }
        }

        ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
                                     timings->trailer_space);
        return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
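
/*
 * Usage sketch (illustrative, not part of this file): NEC-style
 * pulse-distance timings. A "0" bit is a pulse followed by a short space,
 * a "1" bit is a pulse followed by a long space; the values approximate
 * the NEC spec and the variable name is hypothetical.
 *
 *	static const struct ir_raw_timings_pd sketch_nec_timings = {
 *		.header_pulse  = US_TO_NS(9000),
 *		.header_space  = US_TO_NS(4500),
 *		.bit_pulse     = US_TO_NS(562),
 *		.bit_space[0]  = US_TO_NS(562),
 *		.bit_space[1]  = US_TO_NS(1687),
 *		.trailer_pulse = US_TO_NS(562),
 *		.trailer_space = US_TO_NS(108000),
 *		.msb_first     = 0,
 *	};
 */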
/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev: Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max: Maximum number of raw events to fill.
 * @timings: Pulse length modulation timings.
 * @n: Number of bits of data.
 * @data: Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          full encoded data. In this case all @max events will have been
 *          written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
                  const struct ir_raw_timings_pl *timings,
                  unsigned int n, u64 data)
{
        int i;
        int ret = -ENOBUFS;
        unsigned int pulse;

        if (!max--)
                return ret;

        init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

        if (timings->msb_first) {
                for (i = n - 1; i >= 0; --i) {
                        if (!max--)
                                return ret;
                        init_ir_raw_event_duration((*ev)++, 0,
                                                   timings->bit_space);
                        if (!max--)
                                return ret;
                        pulse = timings->bit_pulse[(data >> i) & 1];
                        init_ir_raw_event_duration((*ev)++, 1, pulse);
                }
        } else {
                for (i = 0; i < n; ++i, data >>= 1) {
                        if (!max--)
                                return ret;
                        init_ir_raw_event_duration((*ev)++, 0,
                                                   timings->bit_space);
                        if (!max--)
                                return ret;
                        pulse = timings->bit_pulse[data & 1];
                        init_ir_raw_event_duration((*ev)++, 1, pulse);
                }
        }

        if (!max--)
                return ret;

        init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

        return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
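
/*
 * Usage sketch (illustrative, not part of this file): Sony SIRC-style
 * pulse-length timings, where the pulse width carries the bit value and
 * the space width is fixed; the values approximate the SIRC spec and the
 * variable name is hypothetical.
 *
 *	static const struct ir_raw_timings_pl sketch_sirc_timings = {
 *		.header_pulse  = US_TO_NS(2400),
 *		.bit_space     = US_TO_NS(600),
 *		.bit_pulse[0]  = US_TO_NS(600),
 *		.bit_pulse[1]  = US_TO_NS(1200),
 *		.trailer_space = US_TO_NS(10000),
 *		.msb_first     = 0,
 *	};
 */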
/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol: protocol
 * @scancode: scancode filter describing a single scancode
 * @events: array of raw events to write into
 * @max: max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns: The number of events written.
 *          -ENOBUFS if there isn't enough space in the array to fit the
 *          encoding. In this case all @max events will have been written.
 *          -EINVAL if the scancode is ambiguous or invalid, or if no
 *          compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
                           struct ir_raw_event *events, unsigned int max)
{
        struct ir_raw_handler *handler;
        int ret = -EINVAL;
        u64 mask = 1ULL << protocol;

        ir_raw_load_modules(&mask);

        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (handler->protocols & mask && handler->encode) {
                        ret = handler->encode(protocol, scancode, events, max);
                        if (ret >= 0 || ret == -ENOBUFS)
                                break;
                }
        }
        mutex_unlock(&ir_raw_handler_lock);

        return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
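
/*
 * Usage sketch (illustrative, not part of this file): encoding an NEC
 * scancode into a raw event buffer, e.g. on behalf of a transmitter.
 *
 *	struct ir_raw_event buf[96];
 *	int count = ir_raw_encode_scancode(RC_PROTO_NEC, 0x0408, buf,
 *					   ARRAY_SIZE(buf));
 *	if (count >= 0)
 *		// buf[0..count-1] holds the pulse/space sequence
 */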
/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t: timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first of all, rather than calling ir_raw_event_handle() for each
 * edge and waking up the rc thread, 15 ms after the first edge
 * ir_raw_event_handle() is called. Secondly, it generates a timeout event
 * if no more IR is received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
        struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
        struct rc_dev *dev = raw->dev;
        unsigned long flags;
        ktime_t interval;

        spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
        interval = ktime_sub(ktime_get(), dev->raw->last_event);
        if (ktime_to_ns(interval) >= dev->timeout) {
                DEFINE_IR_RAW_EVENT(ev);

                ev.timeout = true;
                ev.duration = ktime_to_ns(interval);

                ir_raw_event_store(dev, &ev);
        } else {
                mod_timer(&dev->raw->edge_handle,
                          jiffies + nsecs_to_jiffies(dev->timeout -
                                                     ktime_to_ns(interval)));
        }
        spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

        ir_raw_event_handle(dev);
}
/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol: protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns: The carrier in Hz
 *          -EINVAL if the protocol is invalid, or if no
 *          compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
        struct ir_raw_handler *handler;
        int ret = -EINVAL;
        u64 mask = BIT_ULL(protocol);

        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (handler->protocols & mask && handler->encode) {
                        ret = handler->carrier;
                        break;
                }
        }
        mutex_unlock(&ir_raw_handler_lock);

        return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
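
/*
 * Usage sketch (illustrative): a transmit path can pair this with
 * ir_raw_encode_scancode() to program the hardware carrier; for example,
 * ir_raw_encode_carrier(RC_PROTO_NEC) returns the NEC carrier (38 kHz)
 * when the NEC encoder is loaded.
 */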
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
        if (!dev)
                return -EINVAL;

        dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
        if (!dev->raw)
                return -ENOMEM;

        dev->raw->dev = dev;
        dev->change_protocol = change_protocol;
        spin_lock_init(&dev->raw->edge_spinlock);
        timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
        INIT_KFIFO(dev->raw->kfifo);

        return 0;
}
int ir_raw_event_register(struct rc_dev *dev)
{
        struct task_struct *thread;

        thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        dev->raw->thread = thread;

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&dev->raw->list, &ir_raw_client_list);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
void ir_raw_event_free(struct rc_dev *dev)
{
        if (!dev)
                return;

        kfree(dev->raw);
        dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
        struct ir_raw_handler *handler;

        if (!dev || !dev->raw)
                return;

        kthread_stop(dev->raw->thread);
        del_timer_sync(&dev->raw->edge_handle);

        mutex_lock(&ir_raw_handler_lock);
        list_del(&dev->raw->list);
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_unregister &&
                    (handler->protocols & dev->enabled_protocols))
                        handler->raw_unregister(dev);
        mutex_unlock(&ir_raw_handler_lock);

        ir_raw_event_free(dev);
}
/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
        atomic64_or(ir_raw_handler->protocols, &available_protocols);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;
        u64 protocols = ir_raw_handler->protocols;

        mutex_lock(&ir_raw_handler_lock);
        list_del(&ir_raw_handler->list);
        list_for_each_entry(raw, &ir_raw_client_list, list) {
                if (ir_raw_handler->raw_unregister &&
                    (raw->dev->enabled_protocols & protocols))
                        ir_raw_handler->raw_unregister(raw->dev);
                ir_raw_disable_protocols(raw->dev, protocols);
        }
        atomic64_andnot(protocols, &available_protocols);
        mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
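
/*
 * Usage sketch (illustrative, not part of this file): a protocol decoder
 * module registers its handler at init time. The field names match
 * struct ir_raw_handler in rc-core-priv.h; "sketch_decode" and the
 * module names are hypothetical.
 *
 *	static struct ir_raw_handler sketch_handler = {
 *		.protocols = RC_PROTO_BIT_NEC,
 *		.decode    = sketch_decode,
 *	};
 *
 *	static int __init sketch_decode_init(void)
 *	{
 *		return ir_raw_handler_register(&sketch_handler);
 *	}
 *
 *	static void __exit sketch_decode_exit(void)
 *	{
 *		ir_raw_handler_unregister(&sketch_handler);
 *	}
 */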