rc-ir-raw.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717
  1. // SPDX-License-Identifier: GPL-2.0
  2. // rc-ir-raw.c - handle IR pulse/space events
  3. //
  4. // Copyright (C) 2010 by Mauro Carvalho Chehab
  5. #include <linux/export.h>
  6. #include <linux/kthread.h>
  7. #include <linux/mutex.h>
  8. #include <linux/kmod.h>
  9. #include <linux/sched.h>
  10. #include "rc-core-priv.h"
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/*
 * Bitmask of all protocols currently provided by registered handlers;
 * updated in ir_raw_handler_(un)register() and read through
 * ir_raw_get_allowed_protocols().
 */
static atomic64_t available_protocols = ATOMIC64_INIT(0);
/*
 * Per-device worker thread: drains the raw event kfifo and feeds each
 * stored pulse/space to every protocol decoder (and to lirc).  It sleeps
 * until woken by ir_raw_event_handle() and exits on kthread_stop().
 */
static int ir_raw_event_thread(void *data)
{
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = data;
        struct rc_dev *dev = raw->dev;

        while (1) {
                mutex_lock(&ir_raw_handler_lock);
                while (kfifo_out(&raw->kfifo, &ev, 1)) {
                        /* Sanity-check timing events before decoding */
                        if (is_timing_event(ev)) {
                                if (ev.duration == 0)
                                        dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
                                if (is_timing_event(raw->prev_ev) &&
                                    !is_transition(&ev, &raw->prev_ev))
                                        dev_warn_once(&dev->dev, "two consecutive events of type %s",
                                                      TO_STR(ev.pulse));
                        }
                        /*
                         * A handler with protocols == 0 decodes everything;
                         * otherwise it only runs when one of its protocols
                         * is enabled on this device.
                         */
                        list_for_each_entry(handler, &ir_raw_handler_list, list)
                                if (dev->enabled_protocols &
                                    handler->protocols || !handler->protocols)
                                        handler->decode(dev, ev);
                        lirc_raw_event(dev, ev);
                        raw->prev_ev = ev;
                }
                mutex_unlock(&ir_raw_handler_lock);

                /*
                 * Mark ourselves sleeping *before* re-checking the stop and
                 * fifo-non-empty conditions, so a concurrent wakeup cannot
                 * be lost between the check and schedule().
                 */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                } else if (!kfifo_is_empty(&raw->kfifo))
                        set_current_state(TASK_RUNNING);

                schedule();
        }

        return 0;
}
  52. /**
  53. * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
  54. * @dev: the struct rc_dev device descriptor
  55. * @ev: the struct ir_raw_event descriptor of the pulse/space
  56. *
  57. * This routine (which may be called from an interrupt context) stores a
  58. * pulse/space duration for the raw ir decoding state machines. Pulses are
  59. * signalled as positive values and spaces as negative values. A zero value
  60. * will reset the decoding state machines.
  61. */
  62. int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
  63. {
  64. if (!dev->raw)
  65. return -EINVAL;
  66. dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
  67. ev->duration, TO_STR(ev->pulse));
  68. if (!kfifo_put(&dev->raw->kfifo, *ev)) {
  69. dev_err(&dev->dev, "IR event FIFO is full!\n");
  70. return -ENOSPC;
  71. }
  72. return 0;
  73. }
  74. EXPORT_SYMBOL_GPL(ir_raw_event_store);
  75. /**
  76. * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
  77. * @dev: the struct rc_dev device descriptor
  78. * @pulse: true for pulse, false for space
  79. *
  80. * This routine (which may be called from an interrupt context) is used to
  81. * store the beginning of an ir pulse or space (or the start/end of ir
  82. * reception) for the raw ir decoding state machines. This is used by
  83. * hardware which does not provide durations directly but only interrupts
  84. * (or similar events) on state change.
  85. */
  86. int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
  87. {
  88. ktime_t now;
  89. struct ir_raw_event ev = {};
  90. if (!dev->raw)
  91. return -EINVAL;
  92. now = ktime_get();
  93. ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
  94. ev.pulse = !pulse;
  95. return ir_raw_event_store_with_timeout(dev, &ev);
  96. }
  97. EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
/**
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
        ktime_t now;
        int rc = 0;

        if (!dev->raw)
                return -EINVAL;

        now = ktime_get();

        spin_lock(&dev->raw->edge_spinlock);
        rc = ir_raw_event_store(dev, ev);

        dev->raw->last_event = now;

        /*
         * Arm (or pull in) the edge timer so decoding runs ~15ms from now,
         * unless it is already pending and due to fire sooner; the timer
         * callback later re-arms it to generate a timeout event.
         */
        if (!timer_pending(&dev->raw->edge_handle) ||
            time_after(dev->raw->edge_handle.expires,
                       jiffies + msecs_to_jiffies(15))) {
                mod_timer(&dev->raw->edge_handle,
                          jiffies + msecs_to_jiffies(15));
        }
        spin_unlock(&dev->raw->edge_spinlock);

        return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
  130. /**
  131. * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
  132. * @dev: the struct rc_dev device descriptor
  133. * @ev: the event that has occurred
  134. *
  135. * This routine (which may be called from an interrupt context) works
  136. * in similar manner to ir_raw_event_store_edge.
  137. * This routine is intended for devices with limited internal buffer
  138. * It automerges samples of same type, and handles timeouts. Returns non-zero
  139. * if the event was added, and zero if the event was ignored due to idle
  140. * processing.
  141. */
  142. int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
  143. {
  144. if (!dev->raw)
  145. return -EINVAL;
  146. /* Ignore spaces in idle mode */
  147. if (dev->idle && !ev->pulse)
  148. return 0;
  149. else if (dev->idle)
  150. ir_raw_event_set_idle(dev, false);
  151. if (!dev->raw->this_ev.duration)
  152. dev->raw->this_ev = *ev;
  153. else if (ev->pulse == dev->raw->this_ev.pulse)
  154. dev->raw->this_ev.duration += ev->duration;
  155. else {
  156. ir_raw_event_store(dev, &dev->raw->this_ev);
  157. dev->raw->this_ev = *ev;
  158. }
  159. /* Enter idle mode if necessary */
  160. if (!ev->pulse && dev->timeout &&
  161. dev->raw->this_ev.duration >= dev->timeout)
  162. ir_raw_event_set_idle(dev, true);
  163. return 1;
  164. }
  165. EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
        if (!dev->raw)
                return;

        dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

        if (idle) {
                /* Flush the currently merged sample, flagged as a timeout */
                dev->raw->this_ev.timeout = true;
                ir_raw_event_store(dev, &dev->raw->this_ev);
                dev->raw->this_ev = (struct ir_raw_event) {};
        }

        /* Let the driver react to the hint before the new state is recorded */
        if (dev->s_idle)
                dev->s_idle(dev, idle);

        dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  186. /**
  187. * ir_raw_event_handle() - schedules the decoding of stored ir data
  188. * @dev: the struct rc_dev device descriptor
  189. *
  190. * This routine will tell rc-core to start decoding stored ir data.
  191. */
  192. void ir_raw_event_handle(struct rc_dev *dev)
  193. {
  194. if (!dev->raw || !dev->raw->thread)
  195. return;
  196. wake_up_process(dev->raw->thread);
  197. }
  198. EXPORT_SYMBOL_GPL(ir_raw_event_handle);
  199. /* used internally by the sysfs interface */
  200. u64
  201. ir_raw_get_allowed_protocols(void)
  202. {
  203. return atomic64_read(&available_protocols);
  204. }
/*
 * Callback installed as dev->change_protocol by ir_raw_event_prepare().
 * Switches handlers on/off for the new protocol mask and recomputes the
 * device timeout accordingly.
 */
static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
        struct ir_raw_handler *handler;
        u32 timeout = 0;

        /*
         * Tell each handler whether it is being switched on or off for
         * this device: raw_register() for protocols that become enabled,
         * raw_unregister() for protocols that become disabled.
         */
        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (!(dev->enabled_protocols & handler->protocols) &&
                    (*rc_proto & handler->protocols) && handler->raw_register)
                        handler->raw_register(dev);

                if ((dev->enabled_protocols & handler->protocols) &&
                    !(*rc_proto & handler->protocols) &&
                    handler->raw_unregister)
                        handler->raw_unregister(dev);
        }
        mutex_unlock(&ir_raw_handler_lock);

        /* Without max_timeout the timeout is not adjustable - we are done */
        if (!dev->max_timeout)
                return 0;

        /* Largest min_timeout over all handlers matching the new mask */
        mutex_lock(&ir_raw_handler_lock);
        list_for_each_entry(handler, &ir_raw_handler_list, list) {
                if (handler->protocols & *rc_proto) {
                        if (timeout < handler->min_timeout)
                                timeout = handler->min_timeout;
                }
        }
        mutex_unlock(&ir_raw_handler_lock);

        /* No matching handler: use the default; otherwise add 10ms slack */
        if (timeout == 0)
                timeout = IR_DEFAULT_TIMEOUT;
        else
                timeout += MS_TO_US(10);

        /* Clamp to the range the hardware supports */
        if (timeout < dev->min_timeout)
                timeout = dev->min_timeout;
        else if (timeout > dev->max_timeout)
                timeout = dev->max_timeout;

        if (dev->s_timeout)
                dev->s_timeout(dev, timeout);
        else
                dev->timeout = timeout;

        return 0;
}
/* Clear @protocols from the device's enabled set, under the device lock */
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
        mutex_lock(&dev->lock);
        dev->enabled_protocols &= ~protocols;
        mutex_unlock(&dev->lock);
}
  250. /**
  251. * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
  252. * @ev: Pointer to pointer to next free event. *@ev is incremented for
  253. * each raw event filled.
  254. * @max: Maximum number of raw events to fill.
  255. * @timings: Manchester modulation timings.
  256. * @n: Number of bits of data.
  257. * @data: Data bits to encode.
  258. *
  259. * Encodes the @n least significant bits of @data using Manchester (bi-phase)
  260. * modulation with the timing characteristics described by @timings, writing up
  261. * to @max raw IR events using the *@ev pointer.
  262. *
  263. * Returns: 0 on success.
  264. * -ENOBUFS if there isn't enough space in the array to fit the
  265. * full encoded data. In this case all @max events will have been
  266. * written.
  267. */
  268. int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
  269. const struct ir_raw_timings_manchester *timings,
  270. unsigned int n, u64 data)
  271. {
  272. bool need_pulse;
  273. u64 i;
  274. int ret = -ENOBUFS;
  275. i = BIT_ULL(n - 1);
  276. if (timings->leader_pulse) {
  277. if (!max--)
  278. return ret;
  279. init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
  280. if (timings->leader_space) {
  281. if (!max--)
  282. return ret;
  283. init_ir_raw_event_duration(++(*ev), 0,
  284. timings->leader_space);
  285. }
  286. } else {
  287. /* continue existing signal */
  288. --(*ev);
  289. }
  290. /* from here on *ev will point to the last event rather than the next */
  291. while (n && i > 0) {
  292. need_pulse = !(data & i);
  293. if (timings->invert)
  294. need_pulse = !need_pulse;
  295. if (need_pulse == !!(*ev)->pulse) {
  296. (*ev)->duration += timings->clock;
  297. } else {
  298. if (!max--)
  299. goto nobufs;
  300. init_ir_raw_event_duration(++(*ev), need_pulse,
  301. timings->clock);
  302. }
  303. if (!max--)
  304. goto nobufs;
  305. init_ir_raw_event_duration(++(*ev), !need_pulse,
  306. timings->clock);
  307. i >>= 1;
  308. }
  309. if (timings->trailer_space) {
  310. if (!(*ev)->pulse)
  311. (*ev)->duration += timings->trailer_space;
  312. else if (!max--)
  313. goto nobufs;
  314. else
  315. init_ir_raw_event_duration(++(*ev), 0,
  316. timings->trailer_space);
  317. }
  318. ret = 0;
  319. nobufs:
  320. /* point to the next event rather than last event before returning */
  321. ++(*ev);
  322. return ret;
  323. }
  324. EXPORT_SYMBOL(ir_raw_gen_manchester);
  325. /**
  326. * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
  327. * @ev: Pointer to pointer to next free event. *@ev is incremented for
  328. * each raw event filled.
  329. * @max: Maximum number of raw events to fill.
  330. * @timings: Pulse distance modulation timings.
  331. * @n: Number of bits of data.
  332. * @data: Data bits to encode.
  333. *
  334. * Encodes the @n least significant bits of @data using pulse-distance
  335. * modulation with the timing characteristics described by @timings, writing up
  336. * to @max raw IR events using the *@ev pointer.
  337. *
  338. * Returns: 0 on success.
  339. * -ENOBUFS if there isn't enough space in the array to fit the
  340. * full encoded data. In this case all @max events will have been
  341. * written.
  342. */
  343. int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
  344. const struct ir_raw_timings_pd *timings,
  345. unsigned int n, u64 data)
  346. {
  347. int i;
  348. int ret;
  349. unsigned int space;
  350. if (timings->header_pulse) {
  351. ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
  352. timings->header_space);
  353. if (ret)
  354. return ret;
  355. }
  356. if (timings->msb_first) {
  357. for (i = n - 1; i >= 0; --i) {
  358. space = timings->bit_space[(data >> i) & 1];
  359. ret = ir_raw_gen_pulse_space(ev, &max,
  360. timings->bit_pulse,
  361. space);
  362. if (ret)
  363. return ret;
  364. }
  365. } else {
  366. for (i = 0; i < n; ++i, data >>= 1) {
  367. space = timings->bit_space[data & 1];
  368. ret = ir_raw_gen_pulse_space(ev, &max,
  369. timings->bit_pulse,
  370. space);
  371. if (ret)
  372. return ret;
  373. }
  374. }
  375. ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
  376. timings->trailer_space);
  377. return ret;
  378. }
  379. EXPORT_SYMBOL(ir_raw_gen_pd);
/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
                  const struct ir_raw_timings_pl *timings,
                  unsigned int n, u64 data)
{
        int i;
        int ret = -ENOBUFS;
        unsigned int pulse;

        if (!max--)
                return ret;
        /* Header pulse opens the transmission */
        init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

        if (timings->msb_first) {
                for (i = n - 1; i >= 0; --i) {
                        if (!max--)
                                return ret;
                        init_ir_raw_event_duration((*ev)++, 0,
                                                   timings->bit_space);
                        if (!max--)
                                return ret;
                        /* The bit value selects the pulse length */
                        pulse = timings->bit_pulse[(data >> i) & 1];
                        init_ir_raw_event_duration((*ev)++, 1, pulse);
                }
        } else {
                for (i = 0; i < n; ++i, data >>= 1) {
                        if (!max--)
                                return ret;
                        init_ir_raw_event_duration((*ev)++, 0,
                                                   timings->bit_space);
                        if (!max--)
                                return ret;
                        pulse = timings->bit_pulse[data & 1];
                        init_ir_raw_event_duration((*ev)++, 1, pulse);
                }
        }

        if (!max--)
                return ret;
        init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

        return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
  437. /**
  438. * ir_raw_encode_scancode() - Encode a scancode as raw events
  439. *
  440. * @protocol: protocol
  441. * @scancode: scancode filter describing a single scancode
  442. * @events: array of raw events to write into
  443. * @max: max number of raw events
  444. *
  445. * Attempts to encode the scancode as raw events.
  446. *
  447. * Returns: The number of events written.
  448. * -ENOBUFS if there isn't enough space in the array to fit the
  449. * encoding. In this case all @max events will have been written.
  450. * -EINVAL if the scancode is ambiguous or invalid, or if no
  451. * compatible encoder was found.
  452. */
  453. int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
  454. struct ir_raw_event *events, unsigned int max)
  455. {
  456. struct ir_raw_handler *handler;
  457. int ret = -EINVAL;
  458. u64 mask = 1ULL << protocol;
  459. ir_raw_load_modules(&mask);
  460. mutex_lock(&ir_raw_handler_lock);
  461. list_for_each_entry(handler, &ir_raw_handler_list, list) {
  462. if (handler->protocols & mask && handler->encode) {
  463. ret = handler->encode(protocol, scancode, events, max);
  464. if (ret >= 0 || ret == -ENOBUFS)
  465. break;
  466. }
  467. }
  468. mutex_unlock(&ir_raw_handler_lock);
  469. return ret;
  470. }
  471. EXPORT_SYMBOL(ir_raw_encode_scancode);
/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first of all, rather than calling ir_raw_event_handle() for each
 * edge and waking up the rc thread, 15 ms after the first edge
 * ir_raw_event_handle() is called. Secondly, generate a timeout event
 * if no more IR is received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
        struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
        struct rc_dev *dev = raw->dev;
        unsigned long flags;
        ktime_t interval;

        /* Serialise against ir_raw_event_store_with_timeout() */
        spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
        interval = ktime_sub(ktime_get(), dev->raw->last_event);
        if (ktime_to_us(interval) >= dev->timeout) {
                /* Quiet long enough: queue a timeout event */
                struct ir_raw_event ev = {
                        .timeout = true,
                        .duration = ktime_to_us(interval)
                };

                ir_raw_event_store(dev, &ev);
        } else {
                /* Not idle yet: re-arm to fire when the timeout would elapse */
                mod_timer(&dev->raw->edge_handle,
                          jiffies + usecs_to_jiffies(dev->timeout -
                                                     ktime_to_us(interval)));
        }
        spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

        /* Wake the decoder thread to process whatever has been queued */
        ir_raw_event_handle(dev);
}
  505. /**
  506. * ir_raw_encode_carrier() - Get carrier used for protocol
  507. *
  508. * @protocol: protocol
  509. *
  510. * Attempts to find the carrier for the specified protocol
  511. *
  512. * Returns: The carrier in Hz
  513. * -EINVAL if the protocol is invalid, or if no
  514. * compatible encoder was found.
  515. */
  516. int ir_raw_encode_carrier(enum rc_proto protocol)
  517. {
  518. struct ir_raw_handler *handler;
  519. int ret = -EINVAL;
  520. u64 mask = BIT_ULL(protocol);
  521. mutex_lock(&ir_raw_handler_lock);
  522. list_for_each_entry(handler, &ir_raw_handler_list, list) {
  523. if (handler->protocols & mask && handler->encode) {
  524. ret = handler->carrier;
  525. break;
  526. }
  527. }
  528. mutex_unlock(&ir_raw_handler_lock);
  529. return ret;
  530. }
  531. EXPORT_SYMBOL(ir_raw_encode_carrier);
  532. /*
  533. * Used to (un)register raw event clients
  534. */
  535. int ir_raw_event_prepare(struct rc_dev *dev)
  536. {
  537. if (!dev)
  538. return -EINVAL;
  539. dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
  540. if (!dev->raw)
  541. return -ENOMEM;
  542. dev->raw->dev = dev;
  543. dev->change_protocol = change_protocol;
  544. dev->idle = true;
  545. spin_lock_init(&dev->raw->edge_spinlock);
  546. timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
  547. INIT_KFIFO(dev->raw->kfifo);
  548. return 0;
  549. }
/*
 * Start the per-device decoder thread and add the device to the raw
 * client list.  Counterpart of ir_raw_event_unregister().
 */
int ir_raw_event_register(struct rc_dev *dev)
{
        struct task_struct *thread;

        /* One decoder thread per raw device, named after the rc minor */
        thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        dev->raw->thread = thread;

        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&dev->raw->list, &ir_raw_client_list);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
  562. void ir_raw_event_free(struct rc_dev *dev)
  563. {
  564. if (!dev)
  565. return;
  566. kfree(dev->raw);
  567. dev->raw = NULL;
  568. }
/*
 * Tear down a raw client: stop the decoder thread and edge timer first,
 * then detach the handlers and free the raw state under the handler lock.
 */
void ir_raw_event_unregister(struct rc_dev *dev)
{
        struct ir_raw_handler *handler;

        if (!dev || !dev->raw)
                return;

        /* No new decoding or timer activity from this point on */
        kthread_stop(dev->raw->thread);
        del_timer_sync(&dev->raw->edge_handle);

        mutex_lock(&ir_raw_handler_lock);
        list_del(&dev->raw->list);
        /* Give every handler with enabled protocols a chance to clean up */
        list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (handler->raw_unregister &&
                    (handler->protocols & dev->enabled_protocols))
                        handler->raw_unregister(dev);

        lirc_bpf_free(dev);

        ir_raw_event_free(dev);

        /*
         * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
         * ensure that the raw member is null on unlock; this is how
         * "device gone" is checked.
         */
        mutex_unlock(&ir_raw_handler_lock);
}
/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
        mutex_lock(&ir_raw_handler_lock);
        list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
        /* Advertise the handler's protocols to ir_raw_get_allowed_protocols() */
        atomic64_or(ir_raw_handler->protocols, &available_protocols);
        mutex_unlock(&ir_raw_handler_lock);

        return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
/*
 * Counterpart of ir_raw_handler_register(): detach the handler from every
 * raw client and withdraw its protocols.
 */
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
        struct ir_raw_event_ctrl *raw;
        u64 protocols = ir_raw_handler->protocols;

        mutex_lock(&ir_raw_handler_lock);
        list_del(&ir_raw_handler->list);
        /*
         * Let the handler clean up on each device that had any of its
         * protocols enabled, then disable those protocols on the device.
         */
        list_for_each_entry(raw, &ir_raw_client_list, list) {
                if (ir_raw_handler->raw_unregister &&
                    (raw->dev->enabled_protocols & protocols))
                        ir_raw_handler->raw_unregister(raw->dev);

                ir_raw_disable_protocols(raw->dev, protocols);
        }
        /* The protocols are no longer available */
        atomic64_andnot(protocols, &available_protocols);
        mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);