industrialio-trigger.c

// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>
/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
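/*
 * Illustrative sketch (not part of this file), assuming a hypothetical
 * driver "foo": a typical probe path allocates a trigger, fills in its
 * ops, and registers it.  Error handling is abbreviated.
 *
 *	trig = iio_trigger_alloc(dev, THIS_MODULE, "%s-dev%d",
 *				 indio_dev->name, iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &foo_trigger_ops;
 *	iio_trigger_set_drvdata(trig, indio_dev);
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 */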
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	mutex_lock(&indio_dev->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
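/*
 * Illustrative sketch (not part of this file): a driver whose hardware
 * hard-wires the device to its own interrupt-driven trigger can pin that
 * assignment after registering the trigger, so userspace cannot change
 * current_trigger.  "foo" names are hypothetical.
 *
 *	ret = devm_iio_trigger_register(dev, foo->trig);
 *	if (ret)
 *		return ret;
 *	ret = iio_trigger_set_immutable(indio_dev, foo->trig);
 */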
/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}
static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}
/*
 * In general, reenable callbacks may need to sleep, and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use count happens in this interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, if drivers never blindly
 * reenable after state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
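/*
 * Illustrative sketch (not part of this file): for a sensor with a
 * data-ready interrupt line, the generic handler above can be wired
 * directly to the irq so each hardware interrupt polls the trigger.
 * The irq source, flags and name are hypothetical.
 *
 *	ret = devm_request_irq(dev, client->irq,
 *			       iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, "foo-drdy", trig);
 */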
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}
/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  Alternatively, not enabling the trigger
 * unless the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error; falling through would return 0 */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}
int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
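/*
 * Illustrative sketch (not part of this file): consumers usually get a
 * poll function set up via iio_triggered_buffer_setup(), but one can be
 * allocated directly; iio_pollfunc_store_time() above is the usual
 * top-half.  "foo_trigger_handler" is a hypothetical bottom-half.
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 foo_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */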
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}
/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&indio_dev->mlock);
		return -EPERM;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);
static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
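/*
 * Illustrative sketch (not part of this file): from userspace the
 * current_trigger attribute above is driven by trigger name, e.g.
 * (device and trigger numbers hypothetical):
 *
 *	$ cat /sys/bus/iio/devices/trigger0/name
 *	foo-dev0
 *	$ echo foo-dev0 > \
 *		/sys/bus/iio/devices/iio:device0/trigger/current_trigger
 */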
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};
static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}
/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:	Device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}
/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:	Device to allocate iio_trigger for
 * @this_mod:	module allocating the trigger
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc.  iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);
static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register().  The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
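/*
 * Illustrative sketch (not part of this file): the devm_* pair above lets
 * a driver drop all explicit trigger teardown.  "foo" names are
 * hypothetical.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &foo_trigger_ops;
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */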
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device
 *				     belong to the same device
 * @trig:	The IIO trigger to check
 * @indio_dev:	the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
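/*
 * Illustrative sketch (not part of this file): a driver plugs the helper
 * above into its trigger ops so only the matching device can attach to
 * the trigger.  "foo" names are hypothetical.
 *
 *	static const struct iio_trigger_ops foo_trigger_ops = {
 *		.set_trigger_state = foo_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */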
int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}