industrialio-buffer.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* The industrial I/O core
  3. *
  4. * Copyright (c) 2008 Jonathan Cameron
  5. *
  6. * Handling of buffer allocation / resizing.
  7. *
  8. * Things to look at here.
  9. * - Better memory allocation techniques?
  10. * - Alternative access techniques?
  11. */
  12. #include <linux/anon_inodes.h>
  13. #include <linux/kernel.h>
  14. #include <linux/export.h>
  15. #include <linux/device.h>
  16. #include <linux/file.h>
  17. #include <linux/fs.h>
  18. #include <linux/cdev.h>
  19. #include <linux/slab.h>
  20. #include <linux/poll.h>
  21. #include <linux/sched/signal.h>
  22. #include <linux/iio/iio.h>
  23. #include <linux/iio/iio-opaque.h>
  24. #include "iio_core.h"
  25. #include "iio_core_trigger.h"
  26. #include <linux/iio/sysfs.h>
  27. #include <linux/iio/buffer.h>
  28. #include <linux/iio/buffer_impl.h>
  29. static const char * const iio_endian_prefix[] = {
  30. [IIO_BE] = "be",
  31. [IIO_LE] = "le",
  32. };
  33. static bool iio_buffer_is_active(struct iio_buffer *buf)
  34. {
  35. return !list_empty(&buf->buffer_list);
  36. }
  37. static size_t iio_buffer_data_available(struct iio_buffer *buf)
  38. {
  39. return buf->access->data_available(buf);
  40. }
  41. static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
  42. struct iio_buffer *buf, size_t required)
  43. {
  44. if (!indio_dev->info->hwfifo_flush_to_buffer)
  45. return -ENODEV;
  46. return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
  47. }
  48. static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
  49. size_t to_wait, int to_flush)
  50. {
  51. size_t avail;
  52. int flushed = 0;
  53. /* wakeup if the device was unregistered */
  54. if (!indio_dev->info)
  55. return true;
  56. /* drain the buffer if it was disabled */
  57. if (!iio_buffer_is_active(buf)) {
  58. to_wait = min_t(size_t, to_wait, 1);
  59. to_flush = 0;
  60. }
  61. avail = iio_buffer_data_available(buf);
  62. if (avail >= to_wait) {
  63. /* force a flush for non-blocking reads */
  64. if (!to_wait && avail < to_flush)
  65. iio_buffer_flush_hwfifo(indio_dev, buf,
  66. to_flush - avail);
  67. return true;
  68. }
  69. if (to_flush)
  70. flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
  71. to_wait - avail);
  72. if (flushed <= 0)
  73. return false;
  74. if (avail + flushed >= to_wait)
  75. return true;
  76. return false;
  77. }
  78. /**
  79. * iio_buffer_read() - chrdev read for buffer access
  80. * @filp: File structure pointer for the char device
  81. * @buf: Destination buffer for iio buffer read
  82. * @n: First n bytes to read
  83. * @f_ps: Long offset provided by the user as a seek position
  84. *
  85. * This function relies on all buffer implementations having an
  86. * iio_buffer as their first element.
  87. *
  88. * Return: negative values corresponding to error codes or ret != 0
  89. * for ending the reading activity
  90. **/
  91. static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
  92. size_t n, loff_t *f_ps)
  93. {
  94. struct iio_dev_buffer_pair *ib = filp->private_data;
  95. struct iio_buffer *rb = ib->buffer;
  96. struct iio_dev *indio_dev = ib->indio_dev;
  97. DEFINE_WAIT_FUNC(wait, woken_wake_function);
  98. size_t datum_size;
  99. size_t to_wait;
  100. int ret = 0;
  101. if (!indio_dev->info)
  102. return -ENODEV;
  103. if (!rb || !rb->access->read)
  104. return -EINVAL;
  105. if (rb->direction != IIO_BUFFER_DIRECTION_IN)
  106. return -EPERM;
  107. datum_size = rb->bytes_per_datum;
  108. /*
  109. * If datum_size is 0 there will never be anything to read from the
  110. * buffer, so signal end of file now.
  111. */
  112. if (!datum_size)
  113. return 0;
  114. if (filp->f_flags & O_NONBLOCK)
  115. to_wait = 0;
  116. else
  117. to_wait = min_t(size_t, n / datum_size, rb->watermark);
  118. add_wait_queue(&rb->pollq, &wait);
  119. do {
  120. if (!indio_dev->info) {
  121. ret = -ENODEV;
  122. break;
  123. }
  124. if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
  125. if (signal_pending(current)) {
  126. ret = -ERESTARTSYS;
  127. break;
  128. }
  129. wait_woken(&wait, TASK_INTERRUPTIBLE,
  130. MAX_SCHEDULE_TIMEOUT);
  131. continue;
  132. }
  133. ret = rb->access->read(rb, n, buf);
  134. if (ret == 0 && (filp->f_flags & O_NONBLOCK))
  135. ret = -EAGAIN;
  136. } while (ret == 0);
  137. remove_wait_queue(&rb->pollq, &wait);
  138. return ret;
  139. }
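/*
 * Example (userspace, not part of this kernel file): a minimal, hedged sketch
 * of a consumer of the chrdev read() implemented above.  The device path and
 * scan size below are illustrative assumptions, not anything this file fixes.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	uint8_t scan[64];
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *	close(fd);
 *
 * A blocking read sleeps until min(n / bytes_per_datum, watermark) datums are
 * available; with O_NONBLOCK it returns -EAGAIN rather than sleeping when the
 * buffer is empty.
 */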
  140. static size_t iio_buffer_space_available(struct iio_buffer *buf)
  141. {
  142. if (buf->access->space_available)
  143. return buf->access->space_available(buf);
  144. return SIZE_MAX;
  145. }
  146. static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
  147. size_t n, loff_t *f_ps)
  148. {
  149. struct iio_dev_buffer_pair *ib = filp->private_data;
  150. struct iio_buffer *rb = ib->buffer;
  151. struct iio_dev *indio_dev = ib->indio_dev;
  152. DEFINE_WAIT_FUNC(wait, woken_wake_function);
  153. int ret = 0;
  154. size_t written;
  155. if (!indio_dev->info)
  156. return -ENODEV;
  157. if (!rb || !rb->access->write)
  158. return -EINVAL;
  159. if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
  160. return -EPERM;
  161. written = 0;
  162. add_wait_queue(&rb->pollq, &wait);
  163. do {
  164. if (indio_dev->info == NULL)
  165. return -ENODEV;
  166. if (!iio_buffer_space_available(rb)) {
  167. if (signal_pending(current)) {
  168. ret = -ERESTARTSYS;
  169. break;
  170. }
  171. if (filp->f_flags & O_NONBLOCK) {
  172. if (!written)
  173. ret = -EAGAIN;
  174. break;
  175. }
  176. wait_woken(&wait, TASK_INTERRUPTIBLE,
  177. MAX_SCHEDULE_TIMEOUT);
  178. continue;
  179. }
  180. ret = rb->access->write(rb, n - written, buf + written);
  181. if (ret < 0)
  182. break;
  183. written += ret;
  184. } while (written != n);
  185. remove_wait_queue(&rb->pollq, &wait);
  186. return ret < 0 ? ret : written;
  187. }
  188. /**
  189. * iio_buffer_poll() - poll the buffer to find out if it has data
  190. * @filp: File structure pointer for device access
  191. * @wait: Poll table structure pointer for which the driver adds
  192. * a wait queue
  193. *
  194. * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
  195. * or 0 for other cases
  196. */
  197. static __poll_t iio_buffer_poll(struct file *filp,
  198. struct poll_table_struct *wait)
  199. {
  200. struct iio_dev_buffer_pair *ib = filp->private_data;
  201. struct iio_buffer *rb = ib->buffer;
  202. struct iio_dev *indio_dev = ib->indio_dev;
  203. if (!indio_dev->info || rb == NULL)
  204. return 0;
  205. poll_wait(filp, &rb->pollq, wait);
  206. switch (rb->direction) {
  207. case IIO_BUFFER_DIRECTION_IN:
  208. if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
  209. return EPOLLIN | EPOLLRDNORM;
  210. break;
  211. case IIO_BUFFER_DIRECTION_OUT:
  212. if (iio_buffer_space_available(rb))
  213. return EPOLLOUT | EPOLLWRNORM;
  214. break;
  215. }
  216. return 0;
  217. }
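/*
 * Example (userspace, hedged): waiting on the events signalled above.  An
 * input buffer reports EPOLLIN once the watermark is met, an output buffer
 * reports EPOLLOUT while space is available.  buffer_fd is an assumed,
 * already-open buffer file descriptor.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = buffer_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		... read at least watermark datums from buffer_fd ...
 *	}
 */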
  218. ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
  219. size_t n, loff_t *f_ps)
  220. {
  221. struct iio_dev_buffer_pair *ib = filp->private_data;
  222. struct iio_buffer *rb = ib->buffer;
  223. /* check if buffer was opened through new API */
  224. if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
  225. return -EBUSY;
  226. return iio_buffer_read(filp, buf, n, f_ps);
  227. }
  228. ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
  229. size_t n, loff_t *f_ps)
  230. {
  231. struct iio_dev_buffer_pair *ib = filp->private_data;
  232. struct iio_buffer *rb = ib->buffer;
  233. /* check if buffer was opened through new API */
  234. if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
  235. return -EBUSY;
  236. return iio_buffer_write(filp, buf, n, f_ps);
  237. }
  238. __poll_t iio_buffer_poll_wrapper(struct file *filp,
  239. struct poll_table_struct *wait)
  240. {
  241. struct iio_dev_buffer_pair *ib = filp->private_data;
  242. struct iio_buffer *rb = ib->buffer;
  243. /* check if buffer was opened through new API */
  244. if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
  245. return 0;
  246. return iio_buffer_poll(filp, wait);
  247. }
  248. /**
  249. * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
  250. * @indio_dev: The IIO device
  251. *
  252. * Wakes up the event waitqueue used for poll(). Should usually
  253. * be called when the device is unregistered.
  254. */
  255. void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
  256. {
  257. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  258. struct iio_buffer *buffer;
  259. unsigned int i;
  260. for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
  261. buffer = iio_dev_opaque->attached_buffers[i];
  262. wake_up(&buffer->pollq);
  263. }
  264. }
  265. int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
  266. {
  267. if (!buffer || !buffer->access || !buffer->access->remove_from)
  268. return -EINVAL;
  269. return buffer->access->remove_from(buffer, data);
  270. }
  271. EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
  272. void iio_buffer_init(struct iio_buffer *buffer)
  273. {
  274. INIT_LIST_HEAD(&buffer->demux_list);
  275. INIT_LIST_HEAD(&buffer->buffer_list);
  276. init_waitqueue_head(&buffer->pollq);
  277. kref_init(&buffer->ref);
  278. if (!buffer->watermark)
  279. buffer->watermark = 1;
  280. }
  281. EXPORT_SYMBOL(iio_buffer_init);
  282. void iio_device_detach_buffers(struct iio_dev *indio_dev)
  283. {
  284. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  285. struct iio_buffer *buffer;
  286. unsigned int i;
  287. for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
  288. buffer = iio_dev_opaque->attached_buffers[i];
  289. iio_buffer_put(buffer);
  290. }
  291. kfree(iio_dev_opaque->attached_buffers);
  292. }
  293. static ssize_t iio_show_scan_index(struct device *dev,
  294. struct device_attribute *attr,
  295. char *buf)
  296. {
  297. return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
  298. }
  299. static ssize_t iio_show_fixed_type(struct device *dev,
  300. struct device_attribute *attr,
  301. char *buf)
  302. {
  303. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  304. u8 type = this_attr->c->scan_type.endianness;
  305. if (type == IIO_CPU) {
  306. #ifdef __LITTLE_ENDIAN
  307. type = IIO_LE;
  308. #else
  309. type = IIO_BE;
  310. #endif
  311. }
  312. if (this_attr->c->scan_type.repeat > 1)
  313. return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
  314. iio_endian_prefix[type],
  315. this_attr->c->scan_type.sign,
  316. this_attr->c->scan_type.realbits,
  317. this_attr->c->scan_type.storagebits,
  318. this_attr->c->scan_type.repeat,
  319. this_attr->c->scan_type.shift);
  320. else
  321. return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
  322. iio_endian_prefix[type],
  323. this_attr->c->scan_type.sign,
  324. this_attr->c->scan_type.realbits,
  325. this_attr->c->scan_type.storagebits,
  326. this_attr->c->scan_type.shift);
  327. }
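/*
 * The strings emitted above follow the scan-element "type" format
 * [be|le]:[s|u]realbits/storagebits[Xrepeat]>>shift.  For example, a channel
 * declared with signed 12-bit samples in 16 bits of little-endian storage and
 * a 4-bit shift reads back as "le:s12/16>>4"; with .repeat = 2 it would read
 * "le:s12/16X2>>4".  (Illustrative values, matching the two sysfs_emit()
 * format strings above.)
 */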
  328. static ssize_t iio_scan_el_show(struct device *dev,
  329. struct device_attribute *attr,
  330. char *buf)
  331. {
  332. int ret;
  333. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  334. /* Ensure ret is 0 or 1. */
  335. ret = !!test_bit(to_iio_dev_attr(attr)->address,
  336. buffer->scan_mask);
  337. return sysfs_emit(buf, "%d\n", ret);
  338. }
  339. /* Note NULL is used as the error indicator, since a NULL mask makes no sense as a match. */
  340. static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
  341. unsigned int masklength,
  342. const unsigned long *mask,
  343. bool strict)
  344. {
  345. if (bitmap_empty(mask, masklength))
  346. return NULL;
  347. while (*av_masks) {
  348. if (strict) {
  349. if (bitmap_equal(mask, av_masks, masklength))
  350. return av_masks;
  351. } else {
  352. if (bitmap_subset(mask, av_masks, masklength))
  353. return av_masks;
  354. }
  355. av_masks += BITS_TO_LONGS(masklength);
  356. }
  357. return NULL;
  358. }
  359. static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  360. const unsigned long *mask)
  361. {
  362. if (!indio_dev->setup_ops->validate_scan_mask)
  363. return true;
  364. return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
  365. }
  366. /**
  367. * iio_scan_mask_set() - set particular bit in the scan mask
  368. * @indio_dev: the iio device
  369. * @buffer: the buffer whose scan mask we are interested in
  370. * @bit: the bit to be set.
  371. *
  372. * Note that at this point we have no way of knowing what other
  373. * buffers might request, hence this code only verifies that the
  374. * individual buffers request is plausible.
  375. */
  376. static int iio_scan_mask_set(struct iio_dev *indio_dev,
  377. struct iio_buffer *buffer, int bit)
  378. {
  379. const unsigned long *mask;
  380. unsigned long *trialmask;
  381. if (!indio_dev->masklength) {
  382. WARN(1, "Trying to set scanmask prior to registering buffer\n");
  383. return -EINVAL;
  384. }
  385. trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
  386. if (!trialmask)
  387. return -ENOMEM;
  388. bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
  389. set_bit(bit, trialmask);
  390. if (!iio_validate_scan_mask(indio_dev, trialmask))
  391. goto err_invalid_mask;
  392. if (indio_dev->available_scan_masks) {
  393. mask = iio_scan_mask_match(indio_dev->available_scan_masks,
  394. indio_dev->masklength,
  395. trialmask, false);
  396. if (!mask)
  397. goto err_invalid_mask;
  398. }
  399. bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
  400. bitmap_free(trialmask);
  401. return 0;
  402. err_invalid_mask:
  403. bitmap_free(trialmask);
  404. return -EINVAL;
  405. }
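/*
 * Example (driver side, hedged): available_scan_masks, when provided, is the
 * zero-terminated list of channel combinations that iio_scan_mask_match()
 * walks above, one BITS_TO_LONGS(masklength)-long bitmap per entry.  For the
 * common masklength <= BITS_PER_LONG case, a hypothetical driver that can only
 * capture "channels 0+1" or "channels 0-3" together could declare:
 *
 *	static const unsigned long foo_scan_masks[] = {
 *		BIT(0) | BIT(1),
 *		BIT(0) | BIT(1) | BIT(2) | BIT(3),
 *		0,
 *	};
 *
 *	indio_dev->available_scan_masks = foo_scan_masks;
 *
 * A requested mask is then accepted if it is a subset of some entry
 * (non-strict match); strict matching, requiring exact equality, is only used
 * for hardware-buffer mode in iio_verify_update().
 */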
  406. static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
  407. {
  408. clear_bit(bit, buffer->scan_mask);
  409. return 0;
  410. }
  411. static int iio_scan_mask_query(struct iio_dev *indio_dev,
  412. struct iio_buffer *buffer, int bit)
  413. {
  414. if (bit > indio_dev->masklength)
  415. return -EINVAL;
  416. if (!buffer->scan_mask)
  417. return 0;
  418. /* Ensure return value is 0 or 1. */
  419. return !!test_bit(bit, buffer->scan_mask);
  420. };
  421. static ssize_t iio_scan_el_store(struct device *dev,
  422. struct device_attribute *attr,
  423. const char *buf,
  424. size_t len)
  425. {
  426. int ret;
  427. bool state;
  428. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  429. struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  430. struct iio_buffer *buffer = this_attr->buffer;
  431. ret = kstrtobool(buf, &state);
  432. if (ret < 0)
  433. return ret;
  434. mutex_lock(&indio_dev->mlock);
  435. if (iio_buffer_is_active(buffer)) {
  436. ret = -EBUSY;
  437. goto error_ret;
  438. }
  439. ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
  440. if (ret < 0)
  441. goto error_ret;
  442. if (!state && ret) {
  443. ret = iio_scan_mask_clear(buffer, this_attr->address);
  444. if (ret)
  445. goto error_ret;
  446. } else if (state && !ret) {
  447. ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
  448. if (ret)
  449. goto error_ret;
  450. }
  451. error_ret:
  452. mutex_unlock(&indio_dev->mlock);
  453. return ret < 0 ? ret : len;
  454. }
  455. static ssize_t iio_scan_el_ts_show(struct device *dev,
  456. struct device_attribute *attr,
  457. char *buf)
  458. {
  459. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  460. return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
  461. }
  462. static ssize_t iio_scan_el_ts_store(struct device *dev,
  463. struct device_attribute *attr,
  464. const char *buf,
  465. size_t len)
  466. {
  467. int ret;
  468. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  469. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  470. bool state;
  471. ret = kstrtobool(buf, &state);
  472. if (ret < 0)
  473. return ret;
  474. mutex_lock(&indio_dev->mlock);
  475. if (iio_buffer_is_active(buffer)) {
  476. ret = -EBUSY;
  477. goto error_ret;
  478. }
  479. buffer->scan_timestamp = state;
  480. error_ret:
  481. mutex_unlock(&indio_dev->mlock);
  482. return ret ? ret : len;
  483. }
  484. static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
  485. struct iio_buffer *buffer,
  486. const struct iio_chan_spec *chan)
  487. {
  488. int ret, attrcount = 0;
  489. ret = __iio_add_chan_devattr("index",
  490. chan,
  491. &iio_show_scan_index,
  492. NULL,
  493. 0,
  494. IIO_SEPARATE,
  495. &indio_dev->dev,
  496. buffer,
  497. &buffer->buffer_attr_list);
  498. if (ret)
  499. return ret;
  500. attrcount++;
  501. ret = __iio_add_chan_devattr("type",
  502. chan,
  503. &iio_show_fixed_type,
  504. NULL,
  505. 0,
  506. 0,
  507. &indio_dev->dev,
  508. buffer,
  509. &buffer->buffer_attr_list);
  510. if (ret)
  511. return ret;
  512. attrcount++;
  513. if (chan->type != IIO_TIMESTAMP)
  514. ret = __iio_add_chan_devattr("en",
  515. chan,
  516. &iio_scan_el_show,
  517. &iio_scan_el_store,
  518. chan->scan_index,
  519. 0,
  520. &indio_dev->dev,
  521. buffer,
  522. &buffer->buffer_attr_list);
  523. else
  524. ret = __iio_add_chan_devattr("en",
  525. chan,
  526. &iio_scan_el_ts_show,
  527. &iio_scan_el_ts_store,
  528. chan->scan_index,
  529. 0,
  530. &indio_dev->dev,
  531. buffer,
  532. &buffer->buffer_attr_list);
  533. if (ret)
  534. return ret;
  535. attrcount++;
  536. ret = attrcount;
  537. return ret;
  538. }
  539. static ssize_t length_show(struct device *dev, struct device_attribute *attr,
  540. char *buf)
  541. {
  542. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  543. return sysfs_emit(buf, "%d\n", buffer->length);
  544. }
  545. static ssize_t length_store(struct device *dev, struct device_attribute *attr,
  546. const char *buf, size_t len)
  547. {
  548. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  549. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  550. unsigned int val;
  551. int ret;
  552. ret = kstrtouint(buf, 10, &val);
  553. if (ret)
  554. return ret;
  555. if (val == buffer->length)
  556. return len;
  557. mutex_lock(&indio_dev->mlock);
  558. if (iio_buffer_is_active(buffer)) {
  559. ret = -EBUSY;
  560. } else {
  561. buffer->access->set_length(buffer, val);
  562. ret = 0;
  563. }
  564. if (ret)
  565. goto out;
  566. if (buffer->length && buffer->length < buffer->watermark)
  567. buffer->watermark = buffer->length;
  568. out:
  569. mutex_unlock(&indio_dev->mlock);
  570. return ret ? ret : len;
  571. }
  572. static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
  573. char *buf)
  574. {
  575. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  576. return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
  577. }
  578. static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
  579. unsigned int scan_index)
  580. {
  581. const struct iio_chan_spec *ch;
  582. unsigned int bytes;
  583. ch = iio_find_channel_from_si(indio_dev, scan_index);
  584. bytes = ch->scan_type.storagebits / 8;
  585. if (ch->scan_type.repeat > 1)
  586. bytes *= ch->scan_type.repeat;
  587. return bytes;
  588. }
  589. static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
  590. {
  591. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  592. return iio_storage_bytes_for_si(indio_dev,
  593. iio_dev_opaque->scan_index_timestamp);
  594. }
  595. static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
  596. const unsigned long *mask, bool timestamp)
  597. {
  598. unsigned int bytes = 0;
  599. int length, i, largest = 0;
  600. /* How much space will the demuxed element take? */
  601. for_each_set_bit(i, mask,
  602. indio_dev->masklength) {
  603. length = iio_storage_bytes_for_si(indio_dev, i);
  604. bytes = ALIGN(bytes, length);
  605. bytes += length;
  606. largest = max(largest, length);
  607. }
  608. if (timestamp) {
  609. length = iio_storage_bytes_for_timestamp(indio_dev);
  610. bytes = ALIGN(bytes, length);
  611. bytes += length;
  612. largest = max(largest, length);
  613. }
  614. bytes = ALIGN(bytes, largest);
  615. return bytes;
  616. }
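/*
 * Worked example of the layout computed above (illustrative numbers): with
 * two enabled 16-bit channels and a 64-bit timestamp, the loop yields
 * bytes = 2, then ALIGN(2, 2) + 2 = 4; the timestamp needs 8-byte alignment,
 * so bytes = ALIGN(4, 8) + 8 = 16; the final ALIGN(16, largest = 8) leaves a
 * 16-byte scan, i.e. ch0 at offset 0, ch1 at 2, padding at 4..7 and the
 * timestamp at 8.
 */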
  617. static void iio_buffer_activate(struct iio_dev *indio_dev,
  618. struct iio_buffer *buffer)
  619. {
  620. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  621. iio_buffer_get(buffer);
  622. list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
  623. }
  624. static void iio_buffer_deactivate(struct iio_buffer *buffer)
  625. {
  626. list_del_init(&buffer->buffer_list);
  627. wake_up_interruptible(&buffer->pollq);
  628. iio_buffer_put(buffer);
  629. }
  630. static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
  631. {
  632. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  633. struct iio_buffer *buffer, *_buffer;
  634. list_for_each_entry_safe(buffer, _buffer,
  635. &iio_dev_opaque->buffer_list, buffer_list)
  636. iio_buffer_deactivate(buffer);
  637. }
  638. static int iio_buffer_enable(struct iio_buffer *buffer,
  639. struct iio_dev *indio_dev)
  640. {
  641. if (!buffer->access->enable)
  642. return 0;
  643. return buffer->access->enable(buffer, indio_dev);
  644. }
  645. static int iio_buffer_disable(struct iio_buffer *buffer,
  646. struct iio_dev *indio_dev)
  647. {
  648. if (!buffer->access->disable)
  649. return 0;
  650. return buffer->access->disable(buffer, indio_dev);
  651. }
  652. static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
  653. struct iio_buffer *buffer)
  654. {
  655. unsigned int bytes;
  656. if (!buffer->access->set_bytes_per_datum)
  657. return;
  658. bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
  659. buffer->scan_timestamp);
  660. buffer->access->set_bytes_per_datum(buffer, bytes);
  661. }
  662. static int iio_buffer_request_update(struct iio_dev *indio_dev,
  663. struct iio_buffer *buffer)
  664. {
  665. int ret;
  666. iio_buffer_update_bytes_per_datum(indio_dev, buffer);
  667. if (buffer->access->request_update) {
  668. ret = buffer->access->request_update(buffer);
  669. if (ret) {
  670. dev_dbg(&indio_dev->dev,
  671. "Buffer not started: buffer parameter update failed (%d)\n",
  672. ret);
  673. return ret;
  674. }
  675. }
  676. return 0;
  677. }
  678. static void iio_free_scan_mask(struct iio_dev *indio_dev,
  679. const unsigned long *mask)
  680. {
  681. /* If the mask is dynamically allocated free it, otherwise do nothing */
  682. if (!indio_dev->available_scan_masks)
  683. bitmap_free(mask);
  684. }
  685. struct iio_device_config {
  686. unsigned int mode;
  687. unsigned int watermark;
  688. const unsigned long *scan_mask;
  689. unsigned int scan_bytes;
  690. bool scan_timestamp;
  691. };
  692. static int iio_verify_update(struct iio_dev *indio_dev,
  693. struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
  694. struct iio_device_config *config)
  695. {
  696. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  697. unsigned long *compound_mask;
  698. const unsigned long *scan_mask;
  699. bool strict_scanmask = false;
  700. struct iio_buffer *buffer;
  701. bool scan_timestamp;
  702. unsigned int modes;
  703. if (insert_buffer &&
  704. bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
  705. dev_dbg(&indio_dev->dev,
  706. "At least one scan element must be enabled first\n");
  707. return -EINVAL;
  708. }
  709. memset(config, 0, sizeof(*config));
  710. config->watermark = ~0;
  711. /*
  712. * If there is just one buffer and we are removing it there is nothing
  713. * to verify.
  714. */
  715. if (remove_buffer && !insert_buffer &&
  716. list_is_singular(&iio_dev_opaque->buffer_list))
  717. return 0;
  718. modes = indio_dev->modes;
  719. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
  720. if (buffer == remove_buffer)
  721. continue;
  722. modes &= buffer->access->modes;
  723. config->watermark = min(config->watermark, buffer->watermark);
  724. }
  725. if (insert_buffer) {
  726. modes &= insert_buffer->access->modes;
  727. config->watermark = min(config->watermark,
  728. insert_buffer->watermark);
  729. }
  730. /* Definitely possible for devices to support both of these. */
  731. if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
  732. config->mode = INDIO_BUFFER_TRIGGERED;
  733. } else if (modes & INDIO_BUFFER_HARDWARE) {
  734. /*
  735. * Keep things simple for now and only allow a single buffer to
  736. * be connected in hardware mode.
  737. */
  738. if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
  739. return -EINVAL;
  740. config->mode = INDIO_BUFFER_HARDWARE;
  741. strict_scanmask = true;
  742. } else if (modes & INDIO_BUFFER_SOFTWARE) {
  743. config->mode = INDIO_BUFFER_SOFTWARE;
  744. } else {
  745. /* Can only occur on first buffer */
  746. if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
  747. dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
  748. return -EINVAL;
  749. }
  750. /* What scan mask do we actually have? */
  751. compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
  752. if (compound_mask == NULL)
  753. return -ENOMEM;
  754. scan_timestamp = false;
  755. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
  756. if (buffer == remove_buffer)
  757. continue;
  758. bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
  759. indio_dev->masklength);
  760. scan_timestamp |= buffer->scan_timestamp;
  761. }
  762. if (insert_buffer) {
  763. bitmap_or(compound_mask, compound_mask,
  764. insert_buffer->scan_mask, indio_dev->masklength);
  765. scan_timestamp |= insert_buffer->scan_timestamp;
  766. }
  767. if (indio_dev->available_scan_masks) {
  768. scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
  769. indio_dev->masklength,
  770. compound_mask,
  771. strict_scanmask);
  772. bitmap_free(compound_mask);
  773. if (scan_mask == NULL)
  774. return -EINVAL;
  775. } else {
  776. scan_mask = compound_mask;
  777. }
  778. config->scan_bytes = iio_compute_scan_bytes(indio_dev,
  779. scan_mask, scan_timestamp);
  780. config->scan_mask = scan_mask;
  781. config->scan_timestamp = scan_timestamp;
  782. return 0;
  783. }
  784. /**
  785. * struct iio_demux_table - table describing demux memcpy ops
  786. * @from: index to copy from
  787. * @to: index to copy to
  788. * @length: how many bytes to copy
  789. * @l: list head used for management
  790. */
  791. struct iio_demux_table {
  792. unsigned int from;
  793. unsigned int to;
  794. unsigned int length;
  795. struct list_head l;
  796. };
  797. static void iio_buffer_demux_free(struct iio_buffer *buffer)
  798. {
  799. struct iio_demux_table *p, *q;
  800. list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
  801. list_del(&p->l);
  802. kfree(p);
  803. }
  804. }
  805. static int iio_buffer_add_demux(struct iio_buffer *buffer,
  806. struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
  807. unsigned int length)
  808. {
  809. if (*p && (*p)->from + (*p)->length == in_loc &&
  810. (*p)->to + (*p)->length == out_loc) {
  811. (*p)->length += length;
  812. } else {
  813. *p = kmalloc(sizeof(**p), GFP_KERNEL);
  814. if (*p == NULL)
  815. return -ENOMEM;
  816. (*p)->from = in_loc;
  817. (*p)->to = out_loc;
  818. (*p)->length = length;
  819. list_add_tail(&(*p)->l, &buffer->demux_list);
  820. }
  821. return 0;
  822. }
  823. static int iio_buffer_update_demux(struct iio_dev *indio_dev,
  824. struct iio_buffer *buffer)
  825. {
  826. int ret, in_ind = -1, out_ind, length;
  827. unsigned int in_loc = 0, out_loc = 0;
  828. struct iio_demux_table *p = NULL;
  829. /* Clear out any old demux */
  830. iio_buffer_demux_free(buffer);
  831. kfree(buffer->demux_bounce);
  832. buffer->demux_bounce = NULL;
  833. /* First work out which scan mode we will actually have */
  834. if (bitmap_equal(indio_dev->active_scan_mask,
  835. buffer->scan_mask,
  836. indio_dev->masklength))
  837. return 0;
  838. /* Now we have the two masks, work from least sig and build up sizes */
  839. for_each_set_bit(out_ind,
  840. buffer->scan_mask,
  841. indio_dev->masklength) {
  842. in_ind = find_next_bit(indio_dev->active_scan_mask,
  843. indio_dev->masklength,
  844. in_ind + 1);
  845. while (in_ind != out_ind) {
  846. length = iio_storage_bytes_for_si(indio_dev, in_ind);
  847. /* Make sure we are aligned */
  848. in_loc = roundup(in_loc, length) + length;
  849. in_ind = find_next_bit(indio_dev->active_scan_mask,
  850. indio_dev->masklength,
  851. in_ind + 1);
  852. }
  853. length = iio_storage_bytes_for_si(indio_dev, in_ind);
  854. out_loc = roundup(out_loc, length);
  855. in_loc = roundup(in_loc, length);
  856. ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  857. if (ret)
  858. goto error_clear_mux_table;
  859. out_loc += length;
  860. in_loc += length;
  861. }
  862. /* Relies on scan_timestamp being last */
  863. if (buffer->scan_timestamp) {
  864. length = iio_storage_bytes_for_timestamp(indio_dev);
  865. out_loc = roundup(out_loc, length);
  866. in_loc = roundup(in_loc, length);
  867. ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  868. if (ret)
  869. goto error_clear_mux_table;
  870. out_loc += length;
  871. }
  872. buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
  873. if (buffer->demux_bounce == NULL) {
  874. ret = -ENOMEM;
  875. goto error_clear_mux_table;
  876. }
  877. return 0;
  878. error_clear_mux_table:
  879. iio_buffer_demux_free(buffer);
  880. return ret;
  881. }
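/*
 * Example of the table built above (illustrative): if the device's
 * active_scan_mask covers channels {0, 1, 2} (2 bytes each, no timestamp)
 * but this buffer only enabled {0, 2}, the source scan is laid out as
 * ch0 at 0, ch1 at 2, ch2 at 4, and the demux list becomes two entries:
 * {from = 0, to = 0, length = 2} and {from = 4, to = 2, length = 2}.
 * Channels that end up copied back-to-back would instead be merged into a
 * single entry (one memcpy) by iio_buffer_add_demux().
 */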
  882. static int iio_update_demux(struct iio_dev *indio_dev)
  883. {
  884. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  885. struct iio_buffer *buffer;
  886. int ret;
  887. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
  888. ret = iio_buffer_update_demux(indio_dev, buffer);
  889. if (ret < 0)
  890. goto error_clear_mux_table;
  891. }
  892. return 0;
  893. error_clear_mux_table:
  894. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
  895. iio_buffer_demux_free(buffer);
  896. return ret;
  897. }
  898. static int iio_enable_buffers(struct iio_dev *indio_dev,
  899. struct iio_device_config *config)
  900. {
  901. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  902. struct iio_buffer *buffer, *tmp = NULL;
  903. int ret;
  904. indio_dev->active_scan_mask = config->scan_mask;
  905. indio_dev->scan_timestamp = config->scan_timestamp;
  906. indio_dev->scan_bytes = config->scan_bytes;
  907. iio_dev_opaque->currentmode = config->mode;
  908. iio_update_demux(indio_dev);
  909. /* Wind up again */
  910. if (indio_dev->setup_ops->preenable) {
  911. ret = indio_dev->setup_ops->preenable(indio_dev);
  912. if (ret) {
  913. dev_dbg(&indio_dev->dev,
  914. "Buffer not started: buffer preenable failed (%d)\n", ret);
  915. goto err_undo_config;
  916. }
  917. }
  918. if (indio_dev->info->update_scan_mode) {
  919. ret = indio_dev->info
  920. ->update_scan_mode(indio_dev,
  921. indio_dev->active_scan_mask);
  922. if (ret < 0) {
  923. dev_dbg(&indio_dev->dev,
  924. "Buffer not started: update scan mode failed (%d)\n",
  925. ret);
  926. goto err_run_postdisable;
  927. }
  928. }
  929. if (indio_dev->info->hwfifo_set_watermark)
  930. indio_dev->info->hwfifo_set_watermark(indio_dev,
  931. config->watermark);
  932. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
  933. ret = iio_buffer_enable(buffer, indio_dev);
  934. if (ret) {
  935. tmp = buffer;
  936. goto err_disable_buffers;
  937. }
  938. }
  939. if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
  940. ret = iio_trigger_attach_poll_func(indio_dev->trig,
  941. indio_dev->pollfunc);
  942. if (ret)
  943. goto err_disable_buffers;
  944. }
  945. if (indio_dev->setup_ops->postenable) {
  946. ret = indio_dev->setup_ops->postenable(indio_dev);
  947. if (ret) {
  948. dev_dbg(&indio_dev->dev,
  949. "Buffer not started: postenable failed (%d)\n", ret);
  950. goto err_detach_pollfunc;
  951. }
  952. }
  953. return 0;
  954. err_detach_pollfunc:
  955. if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
  956. iio_trigger_detach_poll_func(indio_dev->trig,
  957. indio_dev->pollfunc);
  958. }
  959. err_disable_buffers:
  960. buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
  961. list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
  962. buffer_list)
  963. iio_buffer_disable(buffer, indio_dev);
  964. err_run_postdisable:
  965. if (indio_dev->setup_ops->postdisable)
  966. indio_dev->setup_ops->postdisable(indio_dev);
  967. err_undo_config:
  968. iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
  969. indio_dev->active_scan_mask = NULL;
  970. return ret;
  971. }
  972. static int iio_disable_buffers(struct iio_dev *indio_dev)
  973. {
  974. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  975. struct iio_buffer *buffer;
  976. int ret = 0;
  977. int ret2;
  978. /* Wind down existing buffers - iff there are any */
  979. if (list_empty(&iio_dev_opaque->buffer_list))
  980. return 0;
  981. /*
  982. * If things go wrong at some step in disable we still need to continue
  983. * to perform the other steps, otherwise we leave the device in an
  984. * inconsistent state. We return the error code for the first error we
  985. * encountered.
  986. */
  987. if (indio_dev->setup_ops->predisable) {
  988. ret2 = indio_dev->setup_ops->predisable(indio_dev);
  989. if (ret2 && !ret)
  990. ret = ret2;
  991. }
  992. if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
  993. iio_trigger_detach_poll_func(indio_dev->trig,
  994. indio_dev->pollfunc);
  995. }
  996. list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
  997. ret2 = iio_buffer_disable(buffer, indio_dev);
  998. if (ret2 && !ret)
  999. ret = ret2;
  1000. }
  1001. if (indio_dev->setup_ops->postdisable) {
  1002. ret2 = indio_dev->setup_ops->postdisable(indio_dev);
  1003. if (ret2 && !ret)
  1004. ret = ret2;
  1005. }
  1006. iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
  1007. indio_dev->active_scan_mask = NULL;
  1008. iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
  1009. return ret;
  1010. }
  1011. static int __iio_update_buffers(struct iio_dev *indio_dev,
  1012. struct iio_buffer *insert_buffer,
  1013. struct iio_buffer *remove_buffer)
  1014. {
  1015. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1016. struct iio_device_config new_config;
  1017. int ret;
  1018. ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
  1019. &new_config);
  1020. if (ret)
  1021. return ret;
  1022. if (insert_buffer) {
  1023. ret = iio_buffer_request_update(indio_dev, insert_buffer);
  1024. if (ret)
  1025. goto err_free_config;
  1026. }
  1027. ret = iio_disable_buffers(indio_dev);
  1028. if (ret)
  1029. goto err_deactivate_all;
  1030. if (remove_buffer)
  1031. iio_buffer_deactivate(remove_buffer);
  1032. if (insert_buffer)
  1033. iio_buffer_activate(indio_dev, insert_buffer);
  1034. /* If no buffers in list, we are done */
  1035. if (list_empty(&iio_dev_opaque->buffer_list))
  1036. return 0;
  1037. ret = iio_enable_buffers(indio_dev, &new_config);
  1038. if (ret)
  1039. goto err_deactivate_all;
  1040. return 0;
  1041. err_deactivate_all:
  1042. /*
  1043. * We've already verified that the config is valid earlier. If things go
  1044. * wrong in either enable or disable the most likely reason is an IO
  1045. * error from the device. In this case there is no good recovery
  1046. * strategy. Just make sure to disable everything and leave the device
  1047. * in a sane state. With a bit of luck the device might come back to
  1048. * life again later and userspace can try again.
  1049. */
  1050. iio_buffer_deactivate_all(indio_dev);
  1051. err_free_config:
  1052. iio_free_scan_mask(indio_dev, new_config.scan_mask);
  1053. return ret;
  1054. }
  1055. int iio_update_buffers(struct iio_dev *indio_dev,
  1056. struct iio_buffer *insert_buffer,
  1057. struct iio_buffer *remove_buffer)
  1058. {
  1059. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1060. int ret;
  1061. if (insert_buffer == remove_buffer)
  1062. return 0;
  1063. if (insert_buffer &&
  1064. (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
  1065. return -EINVAL;
  1066. mutex_lock(&iio_dev_opaque->info_exist_lock);
  1067. mutex_lock(&indio_dev->mlock);
  1068. if (insert_buffer && iio_buffer_is_active(insert_buffer))
  1069. insert_buffer = NULL;
  1070. if (remove_buffer && !iio_buffer_is_active(remove_buffer))
  1071. remove_buffer = NULL;
  1072. if (!insert_buffer && !remove_buffer) {
  1073. ret = 0;
  1074. goto out_unlock;
  1075. }
  1076. if (indio_dev->info == NULL) {
  1077. ret = -ENODEV;
  1078. goto out_unlock;
  1079. }
  1080. ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
  1081. out_unlock:
  1082. mutex_unlock(&indio_dev->mlock);
  1083. mutex_unlock(&iio_dev_opaque->info_exist_lock);
  1084. return ret;
  1085. }
  1086. EXPORT_SYMBOL_GPL(iio_update_buffers);
  1087. void iio_disable_all_buffers(struct iio_dev *indio_dev)
  1088. {
  1089. iio_disable_buffers(indio_dev);
  1090. iio_buffer_deactivate_all(indio_dev);
  1091. }
  1092. static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
  1093. const char *buf, size_t len)
  1094. {
  1095. int ret;
  1096. bool requested_state;
  1097. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  1098. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  1099. bool inlist;
  1100. ret = kstrtobool(buf, &requested_state);
  1101. if (ret < 0)
  1102. return ret;
  1103. mutex_lock(&indio_dev->mlock);
  1104. /* Find out if it is in the list */
  1105. inlist = iio_buffer_is_active(buffer);
  1106. /* Already in desired state */
  1107. if (inlist == requested_state)
  1108. goto done;
  1109. if (requested_state)
  1110. ret = __iio_update_buffers(indio_dev, buffer, NULL);
  1111. else
  1112. ret = __iio_update_buffers(indio_dev, NULL, buffer);
  1113. done:
  1114. mutex_unlock(&indio_dev->mlock);
  1115. return (ret < 0) ? ret : len;
  1116. }
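/*
 * This store handler backs the per-buffer sysfs "enable" attribute (for
 * example /sys/bus/iio/devices/iio:deviceX/buffer0/enable, path shown for
 * illustration): writing 1 inserts the buffer via __iio_update_buffers() and
 * starts capture, writing 0 removes it again.
 */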
  1117. static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
  1118. char *buf)
  1119. {
  1120. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  1121. return sysfs_emit(buf, "%u\n", buffer->watermark);
  1122. }
  1123. static ssize_t watermark_store(struct device *dev,
  1124. struct device_attribute *attr,
  1125. const char *buf, size_t len)
  1126. {
  1127. struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  1128. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  1129. unsigned int val;
  1130. int ret;
  1131. ret = kstrtouint(buf, 10, &val);
  1132. if (ret)
  1133. return ret;
  1134. if (!val)
  1135. return -EINVAL;
  1136. mutex_lock(&indio_dev->mlock);
  1137. if (val > buffer->length) {
  1138. ret = -EINVAL;
  1139. goto out;
  1140. }
  1141. if (iio_buffer_is_active(buffer)) {
  1142. ret = -EBUSY;
  1143. goto out;
  1144. }
  1145. buffer->watermark = val;
  1146. out:
  1147. mutex_unlock(&indio_dev->mlock);
  1148. return ret ? ret : len;
  1149. }
  1150. static ssize_t data_available_show(struct device *dev,
  1151. struct device_attribute *attr, char *buf)
  1152. {
  1153. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  1154. return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
  1155. }
  1156. static ssize_t direction_show(struct device *dev,
  1157. struct device_attribute *attr,
  1158. char *buf)
  1159. {
  1160. struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
  1161. switch (buffer->direction) {
  1162. case IIO_BUFFER_DIRECTION_IN:
  1163. return sysfs_emit(buf, "in\n");
  1164. case IIO_BUFFER_DIRECTION_OUT:
  1165. return sysfs_emit(buf, "out\n");
  1166. default:
  1167. return -EINVAL;
  1168. }
  1169. }
  1170. static DEVICE_ATTR_RW(length);
  1171. static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
  1172. static DEVICE_ATTR_RW(enable);
  1173. static DEVICE_ATTR_RW(watermark);
  1174. static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
  1175. static DEVICE_ATTR_RO(data_available);
  1176. static DEVICE_ATTR_RO(direction);
  1177. /*
  1178. * When adding new attributes here, put them at the end, at least until
  1179. * the code that handles the length/length_ro & watermark/watermark_ro
  1180. * assignments gets cleaned up. Otherwise these can create some weird
  1181. * duplicate attributes errors under some setups.
  1182. */
  1183. static struct attribute *iio_buffer_attrs[] = {
  1184. &dev_attr_length.attr,
  1185. &dev_attr_enable.attr,
  1186. &dev_attr_watermark.attr,
  1187. &dev_attr_data_available.attr,
  1188. &dev_attr_direction.attr,
  1189. };
  1190. #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
  1191. static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
  1192. struct attribute *attr)
  1193. {
  1194. struct device_attribute *dattr = to_dev_attr(attr);
  1195. struct iio_dev_attr *iio_attr;
  1196. iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
  1197. if (!iio_attr)
  1198. return NULL;
  1199. iio_attr->buffer = buffer;
  1200. memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
  1201. iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
  1202. if (!iio_attr->dev_attr.attr.name) {
  1203. kfree(iio_attr);
  1204. return NULL;
  1205. }
  1206. sysfs_attr_init(&iio_attr->dev_attr.attr);
  1207. list_add(&iio_attr->l, &buffer->buffer_attr_list);
  1208. return &iio_attr->dev_attr.attr;
  1209. }
  1210. static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
  1211. struct attribute **buffer_attrs,
  1212. int buffer_attrcount,
  1213. int scan_el_attrcount)
  1214. {
  1215. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1216. struct attribute_group *group;
  1217. struct attribute **attrs;
  1218. int ret;
  1219. attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
  1220. if (!attrs)
  1221. return -ENOMEM;
  1222. memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
  1223. group = &iio_dev_opaque->legacy_buffer_group;
  1224. group->attrs = attrs;
  1225. group->name = "buffer";
  1226. ret = iio_device_register_sysfs_group(indio_dev, group);
  1227. if (ret)
  1228. goto error_free_buffer_attrs;
  1229. attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
  1230. if (!attrs) {
  1231. ret = -ENOMEM;
  1232. goto error_free_buffer_attrs;
  1233. }
  1234. memcpy(attrs, &buffer_attrs[buffer_attrcount],
  1235. scan_el_attrcount * sizeof(*attrs));
  1236. group = &iio_dev_opaque->legacy_scan_el_group;
  1237. group->attrs = attrs;
  1238. group->name = "scan_elements";
  1239. ret = iio_device_register_sysfs_group(indio_dev, group);
  1240. if (ret)
  1241. goto error_free_scan_el_attrs;
  1242. return 0;
  1243. error_free_scan_el_attrs:
  1244. kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
  1245. error_free_buffer_attrs:
  1246. kfree(iio_dev_opaque->legacy_buffer_group.attrs);
  1247. return ret;
  1248. }
  1249. static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
  1250. {
  1251. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1252. kfree(iio_dev_opaque->legacy_buffer_group.attrs);
  1253. kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
  1254. }
  1255. static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
  1256. {
  1257. struct iio_dev_buffer_pair *ib = filep->private_data;
  1258. struct iio_dev *indio_dev = ib->indio_dev;
  1259. struct iio_buffer *buffer = ib->buffer;
  1260. wake_up(&buffer->pollq);
  1261. kfree(ib);
  1262. clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
  1263. iio_device_put(indio_dev);
  1264. return 0;
  1265. }
  1266. static const struct file_operations iio_buffer_chrdev_fileops = {
  1267. .owner = THIS_MODULE,
  1268. .llseek = noop_llseek,
  1269. .read = iio_buffer_read,
  1270. .write = iio_buffer_write,
  1271. .poll = iio_buffer_poll,
  1272. .release = iio_buffer_chrdev_release,
  1273. };
  1274. static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
  1275. {
  1276. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1277. int __user *ival = (int __user *)arg;
  1278. struct iio_dev_buffer_pair *ib;
  1279. struct iio_buffer *buffer;
  1280. int fd, idx, ret;
  1281. if (copy_from_user(&idx, ival, sizeof(idx)))
  1282. return -EFAULT;
  1283. if (idx >= iio_dev_opaque->attached_buffers_cnt)
  1284. return -ENODEV;
  1285. iio_device_get(indio_dev);
  1286. buffer = iio_dev_opaque->attached_buffers[idx];
  1287. if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
  1288. ret = -EBUSY;
  1289. goto error_iio_dev_put;
  1290. }
  1291. ib = kzalloc(sizeof(*ib), GFP_KERNEL);
  1292. if (!ib) {
  1293. ret = -ENOMEM;
  1294. goto error_clear_busy_bit;
  1295. }
  1296. ib->indio_dev = indio_dev;
  1297. ib->buffer = buffer;
  1298. fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
  1299. ib, O_RDWR | O_CLOEXEC);
  1300. if (fd < 0) {
  1301. ret = fd;
  1302. goto error_free_ib;
  1303. }
  1304. if (copy_to_user(ival, &fd, sizeof(fd))) {
  1305. /*
  1306. * "Leak" the fd, as there's not much we can do about this
  1307. * anyway. 'fd' might have been closed already, as
  1308. * anon_inode_getfd() called fd_install() on it, which made
  1309. * it reachable by userland.
  1310. *
  1311. * Instead of allowing a malicious user to play tricks with
  1312. * us, rely on the process exit path to do any necessary
  1313. * cleanup, as in releasing the file, if still needed.
  1314. */
  1315. return -EFAULT;
  1316. }
  1317. return 0;
  1318. error_free_ib:
  1319. kfree(ib);
  1320. error_clear_busy_bit:
  1321. clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
  1322. error_iio_dev_put:
  1323. iio_device_put(indio_dev);
  1324. return ret;
  1325. }
  1326. static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
  1327. unsigned int cmd, unsigned long arg)
  1328. {
  1329. switch (cmd) {
  1330. case IIO_BUFFER_GET_FD_IOCTL:
  1331. return iio_device_buffer_getfd(indio_dev, arg);
  1332. default:
  1333. return IIO_IOCTL_UNHANDLED;
  1334. }
  1335. }
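/*
 * Example (userspace, hedged): how the ioctl dispatched above is typically
 * used.  The index of the attached buffer is passed in through the int
 * argument and the new anonymous fd is copied back through the same int on
 * success.  dev_fd is the open /dev/iio:deviceX character device and scan is
 * a suitably sized buffer, both assumptions of this sketch.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/iio/buffer.h>
 *
 *	int arg = 1;					(index of buffer1)
 *	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &arg) == 0)
 *		read(arg, scan, sizeof(scan));		(arg now holds the new fd)
 */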
  1336. static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
  1337. struct iio_dev *indio_dev,
  1338. int index)
  1339. {
  1340. struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
  1341. struct iio_dev_attr *p;
  1342. struct attribute **attr;
  1343. int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
  1344. const struct iio_chan_spec *channels;
  1345. buffer_attrcount = 0;
  1346. if (buffer->attrs) {
  1347. while (buffer->attrs[buffer_attrcount] != NULL)
  1348. buffer_attrcount++;
  1349. }
  1350. scan_el_attrcount = 0;
  1351. INIT_LIST_HEAD(&buffer->buffer_attr_list);
  1352. channels = indio_dev->channels;
  1353. if (channels) {
  1354. /* new magic */
                for (i = 0; i < indio_dev->num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        /* Verify that sample bits fit into storage */
                        if (channels[i].scan_type.storagebits <
                            channels[i].scan_type.realbits +
                            channels[i].scan_type.shift) {
                                dev_err(&indio_dev->dev,
                                        "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
                                        i, channels[i].scan_type.storagebits,
                                        channels[i].scan_type.realbits,
                                        channels[i].scan_type.shift);
                                ret = -EINVAL;
                                goto error_cleanup_dynamic;
                        }

                        ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
                                        &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        scan_el_attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                iio_dev_opaque->scan_index_timestamp =
                                        channels[i].scan_index;
                }

                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
                                        GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
        attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
        if (!attr) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }

        memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
        if (!buffer->access->set_length)
                attr[0] = &dev_attr_length_ro.attr;
        if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
                attr[2] = &dev_attr_watermark_ro.attr;

        if (buffer->attrs)
                memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
                                sizeof(struct attribute *) * buffer_attrcount);

        buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
        buffer->buffer_group.attrs = attr;

        for (i = 0; i < buffer_attrcount; i++) {
                struct attribute *wrapped;

                wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
                if (!wrapped) {
                        ret = -ENOMEM;
                        goto error_free_buffer_attrs;
                }
                attr[i] = wrapped;
        }

        attrn = 0;
        list_for_each_entry(p, &buffer->buffer_attr_list, l)
                attr[attrn++] = &p->dev_attr.attr;

        buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
        if (!buffer->buffer_group.name) {
                ret = -ENOMEM;
                goto error_free_buffer_attrs;
        }

        ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
        if (ret)
                goto error_free_buffer_attr_group_name;

        /* we only need to register the legacy groups for the first buffer */
        if (index > 0)
                return 0;

        ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
                        buffer_attrcount,
                        scan_el_attrcount);
        if (ret)
                goto error_free_buffer_attr_group_name;

        return 0;

error_free_buffer_attr_group_name:
        kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
        kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
        bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
        iio_free_chan_devattr_list(&buffer->buffer_attr_list);

        return ret;
}
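
/*
 * Illustrative note (not part of the original file): the storagebits check
 * above rejects channel descriptions whose shifted sample does not fit its
 * storage word. A hypothetical scan_type that passes the check:
 *
 *	.scan_type = {
 *		.sign = 's',
 *		.realbits = 12,
 *		.shift = 4,
 *		.storagebits = 16,	(16 >= 12 + 4)
 *		.endianness = IIO_BE,
 *	},
 *
 * while realbits = 14 with shift = 4 and storagebits = 16 would take the
 * -EINVAL path.
 */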

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
                struct iio_dev *indio_dev,
                int index)
{
        if (index == 0)
                iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
        bitmap_free(buffer->scan_mask);
        kfree(buffer->buffer_group.name);
        kfree(buffer->buffer_group.attrs);
        iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}

int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        const struct iio_chan_spec *channels;
        struct iio_buffer *buffer;
        int ret, i, idx;
        size_t sz;

        channels = indio_dev->channels;
        if (channels) {
                int ml = indio_dev->masklength;

                for (i = 0; i < indio_dev->num_channels; i++)
                        ml = max(ml, channels[i].scan_index + 1);
                indio_dev->masklength = ml;
        }

        if (!iio_dev_opaque->attached_buffers_cnt)
                return 0;

        for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
                buffer = iio_dev_opaque->attached_buffers[idx];
                ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
                if (ret)
                        goto error_unwind_sysfs_and_mask;
        }

        sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
        iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
        if (!iio_dev_opaque->buffer_ioctl_handler) {
                ret = -ENOMEM;
                goto error_unwind_sysfs_and_mask;
        }

        iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
        iio_device_ioctl_handler_register(indio_dev,
                        iio_dev_opaque->buffer_ioctl_handler);

        return 0;

error_unwind_sysfs_and_mask:
        while (idx--) {
                buffer = iio_dev_opaque->attached_buffers[idx];
                __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
        }
        return ret;
}
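
/*
 * Illustrative note (not part of the original file): once this succeeds,
 * each attached buffer is expected to appear in sysfs as its own bufferN
 * group, e.g. for a hypothetical iio:device0:
 *
 *	/sys/bus/iio/devices/iio:device0/buffer0/enable
 *	/sys/bus/iio/devices/iio:device0/buffer0/length
 *	/sys/bus/iio/devices/iio:device0/buffer0/watermark
 *	/sys/bus/iio/devices/iio:device0/buffer0/in_voltage0_en
 *
 * while the legacy buffer/ and scan_elements/ groups are registered for the
 * first buffer only, as handled in __iio_buffer_alloc_sysfs_and_mask().
 */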

void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        struct iio_buffer *buffer;
        int i;

        if (!iio_dev_opaque->attached_buffers_cnt)
                return;

        iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
        kfree(iio_dev_opaque->buffer_ioctl_handler);

        for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
                buffer = iio_dev_opaque->attached_buffers[i];
                __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
        }
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
                const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
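
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically plug this helper into its buffer setup ops so that enabling
 * more than one scan element is rejected. 'foo_buffer_setup_ops' and the
 * surrounding driver are hypothetical.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &foo_buffer_setup_ops;
 */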

static const void *iio_demux(struct iio_buffer *buffer,
                const void *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                                datain + t->from, t->length);

        return buffer->demux_bounce;
}
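
/*
 * Illustrative note (not part of the original file): each iio_demux_table
 * entry describes one contiguous copy from the device's full scan into this
 * buffer's own layout. For a hypothetical scan of two u16 channels followed
 * by an s64 timestamp, a buffer that enabled only the second channel plus
 * the timestamp would carry roughly:
 *
 *	{ .from = 2, .to = 0, .length = 2 }	second u16 channel
 *	{ .from = 8, .to = 8, .length = 8 }	timestamp, 8-byte aligned
 */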

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
        const void *dataout = iio_demux(buffer, data);
        int ret;

        ret = buffer->access->store_to(buffer, dataout);
        if (ret)
                return ret;

        /*
         * We can't just test for the watermark to decide whether to wake the
         * poll queue, because a read may request fewer samples than the
         * watermark.
         */
        wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
        return 0;
}

/**
 * iio_push_to_buffers() - push a full scan to all enabled buffers.
 * @indio_dev: iio_dev structure for device.
 * @data: Full scan.
 *
 * Return: 0 on success, or a negative error code if pushing to one of the
 * buffers fails.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
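
/*
 * Illustrative sketch (not part of the original file): a typical caller is a
 * driver's pollfunc bottom half, which assembles one full scan and pushes it
 * to every enabled buffer. 'foo_state', its 'scan' member and foo_read_scan()
 * are hypothetical; the scan layout must match the device's active_scan_mask.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_scan(st);
 *		iio_push_to_buffers(indio_dev, &st->scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */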

/**
 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
 *	no alignment or space requirements.
 * @indio_dev: iio_dev structure for device.
 * @data: channel data excluding the timestamp.
 * @data_sz: size of data.
 * @timestamp: timestamp for the sample data.
 *
 * This special variant of iio_push_to_buffers_with_timestamp() does not
 * require space for the timestamp or 8-byte alignment of the data.
 * It does, however, require an allocation on the first call and additional
 * copies on all calls, so it should be avoided if possible.
 */
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
                const void *data,
                size_t data_sz,
                int64_t timestamp)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

        /*
         * Conservative estimate - we can always safely copy the minimum
         * of either the data provided or the length of the destination buffer.
         * This relaxed limit allows the calling drivers to be lax about
         * tracking the size of the data they are pushing, at the cost of
         * unnecessary copying of padding.
         */
        data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
        if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
                void *bb;

                bb = devm_krealloc(&indio_dev->dev,
                                iio_dev_opaque->bounce_buffer,
                                indio_dev->scan_bytes, GFP_KERNEL);
                if (!bb)
                        return -ENOMEM;
                iio_dev_opaque->bounce_buffer = bb;
                iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
        }

        memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
        return iio_push_to_buffers_with_timestamp(indio_dev,
                        iio_dev_opaque->bounce_buffer,
                        timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
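
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware returns a packed, unpadded sample block can hand it over directly
 * instead of repacking it first. 'st->rx_buf' and 'nbytes' are hypothetical.
 *
 *	iio_push_to_buffers_with_ts_unaligned(indio_dev, st->rx_buf, nbytes,
 *			iio_get_time_ns(indio_dev));
 */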

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
        struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

        buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
        if (buffer)
                kref_get(&buffer->ref);

        return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
        if (buffer)
                kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
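
/*
 * Illustrative note (not part of the original file): the usual ownership
 * pattern is for a consumer to take its own reference while it holds on to
 * the buffer pointer and to drop it once it is done:
 *
 *	struct iio_buffer *ref = iio_buffer_get(buffer);
 *
 *	... use ref ...
 *
 *	iio_buffer_put(ref);
 */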

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return: 0 if successful, a negative error code on failure.
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. For legacy reasons, the first
 * attached buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here will be freed via the iio_device_detach_buffers()
 * call handled by iio_device_free().
 */
int iio_device_attach_buffer(struct iio_dev *indio_dev,
                struct iio_buffer *buffer)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
        unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;

        cnt++;

        new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        iio_dev_opaque->attached_buffers = new;

        buffer = iio_buffer_get(buffer);

        /* first buffer is legacy; attach it to the IIO device directly */
        if (!indio_dev->buffer)
                indio_dev->buffer = buffer;

        iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
        iio_dev_opaque->attached_buffers_cnt = cnt;

        return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
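
/*
 * Illustrative sketch (not part of the original file): buffer
 * implementations call this from their setup helpers, roughly like the
 * following. foo_buffer_alloc() and foo_buffer_free() are hypothetical;
 * note that iio_device_attach_buffer() takes its own reference, and the
 * first attached buffer also becomes indio_dev->buffer.
 *
 *	struct iio_buffer *buffer;
 *	int ret;
 *
 *	buffer = foo_buffer_alloc();
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *	if (ret) {
 *		foo_buffer_free(buffer);
 *		return ret;
 *	}
 */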