// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
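
/* Association of a consumer-facing map entry with the IIO device providing it */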
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}
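
/**
 * iio_map_array_register() - register consumer mappings for an IIO device
 * @indio_dev: pointer to the iio_dev structure
 * @maps: array of mappings, terminated by an entry with a NULL
 *	  consumer_dev_name
 *
 * Adds each map to the global map list so consumers can later look up their
 * channels by consumer device and channel name. On failure, any maps already
 * added for @indio_dev are unregistered again.
 */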
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (!maps)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}
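
/**
 * devm_iio_map_array_register() - device-managed iio_map_array_register()
 * @dev: device to which the unwinding of the registration is bound
 * @indio_dev: pointer to the iio_dev structure
 * @maps: array of mappings to register
 *
 * Registers the maps and arranges for them to be unregistered automatically
 * when @dev is unbound.
 */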
int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
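
/* Look up a channel spec by its datasheet_name; returns NULL if not found. */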
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev: pointer to the iio_dev structure
 * @iiospec: IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. It performs only one sanity check: that the IIO
 * index is less than num_channels (as specified in the iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
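
/*
 * Consumers reference provider channels through the generic IIO bindings;
 * a minimal devicetree sketch (node names and unit addresses below are
 * illustrative only):
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vbat", "temp";
 *	};
 *
 * The cell following the phandle is the iiospec that the xlate callback
 * above turns into a channel index.
 */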
static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the channel lookup still failed, so we should
			 * not proceed with any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the best error code) so
			 * that the caller won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}
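
/**
 * fwnode_iio_channel_get_by_name() - get an IIO channel by consumer name
 * @fwnode: firmware node of the consumer device
 * @name: channel name as listed in "io-channel-names", or NULL
 *
 * Looks the channel up on @fwnode first, then walks up the parent nodes as
 * long as they carry an "io-channel-ranges" property. Returns a channel that
 * must be released with iio_channel_release(), or an ERR_PTR() on failure.
 */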
struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		/* query the parent node, not the node we started from */
		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);

static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (!c)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
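
/**
 * iio_channel_get() - get an IIO channel for a consumer device
 * @dev: consumer device, or NULL for a pure name-based lookup
 * @channel_name: unique channel name on the consumer side, may be NULL
 *
 * Tries the firmware node of @dev first, then falls back to the map list
 * registered via iio_map_array_register(). Release the channel with
 * iio_channel_release().
 */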
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
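
/*
 * Typical consumer usage, a minimal sketch ("vbat" is an illustrative
 * channel name, not one defined by this file):
 *
 *	struct iio_channel *chan = iio_channel_get(dev, "vbat");
 *	int val, ret;
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_processed(chan, &val);
 *	iio_channel_release(chan);
 */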
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}
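
/**
 * devm_iio_channel_get() - device-managed iio_channel_get()
 * @dev: consumer device; the channel is released when @dev is unbound
 * @channel_name: unique channel name on the consumer side, may be NULL
 */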
struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
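
/**
 * iio_channel_get_all() - get all channels mapped to a consumer device
 * @dev: consumer device
 *
 * Returns an array of channels terminated by an entry with a NULL indio_dev,
 * to be released with iio_channel_release_all().
 */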
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only entries below mapind hold a device reference */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
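
/*
 * Worked example of the conversion below (illustrative numbers): for
 * raw = 100, no offset, scale_type = IIO_VAL_FRACTIONAL with
 * scale_val = 2500 and scale_val2 = 4096, and a consumer scale of 1000,
 * processed = div_s64(100 * 2500 * 1000, 4096) = 61035.
 */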
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
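
/*
 * Read a value in real-world units, multiplied by the consumer-supplied
 * scale. If the channel provides IIO_CHAN_INFO_PROCESSED it is used
 * directly; otherwise the raw value is converted via the channel's offset
 * and scale.
 */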
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
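
/*
 * For IIO_AVAIL_RANGE the available values are laid out as {min, step, max}:
 * one integer each for IIO_VAL_INT, otherwise val/val2 pairs, which puts the
 * maximum at vals[2] or at vals[4]/vals[5] respectively.
 */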
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;

	/* Need to verify underlying driver has not gone away */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
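
/*
 * ext_info attributes are driver-specific, string-based channel attributes
 * described by struct iio_chan_spec_ext_info; the helpers below enumerate
 * them and access them by name.
 */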
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);