industrialio-core.c

// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY]  = "gravity",
	[IIO_POSITIONRELATIVE]  = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
	[IIO_MOD_LINEAR_X] = "linear_x",
	[IIO_MOD_LINEAR_Y] = "linear_y",
	[IIO_MOD_LINEAR_Z] = "linear_z",
	[IIO_MOD_PITCH] = "pitch",
	[IIO_MOD_YAW] = "yaw",
	[IIO_MOD_ROLL] = "roll",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
	[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
};

/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID]
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);

/**
 * iio_buffer_enabled() - helper function to test if the buffer is enabled
 * @indio_dev: IIO device structure for device
 */
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode &
	       (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
		INDIO_BUFFER_SOFTWARE);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
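
/*
 * Usage sketch (not part of this file): core and driver code call
 * iio_buffer_enabled() to reject operations that must not race with a
 * running buffered capture, typically along the lines of:
 *
 *	if (iio_buffer_enabled(indio_dev))
 *		return -EBUSY;
 */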

/**
 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
 * @array: array of strings
 * @n: number of strings in the array
 * @str: string to match with
 *
 * Returns index of @str in the @array or -EINVAL, similar to match_string().
 * Uses sysfs_streq instead of strcmp for matching.
 *
 * This routine will look for a string in an array of strings.
 * The search will continue until the element is found or the n-th element
 * is reached, regardless of any NULL elements in the array.
 */
static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
					    const char *str)
{
	const char *item;
	int index;

	for (index = 0; index < n; index++) {
		item = array[index];
		if (!item)
			continue;
		if (sysfs_streq(item, str))
			return index;
	}

	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
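
/*
 * Usage sketch (not part of this file): drivers usually reach
 * iio_read_const_attr() through the IIO_CONST_ATTR() helper from
 * <linux/iio/sysfs.h>, which exposes a fixed string in sysfs. The attribute
 * name and value below are only an illustration:
 *
 *	static IIO_CONST_ATTR(in_voltage_scale_available, "0.5 1 2");
 *
 * Reading the resulting in_voltage_scale_available file then prints
 * "0.5 1 2".
 */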

/**
 * iio_device_set_clock() - Set current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 * @clock_id: timestamping clock posix identifier to set.
 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	int ret;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	ret = mutex_lock_interruptible(&indio_dev->mlock);
	if (ret)
		return ret;
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	iio_dev_opaque->clock_id = clock_id;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);

/**
 * iio_device_get_clock() - Retrieve current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);
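
/*
 * Usage sketch (not part of this file): trigger handlers commonly stamp a
 * scan with iio_get_time_ns() so the timestamp follows the clock selected
 * through current_timestamp_clock. The foo_* names and scan layout below
 * are hypothetical; the IIO calls are the standard buffer/trigger helpers.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_scan(st, st->scan.chans);
 *		iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */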

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int val = 0;
	int ret;

	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
				     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf) - 1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
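
/*
 * Usage sketch (not part of this file): a driver opts in to the
 * direct_reg_access debugfs file by providing the debugfs_reg_access
 * callback in its struct iio_info. The regmap-backed helper and foo_*
 * names below are hypothetical.
 *
 *	static int foo_reg_access(struct iio_dev *indio_dev, unsigned int reg,
 *				  unsigned int writeval, unsigned int *readval)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		if (readval)
 *			return regmap_read(st->regmap, reg, readval);
 *		return regmap_write(st->regmap, reg, writeval);
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.debugfs_reg_access = foo_reg_access,
 *	};
 */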
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
				uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
		      uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items || !e->items[i])
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
		       uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
		       size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int ret;

	if (!e->set)
		return -EINVAL;

	ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
	if (ret < 0)
		return ret;

	ret = e->set(indio_dev, chan, ret);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
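
/*
 * Usage sketch (not part of this file): drivers normally reach these
 * helpers through a struct iio_enum wired into a channel's ext_info array
 * via the IIO_ENUM()/IIO_ENUM_AVAILABLE() macros from <linux/iio/iio.h>.
 * The item names and callbacks below are hypothetical, and the
 * IIO_ENUM_AVAILABLE() argument list differs between kernel versions.
 *
 *	static const char * const foo_power_modes[] = { "low", "normal" };
 *
 *	static const struct iio_enum foo_power_mode_enum = {
 *		.items = foo_power_modes,
 *		.num_items = ARRAY_SIZE(foo_power_modes),
 *		.get = foo_get_power_mode,
 *		.set = foo_set_power_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &foo_power_mode_enum),
 *		IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE,
 *				   &foo_power_mode_enum),
 *		{ }
 *	};
 */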

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");

	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev: device the mounting matrix property is assigned to
 * @matrix: where to store retrieved matrix
 *
 * If the device is not assigned a mounting-matrix property, a default 3x3
 * identity matrix is filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format. */
		return err;

	/* Matrix was not declared at all: fall back to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);
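
/*
 * Usage sketch (not part of this file): a driver reads the firmware
 * "mount-matrix" property once at probe time and exposes it through a
 * mount_matrix ext_info attribute; IIO_MOUNT_MATRIX() from
 * <linux/iio/iio.h> routes reads to iio_show_mount_matrix(). The foo_*
 * state and getter below are hypothetical.
 *
 *	err = iio_read_mount_matrix(dev, &st->orientation);
 *	if (err)
 *		return err;
 *
 *	static const struct iio_mount_matrix *
 *	foo_get_mount_matrix(const struct iio_dev *indio_dev,
 *			     const struct iio_chan_spec *chan)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return &st->orientation;
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 */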

static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats an IIO value into its string representation
 * @buf: The buffer to which the formatted value gets written
 *       which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type: One of the IIO_VAL_* constants. This decides how the val
 *        and val2 parameters are formatted.
 * @size: Number of IIO value entries contained in vals
 * @vals: Pointer to the values, exact meaning depends on the
 *        type parameter.
 *
 * Return: the total number of characters written for a recognized
 * IIO_VAL_* type, 0 for an unrecognized type, or a negative number
 * on failure.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);
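
/*
 * Formatting sketch (not part of this file): iio_format_value() turns the
 * (type, vals[]) pair a driver returns from read_raw()/read_raw_multi()
 * into the string shown in sysfs. With hypothetical values:
 *
 *	int vals[2] = { 1, 500000 };
 *
 *	iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
 *	// buf now holds "1.500000\n"
 *
 *	vals[0] = 1; vals[1] = 4;
 *	iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, vals);
 *	// buf now holds "0.250000000\n"
 */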

static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_label)
		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);

	if (this_attr->c->extend_name)
		return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);

	return -EINVAL;
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
						      INDIO_MAX_RAW_ELEMENTS,
						      vals, &val_len,
						      this_attr->address);
	else
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
						&vals[0], &vals[1], this_attr->address);

	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val_len, vals);
}
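
/*
 * Callback sketch (not part of this file): iio_read_channel_info() funnels
 * every *_raw/*_scale/... attribute read into the driver's read_raw()
 * callback, with the mask argument carrying the IIO_CHAN_INFO_* item being
 * queried. The foo_* helpers and the scale value below are made up.
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			*val = foo_read_adc(chan->channel);
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			*val = 0;
 *			*val2 = 610500;		// hypothetical 0.6105 mV/LSB
 *			return IIO_VAL_INT_PLUS_MICRO;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */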

static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
{
	ssize_t len;
	int stride;
	int i;

	switch (type) {
	case IIO_VAL_INT:
		stride = 1;
		break;
	default:
		stride = 2;
		break;
	}

	len = sysfs_emit(buf, prefix);

	for (i = 0; i <= length - stride; i += stride) {
		if (i != 0) {
			len += sysfs_emit_at(buf, len, " ");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}

		len += __iio_format_value(buf, len, type, stride, &vals[i]);
		if (len >= PAGE_SIZE)
			return -EFBIG;
	}

	len += sysfs_emit_at(buf, len, "%s\n", suffix);

	return len;
}

static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{
	return iio_format_list(buf, vals, type, length, "", "");
}

static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	int length;

	/*
	 * length refers to the array size, not the number of elements.
	 * The purpose is to print the range [min, step, max] so length should
	 * be 3 in case of int, and 6 for other types.
	 */
	switch (type) {
	case IIO_VAL_INT:
		length = 3;
		break;
	default:
		length = 6;
		break;
	}

	return iio_format_list(buf, vals, type, length, "[", "]");
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
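
/*
 * Parsing sketch (not part of this file): with fract_mult = 100000 (the
 * IIO_VAL_INT_PLUS_MICRO setting used below in iio_write_channel_info()),
 * a value written to a sysfs attribute is decomposed like this:
 *
 *	int integer, fract;
 *
 *	iio_str_to_fixpoint("1.5", 100000, &integer, &fract);
 *	// integer == 1, fract == 500000, i.e. 1.5 in micro units
 *
 *	iio_str_to_fixpoint("-0.25", 100000, &integer, &fract);
 *	// integer == 0, fract == -250000
 */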

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
							   this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (!indio_dev->info->read_label && !chan->extend_name)
		return 0;

	ret = __iio_add_chan_devattr("label",
				     chan,
				     &iio_read_channel_label,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     NULL,
				     &iio_dev_opaque->channel_attr_list);
	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							&iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							&iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR_RO(name);

static ssize_t label_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->label);
}

static DEVICE_ATTR_RO(label);

static ssize_t current_timestamp_clock_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t current_timestamp_clock_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;
	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR_RW(current_timestamp_clock);

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes, and point to original binary attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       * attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
		iio_dev_opaque->chan_attr_group.bin_attrs =
			indio_dev->info->attrs->bin_attrs;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_clear_attrs;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
	iio_dev_opaque->groups = NULL;
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	lockdep_unregister_key(&iio_dev_opaque->mlock_key);

	ida_free(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}

const struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
  1436. /**
  1437. * iio_device_alloc() - allocate an iio_dev from a driver
  1438. * @parent: Parent device.
  1439. * @sizeof_priv: Space to allocate for private structure.
  1440. **/
  1441. struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
  1442. {
  1443. struct iio_dev_opaque *iio_dev_opaque;
  1444. struct iio_dev *indio_dev;
  1445. size_t alloc_size;
  1446. alloc_size = sizeof(struct iio_dev_opaque);
  1447. if (sizeof_priv) {
  1448. alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
  1449. alloc_size += sizeof_priv;
  1450. }
  1451. iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
  1452. if (!iio_dev_opaque)
  1453. return NULL;
  1454. indio_dev = &iio_dev_opaque->indio_dev;
  1455. indio_dev->priv = (char *)iio_dev_opaque +
  1456. ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
  1457. indio_dev->dev.parent = parent;
  1458. indio_dev->dev.type = &iio_device_type;
  1459. indio_dev->dev.bus = &iio_bus_type;
  1460. device_initialize(&indio_dev->dev);
  1461. mutex_init(&indio_dev->mlock);
  1462. mutex_init(&iio_dev_opaque->info_exist_lock);
  1463. INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
  1464. iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
  1465. if (iio_dev_opaque->id < 0) {
  1466. /* cannot use a dev_err as the name isn't available */
  1467. pr_err("failed to get device id\n");
  1468. kfree(iio_dev_opaque);
  1469. return NULL;
  1470. }
  1471. if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
  1472. ida_free(&iio_ida, iio_dev_opaque->id);
  1473. kfree(iio_dev_opaque);
  1474. return NULL;
  1475. }
  1476. INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
  1477. INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
  1478. lockdep_register_key(&iio_dev_opaque->mlock_key);
  1479. lockdep_set_class(&indio_dev->mlock, &iio_dev_opaque->mlock_key);
  1480. return indio_dev;
  1481. }
  1482. EXPORT_SYMBOL(iio_device_alloc);
  1483. /**
  1484. * iio_device_free() - free an iio_dev from a driver
  1485. * @dev: the iio_dev associated with the device
  1486. **/
  1487. void iio_device_free(struct iio_dev *dev)
  1488. {
  1489. if (dev)
  1490. put_device(&dev->dev);
  1491. }
  1492. EXPORT_SYMBOL(iio_device_free);
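/*
 * Usage sketch for the non-managed allocation pair above, assuming a
 * hypothetical driver (the foo_* names and struct foo_state are illustrative
 * only, not from this file). The private area requested via sizeof_priv is
 * reached through iio_priv(), and anything allocated with iio_device_alloc()
 * must eventually be handed to iio_device_free().
 *
 *	struct foo_state {
 *		struct mutex lock;
 *	};
 *
 *	static struct iio_dev *foo_alloc(struct device *parent)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = iio_device_alloc(parent, sizeof(*st));
 *		if (!indio_dev)
 *			return NULL;
 *
 *		st = iio_priv(indio_dev);
 *		mutex_init(&st->lock);
 *		return indio_dev;
 *	}
 *
 *	static void foo_destroy(struct iio_dev *indio_dev)
 *	{
 *		iio_device_free(indio_dev);
 *	}
 */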
static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @parent: Device to allocate iio_dev for, and parent for this IIO device
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	int ret;

	iio_dev = iio_device_alloc(parent, sizeof_priv);
	if (!iio_dev)
		return NULL;

	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
				       iio_dev);
	if (ret)
		return NULL;

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
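/*
 * Usage sketch for the managed variant above, assuming a hypothetical I2C
 * driver (bar_* names are illustrative). The iio_dev is tied to the lifetime
 * of @parent, so error paths can simply return without an explicit
 * iio_device_free().
 *
 *	struct bar_state {
 *		int last_raw;
 *	};
 *
 *	static int bar_probe(struct i2c_client *client)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct bar_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		st->last_raw = 0;
 *		i2c_set_clientdata(client, indio_dev);
 *		return 0;
 *	}
 */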
  1522. /**
  1523. * iio_chrdev_open() - chrdev file open for buffer access and ioctls
  1524. * @inode: Inode structure for identifying the device in the file system
  1525. * @filp: File structure for iio device used to keep and later access
  1526. * private data
  1527. *
  1528. * Return: 0 on success or -EBUSY if the device is already opened
  1529. **/
  1530. static int iio_chrdev_open(struct inode *inode, struct file *filp)
  1531. {
  1532. struct iio_dev_opaque *iio_dev_opaque =
  1533. container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
  1534. struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
  1535. struct iio_dev_buffer_pair *ib;
  1536. if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
  1537. return -EBUSY;
  1538. iio_device_get(indio_dev);
  1539. ib = kmalloc(sizeof(*ib), GFP_KERNEL);
  1540. if (!ib) {
  1541. iio_device_put(indio_dev);
  1542. clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
  1543. return -ENOMEM;
  1544. }
  1545. ib->indio_dev = indio_dev;
  1546. ib->buffer = indio_dev->buffer;
  1547. filp->private_data = ib;
  1548. return 0;
  1549. }
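/*
 * The open handler above enforces a single opener per device through
 * IIO_BUSY_BIT_POS, so a second concurrent open of the character device
 * fails with -EBUSY. A minimal userspace sketch, assuming a device that was
 * named "iio:device0" by iio_device_alloc() above:
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/iio:device0", O_RDONLY);
 *
 *		if (fd < 0 && errno == EBUSY)
 *			fprintf(stderr, "device already opened elsewhere\n");
 *		return fd < 0 ? 1 : 0;
 *	}
 */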
/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode: Inode structure pointer for the char device
 * @filp: File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}
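/*
 * Sketch of how an in-kernel user of the two helpers above plugs into the
 * dispatch loop in iio_ioctl() below (the baz_* names and BAZ_MAGIC_IOCTL are
 * illustrative; struct iio_ioctl_handler is the internal type whose ->entry
 * and ->ioctl members are used in this file). A handler returns
 * IIO_IOCTL_UNHANDLED for commands it does not recognize so the next
 * registered handler gets a chance.
 *
 *	static long baz_ioctl(struct iio_dev *indio_dev, struct file *filp,
 *			      unsigned int cmd, unsigned long arg)
 *	{
 *		if (cmd != BAZ_MAGIC_IOCTL)
 *			return IIO_IOCTL_UNHANDLED;
 *		return 0;
 *	}
 *
 *	static struct iio_ioctl_handler baz_handler = {
 *		.ioctl = baz_ioctl,
 *	};
 *
 *	// at setup time:
 *	iio_device_ioctl_handler_register(indio_dev, &baz_handler);
 *	// at teardown time:
 *	iio_device_ioctl_handler_unregister(&baz_handler);
 */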
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/**
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
	unsigned int i;

	if (!indio_dev->info->read_label)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].extend_name) {
			dev_err(&indio_dev->dev,
				"Cannot use labels and extend_name at the same time\n");
			return -EINVAL;
		}
	}

	return 0;
}
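/*
 * The two checks above reject duplicate scan_index values among buffered
 * channels and forbid combining per-channel extend_name with a read_label
 * callback. A hedged sketch of a channel array that satisfies both (qux_* is
 * an illustrative name; the scan_type values are arbitrary examples):
 *
 *	static const struct iio_chan_spec qux_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 1,
 *			.scan_index = 1,	// must differ from channel 0
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(2),
 *	};
 */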
static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode = NULL;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	/* The default dummy IIO device has no parent */
	else if (indio_dev->dev.parent)
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}

	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);
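/*
 * Drivers do not call __iio_device_register() directly; they use the
 * iio_device_register() wrapper from <linux/iio/iio.h>, which supplies
 * THIS_MODULE so the owning module can be pinned while the chrdev is open.
 * A minimal sketch of the fields a driver fills in first (qux_info,
 * qux_channels and "qux" are illustrative names, not from this file):
 *
 *	indio_dev->name = "qux";
 *	indio_dev->info = &qux_info;
 *	indio_dev->channels = qux_channels;
 *	indio_dev->num_channels = ARRAY_SIZE(qux_channels);
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		return ret;
 */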
/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
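/*
 * A non-managed driver pairs this with iio_device_register() in its remove
 * path and then drops its own allocation. Sketch only, assuming a platform
 * driver whose remove callback returns void and which stored the iio_dev as
 * driver data (qux_remove is an illustrative name):
 *
 *	static void qux_remove(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 *
 *		iio_device_unregister(indio_dev);
 *		iio_device_free(indio_dev);
 *	}
 */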
static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
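/*
 * Typical use of the claim/release pair in a driver's ->read_raw() callback,
 * so a one-shot conversion cannot race with the buffer being enabled. Sketch
 * only; quux_read_sample() is a hypothetical helper:
 *
 *	static int quux_read_raw(struct iio_dev *indio_dev,
 *				 struct iio_chan_spec const *chan,
 *				 int *val, int *val2, long mask)
 *	{
 *		int ret;
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			ret = iio_device_claim_direct_mode(indio_dev);
 *			if (ret)
 *				return ret;
 *			ret = quux_read_sample(indio_dev, chan->channel, val);
 *			iio_device_release_direct_mode(indio_dev);
 *			return ret ? ret : IIO_VAL_INT;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */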
/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

/**
 * iio_device_claim_buffer_mode - Keep device in buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in buffer mode it is guaranteed to stay
 * that way until iio_device_release_buffer_mode() is called.
 *
 * Use with iio_device_release_buffer_mode().
 *
 * Returns: 0 on success, -EBUSY on failure.
 */
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev))
		return 0;

	mutex_unlock(&indio_dev->mlock);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
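/*
 * Mirror image of the direct-mode helpers: used around an operation that only
 * makes sense while capture is running, so the buffer cannot be torn down
 * underneath it. Sketch only; quux_flush_fifo() is a hypothetical helper:
 *
 *	ret = iio_device_claim_buffer_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = quux_flush_fifo(indio_dev);
 *	iio_device_release_buffer_mode(indio_dev);
 *	return ret;
 */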
/**
 * iio_device_release_buffer_mode - releases claim on buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Use with iio_device_claim_buffer_mode().
 */
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);

/**
 * iio_device_get_current_mode() - helper function providing read-only access to
 *				   the opaque @currentmode variable
 * @indio_dev: IIO device structure for device
 */
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
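/*
 * The accessor above is intended for drivers whose behaviour depends on the
 * mode the core actually entered, for example in a buffer ->postenable()
 * callback. A hedged sketch; the switch arms are illustrative:
 *
 *	switch (iio_device_get_current_mode(indio_dev)) {
 *	case INDIO_BUFFER_TRIGGERED:
 *		// conversions are started by the trigger
 *		break;
 *	case INDIO_BUFFER_SOFTWARE:
 *		// device streams on its own, e.g. from a hardware FIFO
 *		break;
 *	default:
 *		return -EINVAL;
 *	}
 */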
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");