/* drivers/counter/counter-chrdev.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Generic Counter character device interface
  4. * Copyright (C) 2020 William Breathitt Gray
  5. */
  6. #include <linux/cdev.h>
  7. #include <linux/counter.h>
  8. #include <linux/err.h>
  9. #include <linux/errno.h>
  10. #include <linux/export.h>
  11. #include <linux/fs.h>
  12. #include <linux/kfifo.h>
  13. #include <linux/list.h>
  14. #include <linux/mutex.h>
  15. #include <linux/nospec.h>
  16. #include <linux/poll.h>
  17. #include <linux/slab.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/timekeeping.h>
  20. #include <linux/types.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/wait.h>
  23. #include "counter-chrdev.h"
/*
 * struct counter_comp_node - container node for a watched component
 * @l:         list node; linked into a counter_event_node's comp_list
 * @component: userspace-visible component description (type/scope/parent/id)
 * @comp:      driver-side component holding the typed read callback
 * @parent:    pointer to the owning counter_signal/counter_count, or NULL
 *             for device-scoped components
 */
struct counter_comp_node {
    struct list_head l;
    struct counter_component component;
    struct counter_comp comp;
    void *parent;
};
/*
 * Compare the read callbacks of two counter_comp structures. The callbacks
 * share storage (anonymous union in struct counter_comp), so a match on any
 * one member means both structures hold the same stored function pointer.
 */
#define counter_comp_read_is_equal(a, b) \
    (a.action_read == b.action_read || \
     a.device_u8_read == b.device_u8_read || \
     a.count_u8_read == b.count_u8_read || \
     a.signal_u8_read == b.signal_u8_read || \
     a.device_u32_read == b.device_u32_read || \
     a.count_u32_read == b.count_u32_read || \
     a.signal_u32_read == b.signal_u32_read || \
     a.device_u64_read == b.device_u64_read || \
     a.count_u64_read == b.count_u64_read || \
     a.signal_u64_read == b.signal_u64_read || \
     a.signal_array_u32_read == b.signal_array_u32_read || \
     a.device_array_u64_read == b.device_array_u64_read || \
     a.count_array_u64_read == b.count_array_u64_read || \
     a.signal_array_u64_read == b.signal_array_u64_read)
/*
 * Test whether a counter_comp has any read callback set; the callbacks
 * share union storage, so a non-NULL value in any member means a callback
 * is present.
 */
#define counter_comp_read_is_set(comp) \
    (comp.action_read || \
     comp.device_u8_read || \
     comp.count_u8_read || \
     comp.signal_u8_read || \
     comp.device_u32_read || \
     comp.count_u32_read || \
     comp.signal_u32_read || \
     comp.device_u64_read || \
     comp.count_u64_read || \
     comp.signal_u64_read || \
     comp.signal_array_u32_read || \
     comp.device_array_u64_read || \
     comp.count_array_u64_read || \
     comp.signal_array_u64_read)
/*
 * counter_chrdev_read - copy queued Counter events to userspace
 * @filp: file pointer; private_data holds the counter_device
 * @buf:  userspace destination buffer
 * @len:  size of @buf in bytes
 * @f_ps: file position (unused; device is non-seekable)
 *
 * Blocks (unless O_NONBLOCK) until at least one event is available, then
 * drains as many whole events as fit into @buf. Returns the number of
 * bytes copied, or a negative errno (-ENODEV if the driver was
 * unregistered, -EINVAL if @buf is smaller than one event, -EAGAIN for an
 * empty non-blocking read, -ERESTARTSYS on signal).
 */
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
                                   size_t len, loff_t *f_ps)
{
    struct counter_device *const counter = filp->private_data;
    int err;
    unsigned int copied;

    if (!counter->ops)
        return -ENODEV;

    /* Buffer must hold at least one complete event record */
    if (len < sizeof(struct counter_event))
        return -EINVAL;

    do {
        if (kfifo_is_empty(&counter->events)) {
            if (filp->f_flags & O_NONBLOCK)
                return -EAGAIN;

            /* Sleep until an event arrives or the device goes away */
            err = wait_event_interruptible(counter->events_wait,
                    !kfifo_is_empty(&counter->events) ||
                    !counter->ops);
            if (err < 0)
                return err;

            /* Woken because the driver was unregistered */
            if (!counter->ops)
                return -ENODEV;
        }

        /* Serialize concurrent readers draining the FIFO */
        if (mutex_lock_interruptible(&counter->events_out_lock))
            return -ERESTARTSYS;
        err = kfifo_to_user(&counter->events, buf, len, &copied);
        mutex_unlock(&counter->events_out_lock);
        if (err < 0)
            return err;
        /* Another reader may have raced us to the data; retry if so */
    } while (!copied);

    return copied;
}
  91. static __poll_t counter_chrdev_poll(struct file *filp,
  92. struct poll_table_struct *pollt)
  93. {
  94. struct counter_device *const counter = filp->private_data;
  95. __poll_t events = 0;
  96. if (!counter->ops)
  97. return events;
  98. poll_wait(filp, &counter->events_wait, pollt);
  99. if (!kfifo_is_empty(&counter->events))
  100. events = EPOLLIN | EPOLLRDNORM;
  101. return events;
  102. }
  103. static void counter_events_list_free(struct list_head *const events_list)
  104. {
  105. struct counter_event_node *p, *n;
  106. struct counter_comp_node *q, *o;
  107. list_for_each_entry_safe(p, n, events_list, l) {
  108. /* Free associated component nodes */
  109. list_for_each_entry_safe(q, o, &p->comp_list, l) {
  110. list_del(&q->l);
  111. kfree(q);
  112. }
  113. /* Free event node */
  114. list_del(&p->l);
  115. kfree(p);
  116. }
  117. }
/*
 * counter_set_event_node - stage a component watch on the next events list
 * @counter: Counter device
 * @watch:   requested watch (event id and channel)
 * @cfg:     component node template to copy for this watch
 *
 * Finds (or allocates) the event node matching @watch on
 * counter->next_events_list and appends a copy of @cfg to its component
 * list. Returns 0 on success, -EINVAL if an identical component watch is
 * already registered for the event, or -ENOMEM on allocation failure.
 *
 * Caller must hold counter->n_events_list_lock.
 */
static int counter_set_event_node(struct counter_device *const counter,
                                  struct counter_watch *const watch,
                                  const struct counter_comp_node *const cfg)
{
    struct counter_event_node *event_node;
    int err = 0;
    struct counter_comp_node *comp_node;

    /* Search for event in the list */
    list_for_each_entry(event_node, &counter->next_events_list, l)
        if (event_node->event == watch->event &&
            event_node->channel == watch->channel)
            break;

    /* If event is not already in the list */
    if (&event_node->l == &counter->next_events_list) {
        /* Allocate new event node */
        event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
        if (!event_node)
            return -ENOMEM;

        /* Configure event node and add to the list */
        event_node->event = watch->event;
        event_node->channel = watch->channel;
        INIT_LIST_HEAD(&event_node->comp_list);
        list_add(&event_node->l, &counter->next_events_list);
    }

    /* Check if component watch has already been set before */
    list_for_each_entry(comp_node, &event_node->comp_list, l)
        if (comp_node->parent == cfg->parent &&
            counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
            err = -EINVAL;
            goto exit_free_event_node;
        }

    /* Allocate component node */
    comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
    if (!comp_node) {
        err = -ENOMEM;
        goto exit_free_event_node;
    }
    *comp_node = *cfg;

    /* Add component node to event node */
    list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
    /*
     * Error path shared with success: if the event node ended up with no
     * component watchers (e.g. we just allocated it and then failed),
     * unlink and free it so the list holds only active watches.
     */
    if (list_empty(&event_node->comp_list)) {
        list_del(&event_node->l);
        kfree(event_node);
    }

    return err;
}
/*
 * counter_enable_events - handle COUNTER_ENABLE_EVENTS_IOCTL
 * @counter: Counter device
 *
 * Atomically replaces the active events list with the staged
 * next_events_list (freeing the old one) and lets the driver reconfigure
 * through the optional events_configure() callback. Returns 0 or the
 * callback's error code.
 *
 * Lock order: n_events_list_lock (mutex) is taken before the irq-safe
 * events_list_lock spinlock, guarding the staged and active lists
 * respectively.
 */
static int counter_enable_events(struct counter_device *const counter)
{
    unsigned long flags;
    int err = 0;

    mutex_lock(&counter->n_events_list_lock);
    spin_lock_irqsave(&counter->events_list_lock, flags);

    counter_events_list_free(&counter->events_list);
    list_replace_init(&counter->next_events_list,
                      &counter->events_list);

    if (counter->ops->events_configure)
        err = counter->ops->events_configure(counter);

    spin_unlock_irqrestore(&counter->events_list_lock, flags);
    mutex_unlock(&counter->n_events_list_lock);

    return err;
}
/*
 * counter_disable_events - handle COUNTER_DISABLE_EVENTS_IOCTL
 * @counter: Counter device
 *
 * Frees the active events list (under the irq-safe spinlock) and lets the
 * driver reconfigure through the optional events_configure() callback,
 * then frees the staged next_events_list under its mutex. Returns 0 or
 * the callback's error code.
 */
static int counter_disable_events(struct counter_device *const counter)
{
    unsigned long flags;
    int err = 0;

    spin_lock_irqsave(&counter->events_list_lock, flags);

    counter_events_list_free(&counter->events_list);

    if (counter->ops->events_configure)
        err = counter->ops->events_configure(counter);

    spin_unlock_irqrestore(&counter->events_list_lock, flags);

    mutex_lock(&counter->n_events_list_lock);
    counter_events_list_free(&counter->next_events_list);
    mutex_unlock(&counter->n_events_list_lock);

    return err;
}
  195. static int counter_get_ext(const struct counter_comp *const ext,
  196. const size_t num_ext, const size_t component_id,
  197. size_t *const ext_idx, size_t *const id)
  198. {
  199. struct counter_array *element;
  200. *id = 0;
  201. for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
  202. if (*id == component_id)
  203. return 0;
  204. if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
  205. element = ext[*ext_idx].priv;
  206. if (component_id - *id < element->length)
  207. return 0;
  208. *id += element->length;
  209. } else
  210. (*id)++;
  211. }
  212. return -EINVAL;
  213. }
/*
 * counter_add_watch - handle COUNTER_ADD_WATCH_IOCTL
 * @counter: Counter device
 * @arg:     userspace pointer to a struct counter_watch
 *
 * Validates the requested watch, resolves the component's read callback,
 * optionally lets the driver veto it via watch_validate(), and stages it
 * on next_events_list (activated later by COUNTER_ENABLE_EVENTS_IOCTL).
 * Returns 0 on success or a negative errno (-EFAULT, -EINVAL,
 * -EOPNOTSUPP, -ENOMEM, or a watch_validate() error).
 */
static int counter_add_watch(struct counter_device *const counter,
                             const unsigned long arg)
{
    void __user *const uwatch = (void __user *)arg;
    struct counter_watch watch;
    struct counter_comp_node comp_node = {};
    size_t parent, id;
    struct counter_comp *ext;
    size_t num_ext;
    size_t ext_idx, ext_id;
    int err = 0;

    if (copy_from_user(&watch, uwatch, sizeof(watch)))
        return -EFAULT;

    /* Bare event watches carry no component to resolve */
    if (watch.component.type == COUNTER_COMPONENT_NONE)
        goto no_component;

    parent = watch.component.parent;

    /* Configure parent component info for comp node */
    switch (watch.component.scope) {
    case COUNTER_SCOPE_DEVICE:
        ext = counter->ext;
        num_ext = counter->num_ext;
        break;
    case COUNTER_SCOPE_SIGNAL:
        if (parent >= counter->num_signals)
            return -EINVAL;
        /* array_index_nospec() clamps the user index under speculation */
        parent = array_index_nospec(parent, counter->num_signals);

        comp_node.parent = counter->signals + parent;

        ext = counter->signals[parent].ext;
        num_ext = counter->signals[parent].num_ext;
        break;
    case COUNTER_SCOPE_COUNT:
        if (parent >= counter->num_counts)
            return -EINVAL;
        parent = array_index_nospec(parent, counter->num_counts);

        comp_node.parent = counter->counts + parent;

        ext = counter->counts[parent].ext;
        num_ext = counter->counts[parent].num_ext;
        break;
    default:
        return -EINVAL;
    }

    id = watch.component.id;

    /* Configure component info for comp node */
    switch (watch.component.type) {
    case COUNTER_COMPONENT_SIGNAL:
        if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
            return -EINVAL;

        comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
        comp_node.comp.signal_u32_read = counter->ops->signal_read;
        break;
    case COUNTER_COMPONENT_COUNT:
        if (watch.component.scope != COUNTER_SCOPE_COUNT)
            return -EINVAL;

        comp_node.comp.type = COUNTER_COMP_U64;
        comp_node.comp.count_u64_read = counter->ops->count_read;
        break;
    case COUNTER_COMPONENT_FUNCTION:
        if (watch.component.scope != COUNTER_SCOPE_COUNT)
            return -EINVAL;

        comp_node.comp.type = COUNTER_COMP_FUNCTION;
        comp_node.comp.count_u32_read = counter->ops->function_read;
        break;
    case COUNTER_COMPONENT_SYNAPSE_ACTION:
        if (watch.component.scope != COUNTER_SCOPE_COUNT)
            return -EINVAL;
        /* id selects one synapse of the parent count */
        if (id >= counter->counts[parent].num_synapses)
            return -EINVAL;
        id = array_index_nospec(id, counter->counts[parent].num_synapses);

        comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
        comp_node.comp.action_read = counter->ops->action_read;
        comp_node.comp.priv = counter->counts[parent].synapses + id;
        break;
    case COUNTER_COMPONENT_EXTENSION:
        /* Map the flattened id onto the scope's extension array */
        err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
        if (err < 0)
            return err;

        comp_node.comp = ext[ext_idx];
        break;
    default:
        return -EINVAL;
    }

    /* The resolved component must be readable to be watchable */
    if (!counter_comp_read_is_set(comp_node.comp))
        return -EOPNOTSUPP;

no_component:
    mutex_lock(&counter->n_events_list_lock);

    /* Give the driver a chance to reject unsupported watches */
    if (counter->ops->watch_validate) {
        err = counter->ops->watch_validate(counter, &watch);
        if (err < 0)
            goto err_exit;
    }

    comp_node.component = watch.component;

    err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
    mutex_unlock(&counter->n_events_list_lock);

    return err;
}
/*
 * counter_chrdev_ioctl - ioctl dispatcher for the Counter chrdev
 * @filp: file pointer; private_data holds the counter_device
 * @cmd:  ioctl command (COUNTER_*_IOCTL)
 * @arg:  command argument (userspace pointer for ADD_WATCH)
 *
 * Dispatches the Counter ioctls while holding ops_exist_lock so the
 * driver cannot be unregistered mid-call. Returns 0 or a negative errno
 * (-ENODEV if the driver is gone, -ENOIOCTLCMD for unknown commands).
 */
static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
{
    struct counter_device *const counter = filp->private_data;
    int ret = -ENODEV;

    mutex_lock(&counter->ops_exist_lock);

    if (!counter->ops)
        goto out_unlock;

    switch (cmd) {
    case COUNTER_ADD_WATCH_IOCTL:
        ret = counter_add_watch(counter, arg);
        break;
    case COUNTER_ENABLE_EVENTS_IOCTL:
        ret = counter_enable_events(counter);
        break;
    case COUNTER_DISABLE_EVENTS_IOCTL:
        ret = counter_disable_events(counter);
        break;
    default:
        ret = -ENOIOCTLCMD;
        break;
    }

out_unlock:
    mutex_unlock(&counter->ops_exist_lock);

    return ret;
}
  336. static int counter_chrdev_open(struct inode *inode, struct file *filp)
  337. {
  338. struct counter_device *const counter = container_of(inode->i_cdev,
  339. typeof(*counter),
  340. chrdev);
  341. get_device(&counter->dev);
  342. filp->private_data = counter;
  343. return nonseekable_open(inode, filp);
  344. }
  345. static int counter_chrdev_release(struct inode *inode, struct file *filp)
  346. {
  347. struct counter_device *const counter = filp->private_data;
  348. int ret = 0;
  349. mutex_lock(&counter->ops_exist_lock);
  350. if (!counter->ops) {
  351. /* Free any lingering held memory */
  352. counter_events_list_free(&counter->events_list);
  353. counter_events_list_free(&counter->next_events_list);
  354. ret = -ENODEV;
  355. goto out_unlock;
  356. }
  357. ret = counter_disable_events(counter);
  358. if (ret < 0) {
  359. mutex_unlock(&counter->ops_exist_lock);
  360. return ret;
  361. }
  362. out_unlock:
  363. mutex_unlock(&counter->ops_exist_lock);
  364. put_device(&counter->dev);
  365. return ret;
  366. }
/* File operations for the Counter character device (non-seekable) */
static const struct file_operations counter_fops = {
    .owner = THIS_MODULE,
    .llseek = no_llseek,
    .read = counter_chrdev_read,
    .poll = counter_chrdev_poll,
    .unlocked_ioctl = counter_chrdev_ioctl,
    .open = counter_chrdev_open,
    .release = counter_chrdev_release,
};
/*
 * counter_chrdev_add - initialize Counter character device state
 * @counter: Counter device
 *
 * Initializes the event lists, their locks, the reader wait queue, and
 * the cdev, then allocates the events kfifo. Returns 0 on success or a
 * negative errno from kfifo_alloc().
 */
int counter_chrdev_add(struct counter_device *const counter)
{
    /* Initialize Counter events lists */
    INIT_LIST_HEAD(&counter->events_list);
    INIT_LIST_HEAD(&counter->next_events_list);
    spin_lock_init(&counter->events_list_lock);
    mutex_init(&counter->n_events_list_lock);

    init_waitqueue_head(&counter->events_wait);
    spin_lock_init(&counter->events_in_lock);
    mutex_init(&counter->events_out_lock);

    /* Initialize character device */
    cdev_init(&counter->chrdev, &counter_fops);

    /* Allocate Counter events queue (64 events deep) */
    return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}
/*
 * counter_chrdev_remove - release Counter character device resources
 * @counter: Counter device whose events kfifo is freed
 */
void counter_chrdev_remove(struct counter_device *const counter)
{
    kfifo_free(&counter->events);
}
  395. static int counter_get_array_data(struct counter_device *const counter,
  396. const enum counter_scope scope,
  397. void *const parent,
  398. const struct counter_comp *const comp,
  399. const size_t idx, u64 *const value)
  400. {
  401. const struct counter_array *const element = comp->priv;
  402. u32 value_u32 = 0;
  403. int ret;
  404. switch (element->type) {
  405. case COUNTER_COMP_SIGNAL_POLARITY:
  406. if (scope != COUNTER_SCOPE_SIGNAL)
  407. return -EINVAL;
  408. ret = comp->signal_array_u32_read(counter, parent, idx,
  409. &value_u32);
  410. *value = value_u32;
  411. return ret;
  412. case COUNTER_COMP_U64:
  413. switch (scope) {
  414. case COUNTER_SCOPE_DEVICE:
  415. return comp->device_array_u64_read(counter, idx, value);
  416. case COUNTER_SCOPE_SIGNAL:
  417. return comp->signal_array_u64_read(counter, parent, idx,
  418. value);
  419. case COUNTER_SCOPE_COUNT:
  420. return comp->count_array_u64_read(counter, parent, idx,
  421. value);
  422. default:
  423. return -EINVAL;
  424. }
  425. default:
  426. return -EINVAL;
  427. }
  428. }
/*
 * counter_get_data - read the current value of a watched component
 * @counter:   Counter device
 * @comp_node: component node identifying what to read
 * @value:     output; the component value widened to u64
 *
 * Dispatches to the appropriate typed read callback based on the
 * component's data type and scope. Returns the callback's result, 0 for
 * COUNTER_COMPONENT_NONE, or -EINVAL for unsupported type/scope
 * combinations.
 */
static int counter_get_data(struct counter_device *const counter,
                            const struct counter_comp_node *const comp_node,
                            u64 *const value)
{
    const struct counter_comp *const comp = &comp_node->comp;
    const enum counter_scope scope = comp_node->component.scope;
    const size_t id = comp_node->component.id;
    /* parent aliases both views; only the one matching scope is used */
    struct counter_signal *const signal = comp_node->parent;
    struct counter_count *const count = comp_node->parent;
    u8 value_u8 = 0;
    u32 value_u32 = 0;
    const struct counter_comp *ext;
    size_t num_ext;
    size_t ext_idx, ext_id;
    int ret;

    /* Bare event watches have no component value to read */
    if (comp_node->component.type == COUNTER_COMPONENT_NONE)
        return 0;

    switch (comp->type) {
    case COUNTER_COMP_U8:
    case COUNTER_COMP_BOOL:
        switch (scope) {
        case COUNTER_SCOPE_DEVICE:
            ret = comp->device_u8_read(counter, &value_u8);
            break;
        case COUNTER_SCOPE_SIGNAL:
            ret = comp->signal_u8_read(counter, signal, &value_u8);
            break;
        case COUNTER_SCOPE_COUNT:
            ret = comp->count_u8_read(counter, count, &value_u8);
            break;
        default:
            return -EINVAL;
        }
        *value = value_u8;
        return ret;
    case COUNTER_COMP_SIGNAL_LEVEL:
    case COUNTER_COMP_FUNCTION:
    case COUNTER_COMP_ENUM:
    case COUNTER_COMP_COUNT_DIRECTION:
    case COUNTER_COMP_COUNT_MODE:
    case COUNTER_COMP_SIGNAL_POLARITY:
        /* All u32-backed component types share the u32 callbacks */
        switch (scope) {
        case COUNTER_SCOPE_DEVICE:
            ret = comp->device_u32_read(counter, &value_u32);
            break;
        case COUNTER_SCOPE_SIGNAL:
            ret = comp->signal_u32_read(counter, signal,
                                        &value_u32);
            break;
        case COUNTER_SCOPE_COUNT:
            ret = comp->count_u32_read(counter, count, &value_u32);
            break;
        default:
            return -EINVAL;
        }
        *value = value_u32;
        return ret;
    case COUNTER_COMP_U64:
        switch (scope) {
        case COUNTER_SCOPE_DEVICE:
            return comp->device_u64_read(counter, value);
        case COUNTER_SCOPE_SIGNAL:
            return comp->signal_u64_read(counter, signal, value);
        case COUNTER_SCOPE_COUNT:
            return comp->count_u64_read(counter, count, value);
        default:
            return -EINVAL;
        }
    case COUNTER_COMP_SYNAPSE_ACTION:
        /* comp->priv points at the selected synapse */
        ret = comp->action_read(counter, count, comp->priv, &value_u32);
        *value = value_u32;
        return ret;
    case COUNTER_COMP_ARRAY:
        switch (scope) {
        case COUNTER_SCOPE_DEVICE:
            ext = counter->ext;
            num_ext = counter->num_ext;
            break;
        case COUNTER_SCOPE_SIGNAL:
            ext = signal->ext;
            num_ext = signal->num_ext;
            break;
        case COUNTER_SCOPE_COUNT:
            ext = count->ext;
            num_ext = count->num_ext;
            break;
        default:
            return -EINVAL;
        }

        /* Resolve which array extension id belongs to, then read the
         * element at offset (id - ext_id) within it */
        ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
        if (ret < 0)
            return ret;

        return counter_get_array_data(counter, scope, comp_node->parent,
                                      comp, id - ext_id, value);
    default:
        return -EINVAL;
    }
}
/**
 * counter_push_event - queue event for userspace reading
 * @counter: pointer to Counter structure
 * @event: triggered event
 * @channel: event channel
 *
 * For each component watched on @event/@channel, reads its current value
 * and queues a counter_event record on the events kfifo, then wakes any
 * sleeping readers. Safe to call from interrupt context.
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
                        const u8 channel)
{
    struct counter_event ev;
    unsigned int copied = 0;
    unsigned long flags;
    struct counter_event_node *event_node;
    struct counter_comp_node *comp_node;

    ev.timestamp = ktime_get_ns();
    ev.watch.event = event;
    ev.watch.channel = channel;

    /* Could be in an interrupt context, so use a spin lock */
    spin_lock_irqsave(&counter->events_list_lock, flags);

    /* Search for event in the list */
    list_for_each_entry(event_node, &counter->events_list, l)
        if (event_node->event == event &&
            event_node->channel == channel)
            break;

    /* If event is not in the list */
    if (&event_node->l == &counter->events_list)
        goto exit_early;

    /* Read and queue relevant comp for userspace */
    list_for_each_entry(comp_node, &event_node->comp_list, l) {
        ev.watch.component = comp_node->component;
        /* Read errors are reported to userspace as a positive status */
        ev.status = -counter_get_data(counter, comp_node, &ev.value);

        /* Interrupts already disabled; use the noirqsave variant */
        copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
                                                1, &counter->events_in_lock);
    }

exit_early:
    spin_unlock_irqrestore(&counter->events_list_lock, flags);

    /* Wake readers blocked in counter_chrdev_read()/poll() */
    if (copied)
        wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);