ipc_logging_cdev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ipc_logging.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "ipc_logging_private.h"

#define IPL_CDEV_MAX 255

static dev_t cdev_devt;
static struct class *cdev_class;

static DEFINE_IDA(ipl_minor_ida);
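
/*
 * Deserialization function registered for TSV_TYPE_STRING messages: decode
 * the timestamp, the qtimer value and the string payload into the decode
 * context, and make sure the decoded line ends with a newline.
 */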
static void dfunc_string(struct encode_context *ectxt, struct decode_context *dctxt)
{
        tsv_timestamp_read(ectxt, dctxt, "");
        tsv_qtimer_read(ectxt, dctxt, " ");
        tsv_byte_array_read(ectxt, dctxt, "");

        /* add trailing \n if necessary */
        if (*(dctxt->buff - 1) != '\n') {
                if (dctxt->size) {
                        ++dctxt->buff;
                        --dctxt->size;
                }
                *(dctxt->buff - 1) = '\n';
        }
}
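
/*
 * Copy one chunk of decoded log data from ilctxt into buff (at most
 * size - 1 bytes). The buffer must be at least MAX_MSG_DECODED_SIZE bytes.
 * When cont is non-zero and no data is available, sleep interruptibly on
 * read_avail and retry. Returns the number of bytes copied, 0 if no data
 * is available in non-continuous mode, or a negative errno.
 */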
static int debug_log(struct ipc_log_context *ilctxt, char *buff, int size, int cont)
{
        int i = 0;
        int ret;

        if (size < MAX_MSG_DECODED_SIZE) {
                pr_err("%s: buffer size %d < %d\n", __func__, size, MAX_MSG_DECODED_SIZE);
                return -ENOMEM;
        }

        do {
                i = ipc_log_extract(ilctxt, buff, size - 1);
                if (cont && i == 0) {
                        ret = wait_for_completion_interruptible(&ilctxt->read_avail);
                        if (ret < 0)
                                return ret;
                }
        } while (cont && i == 0);

        return i;
}
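
/* Place the device nodes of this class under /dev/ipc_logging/<device name>. */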
static char *ipc_log_cdev_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "ipc_logging/%s", dev_name(dev));
}
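
/*
 * Resolve the ipc_log_context that embeds this character device and stash
 * it in file->private_data for ipc_log_cdev_read().
 */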
static int ipc_log_cdev_open(struct inode *inode, struct file *filp)
{
        struct ipc_log_cdev *ipl_cdev;

        ipl_cdev = container_of(inode->i_cdev, struct ipc_log_cdev, cdev);
        filp->private_data = container_of(ipl_cdev, struct ipc_log_context, cdev);

        return 0;
}

/*
 * VFS read operation which extracts decoded log data from the
 * ipc_log_context stored in file->private_data.
 *
 * @filp: file structure
 * @buff: user buffer
 * @count: size of user buffer
 * @offp: file position to read from (only a value of 0 is accepted)
 *
 * Returns: 0 for end of file,
 *          > 0 number of bytes read,
 *          < 0 error
 */
static ssize_t ipc_log_cdev_read(struct file *filp, char __user *buff, size_t count, loff_t *offp)
{
        int ret, bsize;
        char *buffer;
        struct ipc_log_context *ilctxt;

        ilctxt = filp->private_data;
        ret = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
        if (ret)
                return ret;

        buffer = kmalloc(count, GFP_KERNEL);
        if (!buffer) {
                bsize = -ENOMEM;
                goto done;
        }

        /* only support non-continuous mode */
        bsize = debug_log(ilctxt, buffer, count, 0);

        if (bsize > 0) {
                if (copy_to_user(buff, buffer, bsize)) {
                        bsize = -EFAULT;
                        kfree(buffer);
                        goto done;
                }
                *offp += bsize;
        }
        kfree(buffer);

done:
        ipc_log_context_put(ilctxt);
        return bsize;
}

static const struct file_operations cdev_fops = {
        .owner = THIS_MODULE,
        .open = ipc_log_cdev_open,
        .read = ipc_log_cdev_read,
};
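
/**
 * ipc_log_cdev_remove() - remove the character device of a log context
 * @ilctxt: log context whose device node should be torn down
 *
 * If a device was successfully registered for @ilctxt, delete the
 * cdev/device pair and return its minor number to the IDA.
 */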
void ipc_log_cdev_remove(struct ipc_log_context *ilctxt)
{
        if (ilctxt->cdev.dev.class) {
                cdev_device_del(&ilctxt->cdev.cdev, &ilctxt->cdev.dev);
                ida_free(&ipl_minor_ida, (unsigned int)MINOR(ilctxt->cdev.dev.devt));
        }
}
EXPORT_SYMBOL(ipc_log_cdev_remove);
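
/**
 * ipc_log_cdev_create() - expose a log context under /dev/ipc_logging/
 * @ilctxt: log context to expose
 * @mod_name: name used for the device node
 *
 * Allocate a minor number, register the embedded device/cdev pair and
 * install the string deserialization function. On failure the context is
 * left without a character device (cdev.dev.class remains NULL).
 */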
void ipc_log_cdev_create(struct ipc_log_context *ilctxt, const char *mod_name)
{
        int ret;
        int minor;
        dev_t devno;

        if (!cdev_class) {
                pr_err("%s: %s no device class created\n", __func__, mod_name);
                return;
        }

        /* minors 0..IPL_CDEV_MAX - 1: the chrdev region covers IPL_CDEV_MAX minors */
        minor = ida_alloc_range(&ipl_minor_ida, 0, IPL_CDEV_MAX - 1, GFP_KERNEL);
        if (minor < 0) {
                pr_err("%s: %s failed to alloc ipl minor number %d\n", __func__, mod_name, minor);
                return;
        }

        devno = MKDEV(MAJOR(cdev_devt), minor);
        device_initialize(&ilctxt->cdev.dev);
        ilctxt->cdev.dev.devt = devno;
        ilctxt->cdev.dev.class = cdev_class;
        dev_set_name(&ilctxt->cdev.dev, "%s", mod_name);

        cdev_init(&ilctxt->cdev.cdev, &cdev_fops);

        ret = cdev_device_add(&ilctxt->cdev.cdev, &ilctxt->cdev.dev);
        if (ret) {
                pr_err("%s: unable to add ipl cdev %s, %d\n", __func__, mod_name, ret);
                ilctxt->cdev.dev.class = NULL;
                ida_free(&ipl_minor_ida, (unsigned int)minor);
                put_device(&ilctxt->cdev.dev);
                return;
        }

        add_deserialization_func((void *)ilctxt, TSV_TYPE_STRING, dfunc_string);
}
EXPORT_SYMBOL(ipc_log_cdev_create);
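
/**
 * ipc_log_cdev_init() - set up the shared chrdev region and device class
 *
 * Allocate a region of IPL_CDEV_MAX minor numbers and create the
 * "ipc_logging" class whose devnode callback places device nodes under
 * /dev/ipc_logging/. If either step fails, cdev_class stays NULL and
 * ipc_log_cdev_create() will bail out.
 */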
void ipc_log_cdev_init(void)
{
        int ret;

        cdev_class = NULL;

        ret = alloc_chrdev_region(&cdev_devt, 0, IPL_CDEV_MAX, "ipc_logging");
        if (ret) {
                pr_err("%s: unable to create ipl cdev region %d\n", __func__, ret);
                return;
        }

        cdev_class = class_create(THIS_MODULE, "ipc_logging");
        if (IS_ERR(cdev_class)) {
                pr_err("%s: unable to create ipl cdev class %ld\n", __func__, PTR_ERR(cdev_class));
                cdev_class = NULL;
                unregister_chrdev_region(cdev_devt, IPL_CDEV_MAX);
                return;
        }

        cdev_class->devnode = ipc_log_cdev_devnode;
}
EXPORT_SYMBOL(ipc_log_cdev_init);
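
/*
 * Example (sketch, not part of this driver): once a context has been
 * registered via ipc_log_cdev_create(), user space can drain the decoded
 * log with a plain open()/read() loop on the node created under
 * /dev/ipc_logging/. The device name "my_driver" below is a hypothetical
 * mod_name. The read size must be at least MAX_MSG_DECODED_SIZE bytes,
 * since debug_log() rejects smaller buffers with -ENOMEM, and read()
 * returns 0 once no more decoded data is available.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/dev/ipc_logging/my_driver", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */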