/* cpucp_log.c - relay CPUCP firmware log buffers into the kernel trace stream */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/mailbox_client.h>
  8. #include <linux/module.h>
  9. #include <linux/of_platform.h>
  10. #include <linux/module.h>
  11. #include <linux/of_address.h>
  12. #include <linux/of_device.h>
  13. #include <linux/io.h>
  14. #include <linux/ipc_logging.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/slab.h>
/* Largest chunk handed to trace_cpucp_log() in a single event */
#define MAX_PRINT_SIZE 1024
/* Number of staging buffers carved out of the global buffer */
#define MAX_BUF_NUM 4
/* Carry-over buffer size for a trailing line fragment without '\n' */
#define MAX_RESIDUAL_SIZE MAX_PRINT_SIZE
/* Bytes subtracted from the region-0 payload size (marker word) — see cpucp_log_rx() */
#define SIZE_ADJUST 4
/* Byte offset of the region-0 payload past the leading u32 marker */
#define SRC_OFFSET 4

#define CREATE_TRACE_POINTS
#include "trace_cpucp.h"
/* One ioremapped CPUCP log region */
struct remote_mem {
	void __iomem *start;		/* mapped base of the region */
	unsigned long long size;	/* region size in bytes */
};

/* A staging buffer shuttled between the free and full lists */
struct cpucp_buf {
	struct list_head node;
	char *buf;	/* points into cpucp_log_info::glb_buf */
	u32 size;	/* bytes of valid log data copied from the region */
	u32 cpy_idx;	/* copy offset; MAX_PRINT_SIZE headroom for residual prepend */
};

/* Per-device driver state */
struct cpucp_log_info {
	struct remote_mem *rmem;	/* array of mapped log regions */
	struct mbox_client cl;
	struct mbox_chan *ch;
	struct delayed_work work;	/* drains full_buffers_list into trace */
	struct device *dev;
	void __iomem *base;
	unsigned int rmem_idx;		/* region selected by the last marker */
	unsigned int num_bufs;		/* number of mapped regions */
	unsigned int total_buf_size;	/* sum of all region sizes */
	char *rem_buf;			/* residual line-fragment carry-over */
	char *glb_buf;			/* backing store for all staging buffers */
	int rem_len;			/* bytes currently held in rem_buf */
	spinlock_t free_list_lock;
	spinlock_t full_list_lock;
};

/* Buffers filled by the rx callback, awaiting the worker */
static LIST_HEAD(full_buffers_list);
/* Empty buffers available to the rx callback */
static LIST_HEAD(free_buffers_list);
static struct workqueue_struct *cpucp_wq;
  53. static inline bool get_last_newline(char *buf, int size, int *cnt)
  54. {
  55. int i;
  56. for (i = (size - 1); i >= 0 ; i--) {
  57. if (buf[i] == '\n') {
  58. buf[i] = '\0';
  59. *cnt = i + 1;
  60. return true;
  61. }
  62. }
  63. *cnt = size;
  64. return false;
  65. }
  66. static void cpucp_log_work(struct work_struct *work)
  67. {
  68. struct cpucp_log_info *info = container_of(work,
  69. struct cpucp_log_info,
  70. work.work);
  71. char *src;
  72. int buf_start = 0;
  73. int cnt = 0, print_size = 0, buf_size = 0;
  74. bool ret;
  75. char tmp_buf[MAX_PRINT_SIZE + 1];
  76. struct cpucp_buf *buf_node;
  77. unsigned long flags;
  78. while (1) {
  79. spin_lock_irqsave(&info->full_list_lock, flags);
  80. if (list_empty(&full_buffers_list)) {
  81. spin_unlock_irqrestore(&info->full_list_lock, flags);
  82. return;
  83. }
  84. buf_node = list_first_entry(&full_buffers_list,
  85. struct cpucp_buf, node);
  86. list_del(&buf_node->node);
  87. spin_unlock_irqrestore(&info->full_list_lock, flags);
  88. buf_start = buf_node->cpy_idx - info->rem_len;
  89. src = &buf_node->buf[buf_start];
  90. buf_size = buf_node->size + info->rem_len;
  91. if (info->rem_len) {
  92. memcpy(&buf_node->buf[buf_start],
  93. info->rem_buf, info->rem_len);
  94. info->rem_len = 0;
  95. }
  96. do {
  97. print_size = (buf_size >= MAX_PRINT_SIZE) ?
  98. MAX_PRINT_SIZE : buf_size;
  99. ret = get_last_newline(src, print_size, &cnt);
  100. if (cnt == print_size) {
  101. if (!ret && buf_size < MAX_PRINT_SIZE) {
  102. info->rem_len = buf_size;
  103. memcpy(info->rem_buf, src, buf_size);
  104. goto out;
  105. } else {
  106. snprintf(tmp_buf, print_size + 1, "%s", src);
  107. trace_cpucp_log(tmp_buf);
  108. }
  109. } else
  110. trace_cpucp_log(src);
  111. buf_start += cnt;
  112. buf_size -= cnt;
  113. src = &buf_node->buf[buf_start];
  114. } while (buf_size > 0);
  115. out:
  116. spin_lock_irqsave(&info->free_list_lock, flags);
  117. list_add_tail(&buf_node->node, &free_buffers_list);
  118. spin_unlock_irqrestore(&info->free_list_lock, flags);
  119. }
  120. }
  121. static struct cpucp_buf *get_free_buffer(struct cpucp_log_info *info)
  122. {
  123. struct cpucp_buf *buf_node;
  124. unsigned long flags;
  125. spin_lock_irqsave(&info->free_list_lock, flags);
  126. if (list_empty(&free_buffers_list)) {
  127. spin_unlock_irqrestore(&info->free_list_lock, flags);
  128. return NULL;
  129. }
  130. buf_node = list_first_entry(&free_buffers_list,
  131. struct cpucp_buf, node);
  132. list_del(&buf_node->node);
  133. spin_unlock_irqrestore(&info->free_list_lock, flags);
  134. return buf_node;
  135. }
  136. static void cpucp_log_rx(struct mbox_client *client, void *msg)
  137. {
  138. struct cpucp_log_info *info = dev_get_drvdata(client->dev);
  139. struct device *dev = info->dev;
  140. struct cpucp_buf *buf_node;
  141. struct remote_mem *rmem;
  142. void __iomem *src;
  143. u32 marker;
  144. unsigned long long rmem_size;
  145. unsigned long flags;
  146. int src_offset = 0;
  147. int size_adj = 0;
  148. buf_node = get_free_buffer(info);
  149. if (!buf_node) {
  150. dev_err(dev, "global buffer full dropping buffers\n");
  151. return;
  152. }
  153. marker = *(u32 *)(info->rmem)->start;
  154. if (marker <= info->rmem->size) {
  155. info->rmem_idx = 0;
  156. rmem_size = marker;
  157. } else if (marker <= info->total_buf_size) {
  158. info->rmem_idx = 1;
  159. rmem_size = marker - info->rmem->size;
  160. } else {
  161. pr_err("%s: Log marker incorrect: %u\n", __func__, marker);
  162. return;
  163. }
  164. if (info->rmem_idx == 0) {
  165. size_adj = SIZE_ADJUST;
  166. src_offset = SRC_OFFSET;
  167. }
  168. rmem = info->rmem + info->rmem_idx;
  169. rmem_size -= size_adj;
  170. src = rmem->start + src_offset;
  171. memcpy_fromio(&buf_node->buf[buf_node->cpy_idx], src, rmem_size);
  172. buf_node->size = rmem_size;
  173. spin_lock_irqsave(&info->full_list_lock, flags);
  174. list_add_tail(&buf_node->node, &full_buffers_list);
  175. spin_unlock_irqrestore(&info->full_list_lock, flags);
  176. if (!delayed_work_pending(&info->work))
  177. queue_delayed_work(cpucp_wq, &info->work, 0);
  178. }
  179. static int populate_free_buffers(struct cpucp_log_info *info,
  180. int rmem_size)
  181. {
  182. int i = 0;
  183. struct cpucp_buf *buf_nodes;
  184. buf_nodes = devm_kzalloc(info->dev,
  185. MAX_BUF_NUM * sizeof(struct cpucp_buf),
  186. GFP_KERNEL);
  187. if (!buf_nodes)
  188. return -ENOMEM;
  189. for (i = 0; i < MAX_BUF_NUM; i++) {
  190. buf_nodes[i].buf = &info->glb_buf[i * (rmem_size + MAX_PRINT_SIZE)];
  191. buf_nodes[i].size = rmem_size;
  192. buf_nodes[i].cpy_idx = MAX_PRINT_SIZE;
  193. list_add_tail(&buf_nodes[i].node, &free_buffers_list);
  194. }
  195. return 0;
  196. }
  197. static int cpucp_log_probe(struct platform_device *pdev)
  198. {
  199. struct device *dev = &pdev->dev;
  200. struct cpucp_log_info *info;
  201. struct mbox_client *cl;
  202. int ret, i = 0;
  203. struct resource *res;
  204. void __iomem *mem_base;
  205. struct remote_mem *rmem;
  206. int prev_size = 0;
  207. info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
  208. if (!info)
  209. return -ENOMEM;
  210. info->dev = dev;
  211. rmem = kcalloc(pdev->num_resources, sizeof(struct remote_mem),
  212. GFP_KERNEL);
  213. if (!rmem)
  214. return -ENOMEM;
  215. info->rmem = rmem;
  216. for (i = 0; i < pdev->num_resources; i++) {
  217. struct remote_mem *rmem = &info->rmem[i];
  218. res = platform_get_resource(pdev, IORESOURCE_MEM, i);
  219. if (!res) {
  220. dev_err(dev,
  221. "Failed to get the device base address\n");
  222. ret = -ENODEV;
  223. goto exit;
  224. }
  225. mem_base = devm_ioremap(&pdev->dev, res->start,
  226. resource_size(res));
  227. if (IS_ERR(mem_base)) {
  228. ret = PTR_ERR(mem_base);
  229. dev_err(dev, "Failed to io remap the region err: %d\n", ret);
  230. goto exit;
  231. }
  232. rmem->start = mem_base;
  233. rmem->size = resource_size(res);
  234. if (prev_size && (rmem->size != prev_size)) {
  235. ret = -EINVAL;
  236. goto exit;
  237. } else if (!prev_size) {
  238. prev_size = rmem->size;
  239. }
  240. info->total_buf_size += rmem->size;
  241. info->num_bufs++;
  242. }
  243. info->glb_buf = devm_kzalloc(dev, MAX_BUF_NUM *
  244. (rmem->size + MAX_PRINT_SIZE),
  245. GFP_KERNEL);
  246. if (!info->glb_buf) {
  247. ret = -ENOMEM;
  248. goto exit;
  249. }
  250. info->rem_buf = devm_kzalloc(dev, MAX_RESIDUAL_SIZE, GFP_KERNEL);
  251. if (!info->rem_buf) {
  252. ret = -ENOMEM;
  253. goto exit;
  254. }
  255. ret = populate_free_buffers(info, rmem->size);
  256. if (ret < 0)
  257. goto exit;
  258. cl = &info->cl;
  259. cl->dev = dev;
  260. cl->tx_block = false;
  261. cl->knows_txdone = true;
  262. cl->rx_callback = cpucp_log_rx;
  263. dev_set_drvdata(dev, info);
  264. INIT_DEFERRABLE_WORK(&info->work, &cpucp_log_work);
  265. spin_lock_init(&info->free_list_lock);
  266. spin_lock_init(&info->full_list_lock);
  267. cpucp_wq = create_freezable_workqueue("cpucp_wq");
  268. info->ch = mbox_request_channel(cl, 0);
  269. if (IS_ERR(info->ch)) {
  270. ret = PTR_ERR(info->ch);
  271. if (ret != -EPROBE_DEFER)
  272. dev_err(dev, "Failed to request mbox info: %d\n", ret);
  273. goto exit;
  274. }
  275. dev_dbg(dev, "CPUCP logging initialized\n");
  276. return 0;
  277. exit:
  278. kfree(info->rmem);
  279. /* devm will free up buffers in lists so just re-initialize lists */
  280. INIT_LIST_HEAD(&full_buffers_list);
  281. INIT_LIST_HEAD(&free_buffers_list);
  282. return ret;
  283. }
  284. static int cpucp_log_remove(struct platform_device *pdev)
  285. {
  286. struct cpucp_log_info *info;
  287. info = dev_get_drvdata(&pdev->dev);
  288. mbox_free_channel(info->ch);
  289. return 0;
  290. }
/* Device-tree match table */
static const struct of_device_id cpucp_log[] = {
	{.compatible = "qcom,cpucp-log"},
	{},
};

static struct platform_driver cpucp_log_driver = {
	.driver = {
		.name = "cpucp-log",
		.of_match_table = cpucp_log,
	},
	.probe = cpucp_log_probe,
	.remove = cpucp_log_remove,
};

/* Built-in driver: registered at device_initcall time, never unloaded */
builtin_platform_driver(cpucp_log_driver);

MODULE_LICENSE("GPL");