tz_log.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s:[%s][%d]: " fmt, KBUILD_MODNAME, __func__, __LINE__
  7. #include <linux/debugfs.h>
  8. #include <linux/errno.h>
  9. #include <linux/delay.h>
  10. #include <linux/io.h>
  11. #include <linux/msm_ion.h>
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/slab.h>
  16. #include <linux/string.h>
  17. #include <linux/types.h>
  18. #include <linux/uaccess.h>
  19. #include <linux/of.h>
  20. #include <linux/dma-buf.h>
  21. #include <linux/version.h>
  22. #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
  23. #include <linux/firmware/qcom/qcom_scm.h>
  24. #else
  25. #include <linux/qcom_scm.h>
  26. #endif
  27. #include <linux/qtee_shmbridge.h>
  28. #include <linux/proc_fs.h>
  29. #if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
  30. #include <linux/tmelog.h>
  31. #endif
  32. #include "misc/qseecomi.h"
  33. /* QSEE_LOG_BUF_SIZE = 32K */
  34. #define QSEE_LOG_BUF_SIZE 0x8000
  35. /* enlarged qsee log buf size is 128K by default */
  36. #define QSEE_LOG_BUF_SIZE_V2 0x20000
  37. /* Tme log buffer size 20K */
  38. #define TME_LOG_BUF_SIZE 0x5000
  39. /* TZ Diagnostic Area legacy version number */
  40. #define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2
  41. /* TZ Diagnostic Area version number */
  42. #define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */
  43. #define TZBSP_FVER_MAJOR_SHIFT 22
  44. #define TZBSP_FVER_MINOR_SHIFT 12
  45. #define TZBSP_DIAG_MAJOR_VERSION_V9 9
  46. #define TZBSP_DIAG_MINOR_VERSION_V2 2
  47. #define TZBSP_DIAG_MINOR_VERSION_V21 3
  48. #define TZBSP_DIAG_MINOR_VERSION_V22 4
  49. /* TZ Diag Feature Version Id */
  50. #define QCOM_SCM_FEAT_DIAG_ID 0x06
  51. /*
  52. * Preprocessor Definitions and Constants
  53. */
  54. #define TZBSP_MAX_CPU_COUNT 0x08
  55. /*
  56. * Number of VMID Tables
  57. */
  58. #define TZBSP_DIAG_NUM_OF_VMID 16
  59. /*
  60. * VMID Description length
  61. */
  62. #define TZBSP_DIAG_VMID_DESC_LEN 7
  63. /*
  64. * Number of Interrupts
  65. */
  66. #define TZBSP_DIAG_INT_NUM 32
  67. /*
  68. * Length of descriptive name associated with Interrupt
  69. */
  70. #define TZBSP_MAX_INT_DESC 16
  71. /*
  72. * TZ 3.X version info
  73. */
  74. #define QSEE_VERSION_TZ_3_X 0x800000
  75. /*
  76. * TZ 4.X version info
  77. */
  78. #define QSEE_VERSION_TZ_4_X 0x1000000
  79. #define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
  80. #define TZBSP_NONCE_LEN 12
  81. #define TZBSP_TAG_LEN 16
  82. #define ENCRYPTED_TZ_LOG_ID 0
  83. #define ENCRYPTED_QSEE_LOG_ID 1
  84. /*
  85. * Directory for TZ DBG logs
  86. */
  87. #define TZDBG_DIR_NAME "tzdbg"
  88. /*
  89. * VMID Table
  90. */
/*
 * VMID Table entry.
 * Layout is defined by the TZ firmware diag region; do not reorder or
 * resize fields.
 */
struct tzdbg_vmid_t {
	uint8_t vmid; /* Virtual Machine Identifier */
	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
};
/*
 * Boot Info Table (legacy, 32-bit warmboot jump address).
 * One entry per CPU; indexed by CPU number in _disp_tz_boot_stats().
 */
struct tzdbg_boot_info_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t spare; /* Reserved for future use. */
};
/*
 * Boot Info Table for 64-bit (used when TZ version >= QSEE_VERSION_TZ_3_X,
 * see _disp_tz_boot_stats()).
 */
struct tzdbg_boot_info64_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
	uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */
	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
};
/*
 * Reset Info Table: per-CPU reset reason and count.
 */
struct tzdbg_reset_info_t {
	uint32_t reset_type; /* Reset Reason */
	uint32_t reset_cnt; /* Number of resets occurred/CPU */
};
/*
 * Interrupt Info Table entry (TZ version < 4.X: 64-bit per-CPU counters).
 */
struct tzdbg_int_t {
	/*
	 * Type of Interrupt/exception
	 */
	uint16_t int_info;
	/*
	 * Availability of the slot
	 */
	uint8_t avail;
	/*
	 * Reserved for future use
	 */
	uint8_t spare;
	/*
	 * Interrupt # for IRQ and FIQ
	 */
	uint32_t int_num;
	/*
	 * ASCII text describing type of interrupt e.g:
	 * Secure Timer, EBI XPU. This string is always null terminated,
	 * supporting at most TZBSP_MAX_INT_DESC characters.
	 * Any additional characters are truncated.
	 */
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};
/*
 * Interrupt Info Table used in tz version >=4.X.
 * Same layout as struct tzdbg_int_t except the per-CPU counters shrink
 * to 32 bits.
 */
struct tzdbg_int_t_tz40 {
	uint16_t int_info;
	uint8_t avail;
	uint8_t spare;
	uint32_t int_num;
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
};
/* warm boot reason for cores */
struct tzbsp_diag_wakeup_info_t {
	/* Wake source info : APCS_GICC_HPPIR */
	uint32_t HPPIR;
	/* Wake source info : APCS_GICC_AHPPIR */
	uint32_t AHPPIR;
};
/*
 * Log ring buffer position: wrap-around count plus byte offset into the
 * ring (16-bit legacy form).
 */
struct tzdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* 32-bit variant of the ring buffer position (enlarged v2 log). */
struct tzdbg_log_pos_v2_t {
	uint32_t wrap;
	uint32_t offset;
};
/*
 * Log ring buffer: current write position followed by the log bytes.
 */
struct tzdbg_log_t {
	struct tzdbg_log_pos_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* v2 ring buffer with 32-bit positions. */
struct tzdbg_log_v2_t {
	struct tzdbg_log_pos_v2_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* Per-chunk encryption metadata: AES-GCM style nonce + tag per chunk. */
struct tzbsp_encr_info_for_log_chunk_t {
	uint32_t size_to_encr;
	uint8_t nonce[TZBSP_NONCE_LEN];
	uint8_t tag[TZBSP_TAG_LEN];
};
/*
 * Only `ENTIRE_LOG` will be used unless the
 * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
 * If this is true, the diag log will be encrypted in two
 * separate chunks: a smaller chunk containing only error
 * fatal logs and a bigger "rest of the log" chunk. In this
 * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
 * used instead of `ENTIRE_LOG`.
 */
enum tzbsp_encr_info_for_log_chunks_idx_t {
	BIG_LOG_CHUNK = 0,
	ENTIRE_LOG = 1,
	ERR_FATAL_LOG_CHUNK = 1,	/* aliases ENTIRE_LOG on purpose */
	MAX_NUM_OF_CHUNKS,
};
/* Encryption info for all log chunks plus the wrapped AES key. */
struct tzbsp_encr_info_t {
	uint32_t num_of_chunks;
	struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
};
/*
 * Diagnostic Table
 * Note: This is the reference data structure for tz diagnostic table
 * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
 * copied into buffer from i/o memory.
 *
 * All *_off members are byte offsets from the start of this structure;
 * the display helpers below always navigate via the offsets rather than
 * the anonymous-union members, since the real firmware layout may differ
 * from this reference.
 */
struct tzdbg_t {
	uint32_t magic_num;
	uint32_t version;
	/*
	 * Number of CPU's
	 */
	uint32_t cpu_count;
	/*
	 * Offset of VMID Table
	 */
	uint32_t vmid_info_off;
	/*
	 * Offset of Boot Table
	 */
	uint32_t boot_info_off;
	/*
	 * Offset of Reset info Table
	 */
	uint32_t reset_info_off;
	/*
	 * Offset of Interrupt info Table
	 */
	uint32_t int_info_off;
	/*
	 * Ring Buffer Offset
	 */
	uint32_t ring_off;
	/*
	 * Ring Buffer Length
	 */
	uint32_t ring_len;
	/* Offset for Wakeup info */
	uint32_t wakeup_info_off;
	union {
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V2
		 */
		struct {
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts;
			struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
			uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
			uint8_t nonce[TZBSP_NONCE_LEN];
			uint8_t tag[TZBSP_TAG_LEN];
		};
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V21
		 */
		struct {
			uint32_t encr_info_for_log_off;
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts_v2;
			struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
			struct tzbsp_encr_info_t encr_info_for_log;
		};
	};
	/*
	 * We need at least 2K for the ring buffer
	 */
	struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
};
/* Hypervisor log ring position: wrap count + byte offset. */
struct hypdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* RM log header as laid out by the firmware: producer index + size. */
struct rmdbg_log_hdr_t {
	uint32_t write_idx;
	uint32_t size;
};
/* Driver-side read cursor into the RM log. */
struct rmdbg_log_pos_t {
	uint32_t read_idx;
	uint32_t size;
};
/* Per-CPU hypervisor warm boot entry/exit counters. */
struct hypdbg_boot_info_t {
	uint32_t warm_entry_cnt;
	uint32_t warm_exit_cnt;
};
/* Hypervisor diagnostic area header followed by its log ring. */
struct hypdbg_t {
	/* Magic Number */
	uint32_t magic_num;
	/* Number of CPU's */
	uint32_t cpu_count;
	/* Ring Buffer Offset */
	uint32_t ring_off;
	/* Ring buffer position mgmt */
	struct hypdbg_log_pos_t log_pos;
	uint32_t log_len;
	/* S2 fault numbers */
	uint32_t s2_fault_counter;
	/* Boot Info */
	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
	/* Ring buffer pointer */
	uint8_t log_buf_p[];
};
/* TME log read cursor: offset into the TME buffer and bytes available. */
struct tme_log_pos {
	uint32_t offset;
	size_t size;
};
/*
 * Enumeration order for VMID's.
 * Each value indexes tzdbg.stat[] and names one debugfs/procfs node.
 */
enum tzdbg_stats_type {
	TZDBG_BOOT = 0,
	TZDBG_RESET,
	TZDBG_INTERRUPT,
	TZDBG_VMID,
	TZDBG_GENERAL,
	TZDBG_LOG,
	TZDBG_QSEE_LOG,
	TZDBG_HYP_GENERAL,
	TZDBG_HYP_LOG,
	TZDBG_RM_LOG,
	TZDBG_TME_LOG,
	TZDBG_STATS_MAX
};
/* One stat node: its name, formatted data, and read-cursor state. */
struct tzdbg_stat {
	size_t display_len;	/* bytes remaining for partial reads */
	size_t display_offset;	/* resume offset for partial reads */
	char *name;		/* debugfs node name */
	char *data;		/* formatted text handed to the reader */
	bool avail;		/* node present on this target */
};
/* Driver-wide state; a single static instance exists (tzdbg below). */
struct tzdbg {
	void __iomem *virt_iobase;	/* mapped TZ diag area */
	void __iomem *hyp_virt_iobase;	/* mapped hypervisor diag area */
	void __iomem *rmlog_virt_iobase; /* mapped RM log area */
	void __iomem *tmelog_virt_iobase; /* mapped TME log area */
	struct tzdbg_t *diag_buf;	/* snapshot of the TZ diag area */
	struct hypdbg_t *hyp_diag_buf;	/* snapshot of the hyp diag area */
	uint8_t *rm_diag_buf;
	uint8_t *tme_buf;
	char *disp_buf;			/* scratch buffer for formatting */
	int debug_tz[TZDBG_STATS_MAX];
	struct tzdbg_stat stat[TZDBG_STATS_MAX];
	uint32_t hyp_debug_rw_buf_size;
	uint32_t rmlog_rw_buf_size;
	bool is_hyplog_enabled;
	uint32_t tz_version;
	bool is_encrypted_log_enabled;
	bool is_enlarged_buf;
	bool is_full_encrypted_tz_logs_supported;
	bool is_full_encrypted_tz_logs_enabled;
	int tz_diag_minor_version;
	int tz_diag_major_version;
};
/* Encrypted log blob header as returned by TZ. */
struct tzbsp_encr_log_t {
	/* Magic Number */
	uint32_t magic_num;
	/* version NUMBER */
	uint32_t version;
	/* encrypted log size */
	uint32_t encr_log_buff_size;
	/* Wrap value*/
	uint16_t wrap_count;
	/* AES encryption key wrapped up with oem public key*/
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
	/* Nonce used for encryption*/
	uint8_t nonce[TZBSP_NONCE_LEN];
	/* Tag to be used for Validation */
	uint8_t tag[TZBSP_TAG_LEN];
	/* Encrypted log buffer */
	uint8_t log_buf[1];
};
/* Shared-memory-bridge allocation backing one encrypted log. */
struct encrypted_log_info {
	phys_addr_t paddr;
	void *vaddr;
	size_t size;
	uint64_t shmb_handle;	/* qtee_shmbridge handle for teardown */
};
/* Single driver instance; only the stat node names are known statically. */
static struct tzdbg tzdbg = {
	.stat[TZDBG_BOOT].name = "boot",
	.stat[TZDBG_RESET].name = "reset",
	.stat[TZDBG_INTERRUPT].name = "interrupt",
	.stat[TZDBG_VMID].name = "vmid",
	.stat[TZDBG_GENERAL].name = "general",
	.stat[TZDBG_LOG].name = "log",
	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
	.stat[TZDBG_HYP_LOG].name = "hyp_log",
	.stat[TZDBG_RM_LOG].name = "rm_log",
	.stat[TZDBG_TME_LOG].name = "tme_log",
};
/* QSEE log ring (legacy 16-bit positions and enlarged v2 forms). */
static struct tzdbg_log_t *g_qsee_log;
static struct tzdbg_log_v2_t *g_qsee_log_v2;
static dma_addr_t coh_pmem;		/* DMA handle of the QSEE log buffer */
static uint32_t debug_rw_buf_size;	/* size of the TZ diag snapshot */
static uint32_t display_buf_size;	/* size of tzdbg.disp_buf */
static uint32_t qseelog_buf_size;
static phys_addr_t disp_buf_paddr;
static uint32_t tmecrashdump_address_offset;
static uint64_t qseelog_shmbridge_handle;
static struct encrypted_log_info enc_qseelog_info;
static struct encrypted_log_info enc_tzlog_info;
  447. /*
  448. * Debugfs data structure and functions
  449. */
  450. static int _disp_tz_general_stats(void)
  451. {
  452. int len = 0;
  453. len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
  454. " Version : 0x%x\n"
  455. " Magic Number : 0x%x\n"
  456. " Number of CPU : %d\n",
  457. tzdbg.diag_buf->version,
  458. tzdbg.diag_buf->magic_num,
  459. tzdbg.diag_buf->cpu_count);
  460. tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
  461. return len;
  462. }
  463. static int _disp_tz_vmid_stats(void)
  464. {
  465. int i, num_vmid;
  466. int len = 0;
  467. struct tzdbg_vmid_t *ptr;
  468. ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
  469. tzdbg.diag_buf->vmid_info_off);
  470. num_vmid = ((tzdbg.diag_buf->boot_info_off -
  471. tzdbg.diag_buf->vmid_info_off)/
  472. (sizeof(struct tzdbg_vmid_t)));
  473. for (i = 0; i < num_vmid; i++) {
  474. if (ptr->vmid < 0xFF) {
  475. len += scnprintf(tzdbg.disp_buf + len,
  476. (debug_rw_buf_size - 1) - len,
  477. " 0x%x %s\n",
  478. (uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
  479. }
  480. if (len > (debug_rw_buf_size - 1)) {
  481. pr_warn("%s: Cannot fit all info into the buffer\n",
  482. __func__);
  483. break;
  484. }
  485. ptr++;
  486. }
  487. tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
  488. return len;
  489. }
/*
 * Format the per-CPU warm boot / power collapse counters as the "boot"
 * stat node.  TZ >= 3.X publishes the 64-bit table layout
 * (struct tzdbg_boot_info64_t, with PSCI counters); older firmware uses
 * the legacy 32-bit layout.  Returns the number of bytes written.
 */
static int _disp_tz_boot_stats(void)
{
	int i;
	int len = 0;
	struct tzdbg_boot_info_t *ptr = NULL;
	struct tzdbg_boot_info64_t *ptr_64 = NULL;

	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
	/* Pick the entry layout matching the firmware generation. */
	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	} else {
		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	}
	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%llx\n"
					" Warmboot entry CPU counter : 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter : 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n"
					" Psci entry CPU counter : 0x%x\n"
					" Psci exit CPU counter : 0x%x\n"
					" Warmboot Jump Address Instruction : 0x%x\n",
					i, (uint64_t)ptr_64->warm_jmp_addr,
					ptr_64->wb_entry_cnt,
					ptr_64->wb_exit_cnt,
					ptr_64->pc_entry_cnt,
					ptr_64->pc_exit_cnt,
					ptr_64->psci_entry_cnt,
					ptr_64->psci_exit_cnt,
					ptr_64->warm_jmp_instr);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr_64++;
		} else {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%x\n"
					" Warmboot entry CPU counter: 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter: 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n",
					i, ptr->warm_jmp_addr,
					ptr->wb_entry_cnt,
					ptr->wb_exit_cnt,
					ptr->pc_entry_cnt,
					ptr->pc_exit_cnt);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr++;
		}
	}
	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
	return len;
}
  556. static int _disp_tz_reset_stats(void)
  557. {
  558. int i;
  559. int len = 0;
  560. struct tzdbg_reset_info_t *ptr;
  561. ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
  562. tzdbg.diag_buf->reset_info_off);
  563. for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
  564. len += scnprintf(tzdbg.disp_buf + len,
  565. (debug_rw_buf_size - 1) - len,
  566. " CPU #: %d\n"
  567. " Reset Type (reason) : 0x%x\n"
  568. " Reset counter : 0x%x\n",
  569. i, ptr->reset_type, ptr->reset_cnt);
  570. if (len > (debug_rw_buf_size - 1)) {
  571. pr_warn("%s: Cannot fit all info into the buffer\n",
  572. __func__);
  573. break;
  574. }
  575. ptr++;
  576. }
  577. tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
  578. return len;
  579. }
  580. static int _disp_tz_interrupt_stats(void)
  581. {
  582. int i, j;
  583. int len = 0;
  584. int *num_int;
  585. void *ptr;
  586. struct tzdbg_int_t *tzdbg_ptr;
  587. struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
  588. num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
  589. (tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
  590. ptr = ((unsigned char *)tzdbg.diag_buf +
  591. tzdbg.diag_buf->int_info_off);
  592. pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
  593. if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
  594. tzdbg_ptr = ptr;
  595. for (i = 0; i < (*num_int); i++) {
  596. len += scnprintf(tzdbg.disp_buf + len,
  597. (debug_rw_buf_size - 1) - len,
  598. " Interrupt Number : 0x%x\n"
  599. " Type of Interrupt : 0x%x\n"
  600. " Description of interrupt : %s\n",
  601. tzdbg_ptr->int_num,
  602. (uint32_t)tzdbg_ptr->int_info,
  603. (uint8_t *)tzdbg_ptr->int_desc);
  604. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  605. len += scnprintf(tzdbg.disp_buf + len,
  606. (debug_rw_buf_size - 1) - len,
  607. " int_count on CPU # %d : %u\n",
  608. (uint32_t)j,
  609. (uint32_t)tzdbg_ptr->int_count[j]);
  610. }
  611. len += scnprintf(tzdbg.disp_buf + len,
  612. debug_rw_buf_size - 1, "\n");
  613. if (len > (debug_rw_buf_size - 1)) {
  614. pr_warn("%s: Cannot fit all info into buf\n",
  615. __func__);
  616. break;
  617. }
  618. tzdbg_ptr++;
  619. }
  620. } else {
  621. tzdbg_ptr_tz40 = ptr;
  622. for (i = 0; i < (*num_int); i++) {
  623. len += scnprintf(tzdbg.disp_buf + len,
  624. (debug_rw_buf_size - 1) - len,
  625. " Interrupt Number : 0x%x\n"
  626. " Type of Interrupt : 0x%x\n"
  627. " Description of interrupt : %s\n",
  628. tzdbg_ptr_tz40->int_num,
  629. (uint32_t)tzdbg_ptr_tz40->int_info,
  630. (uint8_t *)tzdbg_ptr_tz40->int_desc);
  631. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  632. len += scnprintf(tzdbg.disp_buf + len,
  633. (debug_rw_buf_size - 1) - len,
  634. " int_count on CPU # %d : %u\n",
  635. (uint32_t)j,
  636. (uint32_t)tzdbg_ptr_tz40->int_count[j]);
  637. }
  638. len += scnprintf(tzdbg.disp_buf + len,
  639. debug_rw_buf_size - 1, "\n");
  640. if (len > (debug_rw_buf_size - 1)) {
  641. pr_warn("%s: Cannot fit all info into buf\n",
  642. __func__);
  643. break;
  644. }
  645. tzdbg_ptr_tz40++;
  646. }
  647. }
  648. tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
  649. return len;
  650. }
  651. static int _disp_tz_log_stats_legacy(void)
  652. {
  653. int len = 0;
  654. unsigned char *ptr;
  655. ptr = (unsigned char *)tzdbg.diag_buf +
  656. tzdbg.diag_buf->ring_off;
  657. len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
  658. "%s\n", ptr);
  659. tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
  660. return len;
  661. }
/*
 * Copy new data from a TZ log ring buffer (legacy 16-bit positions)
 * into tzdbg.disp_buf, starting at the caller's saved read position.
 *
 * @log:       the live ring buffer (write position + bytes)
 * @log_start: caller's read position; advanced in place as data is consumed
 * @log_len:   ring size in bytes (modulus for offset arithmetic)
 * @count:     bytes requested by the reader
 * @buf_idx:   stat node to publish the result under
 *
 * Blocks (interruptibly, 50 ms polls) while the ring is empty; returns 0
 * if interrupted by a signal, otherwise the number of bytes copied.
 */
static int _disp_log_stats(struct tzdbg_log_t *log,
			struct tzdbg_log_pos_t *log_start, uint32_t log_len,
			size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* Refresh the snapshot so we observe new TZ writes. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * v2 counterpart of _disp_log_stats() for the enlarged log layout with
 * 32-bit wrap/offset positions (struct tzdbg_log_v2_t); the ring-copy
 * algorithm is otherwise identical.
 *
 * @log:       the live ring buffer (write position + bytes)
 * @log_start: caller's read position; advanced in place as data is consumed
 * @log_len:   ring size in bytes (modulus for offset arithmetic)
 * @count:     bytes requested by the reader
 * @buf_idx:   stat node to publish the result under
 *
 * Blocks (interruptibly, 50 ms polls) while the ring is empty; returns 0
 * if interrupted by a signal, otherwise the number of bytes copied.
 */
static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
			struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
			size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* Refresh the snapshot so we observe new TZ writes. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * __disp_hyp_log_stats() - Copy new bytes from the hypervisor log ring
 * buffer into the display buffer.
 * @log:       base of the hypervisor ring data (inside tzdbg.hyp_diag_buf).
 * @log_start: persistent read position (wrap count + byte offset); updated
 *             in place so the next read continues where this one stopped.
 * @log_len:   length of the ring buffer in bytes.
 * @count:     maximum number of bytes the reader asked for.
 * @buf_idx:   tzdbg.stat[] slot that receives the display buffer pointer.
 *
 * Same ring-reader scheme as _disp_log_stats_v2(), but the write position
 * lives in the hypdbg header rather than in the log structure itself.
 * Returns bytes copied into tzdbg.disp_buf, or 0 if interrupted.
 */
static int __disp_hyp_log_stats(uint8_t *log,
		struct hypdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
	unsigned long t = 0;
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = hyp->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = hyp->log_pos.wrap - 1;
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(hyp->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	}

	while (log_start->offset == hyp->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		t = msleep_interruptible(50);
		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* TZDBG_HYP_LOG: re-snapshot hyp diag area for new writes */
		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
			      tzdbg.hyp_debug_rw_buf_size);
	}

	/*
	 * NOTE(review): clamp assumes disp_buf holds at least
	 * hyp_debug_rw_buf_size bytes; verify against display_buf allocation.
	 */
	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
				tzdbg.hyp_debug_rw_buf_size : count;

	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
  853. static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len)
  854. {
  855. uint32_t i = 0;
  856. /*
  857. * Transfer data from rm dialog buff to display buffer in user space
  858. */
  859. while ((i < max_len) && (i < display_buf_size)) {
  860. tzdbg.disp_buf[i] = log_ptr[i];
  861. i++;
  862. }
  863. if (i != max_len)
  864. pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n",
  865. i, display_buf_size);
  866. tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf;
  867. return i;
  868. }
  869. static int print_text(char *intro_message,
  870. unsigned char *text_addr,
  871. unsigned int size,
  872. char *buf, uint32_t buf_len)
  873. {
  874. unsigned int i;
  875. int len = 0;
  876. pr_debug("begin address %p, size %d\n", text_addr, size);
  877. len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
  878. for (i = 0; i < size; i++) {
  879. if (buf_len <= len + 6) {
  880. pr_err("buffer not enough, buf_len %d, len %d\n",
  881. buf_len, len);
  882. return buf_len;
  883. }
  884. len += scnprintf(buf + len, buf_len - len, "%02hhx ",
  885. text_addr[i]);
  886. if ((i & 0x1f) == 0x1f)
  887. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  888. }
  889. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  890. return len;
  891. }
  892. static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
  893. enum tzdbg_stats_type type, uint32_t log_id)
  894. {
  895. int ret = 0, len = 0;
  896. struct tzbsp_encr_log_t *encr_log_head;
  897. uint32_t size = 0;
  898. if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
  899. (tzdbg.is_full_encrypted_tz_logs_enabled))
  900. pr_info("TZ not supporting full encrypted log functionality\n");
  901. ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
  902. enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported,
  903. tzdbg.is_full_encrypted_tz_logs_enabled);
  904. if (ret)
  905. return 0;
  906. encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
  907. pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
  908. display_buf_size, encr_log_head->encr_log_buff_size);
  909. size = encr_log_head->encr_log_buff_size;
  910. len += scnprintf(tzdbg.disp_buf + len,
  911. (display_buf_size - 1) - len,
  912. "\n-------- New Encrypted %s --------\n",
  913. ((log_id == ENCRYPTED_QSEE_LOG_ID) ?
  914. "QSEE Log" : "TZ Dialog"));
  915. len += scnprintf(tzdbg.disp_buf + len,
  916. (display_buf_size - 1) - len,
  917. "\nMagic_Num :\n0x%x\n"
  918. "\nVerion :\n%d\n"
  919. "\nEncr_Log_Buff_Size :\n%d\n"
  920. "\nWrap_Count :\n%d\n",
  921. encr_log_head->magic_num,
  922. encr_log_head->version,
  923. encr_log_head->encr_log_buff_size,
  924. encr_log_head->wrap_count);
  925. len += print_text("\nKey : ", encr_log_head->key,
  926. TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
  927. tzdbg.disp_buf + len, display_buf_size);
  928. len += print_text("\nNonce : ", encr_log_head->nonce,
  929. TZBSP_NONCE_LEN,
  930. tzdbg.disp_buf + len, display_buf_size - len);
  931. len += print_text("\nTag : ", encr_log_head->tag,
  932. TZBSP_TAG_LEN,
  933. tzdbg.disp_buf + len, display_buf_size - len);
  934. if (len > display_buf_size - size)
  935. pr_warn("Cannot fit all info into the buffer\n");
  936. pr_debug("encrypted log size %d, disply buffer size %d, used len %d\n",
  937. size, display_buf_size, len);
  938. len += print_text("\nLog : ", encr_log_head->log_buf, size,
  939. tzdbg.disp_buf + len, display_buf_size - len);
  940. memset(enc_log_info->vaddr, 0, enc_log_info->size);
  941. tzdbg.stat[type].data = tzdbg.disp_buf;
  942. return len;
  943. }
  944. static int _disp_tz_log_stats(size_t count)
  945. {
  946. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  947. static struct tzdbg_log_pos_t log_start = {0};
  948. struct tzdbg_log_v2_t *log_v2_ptr;
  949. struct tzdbg_log_t *log_ptr;
  950. log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
  951. tzdbg.diag_buf->ring_off -
  952. offsetof(struct tzdbg_log_t, log_buf));
  953. log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
  954. tzdbg.diag_buf->ring_off -
  955. offsetof(struct tzdbg_log_v2_t, log_buf));
  956. if (!tzdbg.is_enlarged_buf)
  957. return _disp_log_stats(log_ptr, &log_start,
  958. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  959. return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
  960. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  961. }
  962. static int _disp_hyp_log_stats(size_t count)
  963. {
  964. static struct hypdbg_log_pos_t log_start = {0};
  965. uint8_t *log_ptr;
  966. uint32_t log_len;
  967. log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
  968. tzdbg.hyp_diag_buf->ring_off);
  969. log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;
  970. return __disp_hyp_log_stats(log_ptr, &log_start,
  971. log_len, count, TZDBG_HYP_LOG);
  972. }
/*
 * _disp_rm_log_stats() - Display RM (Resource Manager) log data.
 * @count: maximum number of bytes requested by the reader.
 *
 * Reads the whole RM log region once per session into rm_diag_buf, then
 * hands it out in count-sized slices across successive read calls.
 * 'wrap_around' (despite the name) means "a multi-call read session is in
 * progress"; it is cleared, and 0 returned, once the session drains.
 *
 * NOTE(review): if a single call consumes the entire log (count >= size),
 * wrap_around is never set, so the next call re-snapshots and replays the
 * log from the start -- presumably intended so each new reader gets fresh
 * data, but verify a streaming read (e.g. cat) terminates as expected.
 */
static int _disp_rm_log_stats(size_t count)
{
	static struct rmdbg_log_pos_t log_start = { 0 };
	struct rmdbg_log_hdr_t *p_log_hdr = NULL;
	uint8_t *log_ptr = NULL;
	uint32_t log_len = 0;
	static bool wrap_around = { false };

	/* Return 0 to close the display file,if there is nothing else to do */
	if ((log_start.size == 0x0) && wrap_around) {
		wrap_around = false;
		return 0;
	}
	/* Copy RM log data to tzdbg diag buffer for the first time */
	/* Initialize the tracking data structure */
	if (tzdbg.rmlog_rw_buf_size != 0) {
		if (!wrap_around) {
			memcpy_fromio((void *)tzdbg.rm_diag_buf,
					tzdbg.rmlog_virt_iobase,
					tzdbg.rmlog_rw_buf_size);
			/* get RM header info first */
			p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf;
			/* Update RM log buffer index tracker and its size */
			log_start.read_idx = 0x0;
			log_start.size = p_log_hdr->size;
		}
		/* Update RM log buffer starting ptr */
		log_ptr =
			(uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf +
				sizeof(struct rmdbg_log_hdr_t));
	} else {
		/* Return 0 to close the display file,if there is nothing else to do */
		pr_err("There is no RM log to read, size is %d!\n",
			tzdbg.rmlog_rw_buf_size);
		return 0;
	}
	log_len = log_start.size;
	log_ptr += log_start.read_idx;
	/* Check if we exceed the max length provided by user space */
	log_len = (count > log_len) ? log_len : count;
	/* Update tracking data structure */
	log_start.size -= log_len;
	log_start.read_idx += log_len;

	/* Data remains: mark the session so the next call continues it */
	if (log_start.size)
		wrap_around = true;
	return __disp_rm_log_stats(log_ptr, log_len);
}
  1019. static int _disp_qsee_log_stats(size_t count)
  1020. {
  1021. static struct tzdbg_log_pos_t log_start = {0};
  1022. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  1023. if (!tzdbg.is_enlarged_buf)
  1024. return _disp_log_stats(g_qsee_log, &log_start,
  1025. QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
  1026. count, TZDBG_QSEE_LOG);
  1027. return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
  1028. QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
  1029. count, TZDBG_QSEE_LOG);
  1030. }
  1031. static int _disp_hyp_general_stats(size_t count)
  1032. {
  1033. int len = 0;
  1034. int i;
  1035. struct hypdbg_boot_info_t *ptr = NULL;
  1036. len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
  1037. tzdbg.hyp_debug_rw_buf_size - 1,
  1038. " Magic Number : 0x%x\n"
  1039. " CPU Count : 0x%x\n"
  1040. " S2 Fault Counter: 0x%x\n",
  1041. tzdbg.hyp_diag_buf->magic_num,
  1042. tzdbg.hyp_diag_buf->cpu_count,
  1043. tzdbg.hyp_diag_buf->s2_fault_counter);
  1044. ptr = tzdbg.hyp_diag_buf->boot_info;
  1045. for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
  1046. len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
  1047. (tzdbg.hyp_debug_rw_buf_size - 1) - len,
  1048. " CPU #: %d\n"
  1049. " Warmboot entry CPU counter: 0x%x\n"
  1050. " Warmboot exit CPU counter : 0x%x\n",
  1051. i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
  1052. if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
  1053. pr_warn("%s: Cannot fit all info into the buffer\n",
  1054. __func__);
  1055. break;
  1056. }
  1057. ptr++;
  1058. }
  1059. tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
  1060. return len;
  1061. }
  1062. #if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
  1063. static int _disp_tme_log_stats(size_t count)
  1064. {
  1065. static struct tme_log_pos log_start = { 0 };
  1066. static bool wrap_around = { false };
  1067. uint32_t buf_size;
  1068. uint8_t *log_ptr = NULL;
  1069. uint32_t log_len = 0;
  1070. int ret = 0;
  1071. /* Return 0 to close the display file */
  1072. if ((log_start.size == 0x0) && wrap_around) {
  1073. wrap_around = false;
  1074. return 0;
  1075. }
  1076. /* Copy TME log data to tzdbg diag buffer for the first time */
  1077. if (!wrap_around) {
  1078. if (tmelog_process_request(tmecrashdump_address_offset,
  1079. TME_LOG_BUF_SIZE, &buf_size)) {
  1080. pr_err("Read tme log failed, ret=%d, buf_size: %#x\n", ret, buf_size);
  1081. return 0;
  1082. }
  1083. log_start.offset = 0x0;
  1084. log_start.size = buf_size;
  1085. }
  1086. log_ptr = tzdbg.tmelog_virt_iobase;
  1087. log_len = log_start.size;
  1088. log_ptr += log_start.offset;
  1089. /* Check if we exceed the max length provided by user space */
  1090. log_len = min(min((uint32_t)count, log_len), display_buf_size);
  1091. log_start.size -= log_len;
  1092. log_start.offset += log_len;
  1093. pr_debug("log_len: %d, log_start.offset: %#x, log_start.size: %#x\n",
  1094. log_len, log_start.offset, log_start.size);
  1095. if (log_start.size)
  1096. wrap_around = true;
  1097. /* Copy TME log data to display buffer */
  1098. memcpy_fromio(tzdbg.disp_buf, log_ptr, log_len);
  1099. tzdbg.stat[TZDBG_TME_LOG].data = tzdbg.disp_buf;
  1100. return log_len;
  1101. }
  1102. #else
  1103. static int _disp_tme_log_stats(size_t count)
  1104. {
  1105. return 0;
  1106. }
  1107. #endif
/*
 * tzdbg_fs_read_unencrypted() - Read handler for the unencrypted tzdbg
 * proc entries.
 * @tz_id: TZDBG_* stat index bound to the opened proc file.
 * @buf:   user buffer.
 * @count: user read size.
 * @offp:  file offset; reset to 0 for the streaming log entries so each
 *         call hands back fresh ring data.
 *
 * Snapshots the relevant IO region, formats the requested stat into
 * tzdbg.stat[tz_id].data, and copies at most @count bytes to user space.
 */
static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0;

	/* Stats backed by the TZ diag region need a fresh snapshot first */
	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	/* Likewise for stats backed by the hypervisor diag region */
	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
				tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);

	switch (tz_id) {
	case TZDBG_BOOT:
		len = _disp_tz_boot_stats();
		break;
	case TZDBG_RESET:
		len = _disp_tz_reset_stats();
		break;
	case TZDBG_INTERRUPT:
		len = _disp_tz_interrupt_stats();
		break;
	case TZDBG_GENERAL:
		len = _disp_tz_general_stats();
		break;
	case TZDBG_VMID:
		len = _disp_tz_vmid_stats();
		break;
	case TZDBG_LOG:
		/* diag versions above legacy support count-sized ring reads */
		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
				(tzdbg.diag_buf->version >> 16)) {
			len = _disp_tz_log_stats(count);
			*offp = 0;
		} else {
			len = _disp_tz_log_stats_legacy();
		}
		break;
	case TZDBG_QSEE_LOG:
		len = _disp_qsee_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_HYP_GENERAL:
		len = _disp_hyp_general_stats(count);
		break;
	case TZDBG_HYP_LOG:
		len = _disp_hyp_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_RM_LOG:
		len = _disp_rm_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_TME_LOG:
		len = _disp_tme_log_stats(count);
		*offp = 0;
		break;
	default:
		break;
	}

	/* never hand back more than the caller asked for */
	if (len > count)
		len = count;

	return simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data, len);
}
/*
 * tzdbg_fs_read_encrypted() - Read handler for the encrypted tzdbg proc
 * entries (TZ log and QSEE log when encrypted logging is enabled).
 * @tz_id: TZDBG_* stat index bound to the opened proc file.
 * @buf:   user buffer.
 * @count: user read size.
 * @offp:  file offset; forced to 0 each call -- streaming position is kept
 *         in stat->display_offset/display_len instead.
 *
 * On the first call of a session, fetches and formats the encrypted log;
 * subsequent calls drain the formatted buffer in count-sized slices.
 * Returns bytes copied to user space, or 0 for an invalid @tz_id.
 */
static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0, ret = 0;
	/* NOTE(review): stat is computed before the bounds check below;
	 * safe only because it is not dereferenced until after the check. */
	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);

	pr_debug("%s: tz_id = %d\n", __func__, tz_id);

	if (tz_id >= TZDBG_STATS_MAX) {
		pr_err("invalid encrypted log id %d\n", tz_id);
		return ret;
	}

	/* Session start: fetch and format the requested encrypted log */
	if (!stat->display_len) {
		if (tz_id == TZDBG_QSEE_LOG)
			stat->display_len = _disp_encrpted_log_stats(
					&enc_qseelog_info,
					tz_id, ENCRYPTED_QSEE_LOG_ID);
		else
			stat->display_len = _disp_encrpted_log_stats(
					&enc_tzlog_info,
					tz_id, ENCRYPTED_TZ_LOG_ID);
		stat->display_offset = 0;
	}
	len = stat->display_len;
	if (len > count)
		len = count;

	*offp = 0;
	/* NOTE(review): 'count' is passed as the available-bytes argument;
	 * in-bounds only because len <= display_len -- confirm intent. */
	ret = simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data + stat->display_offset,
				count);
	stat->display_offset += ret;
	stat->display_len -= ret;
	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
	pr_debug("display_len = %lu, offset = %lu\n",
			stat->display_len, stat->display_offset);
	return ret;
}
  1208. static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
  1209. size_t count, loff_t *offp)
  1210. {
  1211. struct seq_file *seq = file->private_data;
  1212. int tz_id = TZDBG_STATS_MAX;
  1213. if (seq)
  1214. tz_id = *(int *)(seq->private);
  1215. else {
  1216. pr_err("%s: Seq data null unable to proceed\n", __func__);
  1217. return 0;
  1218. }
  1219. if (!tzdbg.is_encrypted_log_enabled ||
  1220. (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
  1221. || tz_id == TZDBG_RM_LOG || tz_id == TZDBG_TME_LOG)
  1222. return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
  1223. else
  1224. return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
  1225. }
/*
 * tzdbg_procfs_open() - proc open hook; binds the per-entry stat id stored
 * as proc private data to a seq_file for tzdbg_fs_read().
 * NOTE(review): upstream renamed PDE_DATA() to pde_data() in v5.17;
 * the 6.0.0 cutoff presumably matches this vendor tree -- verify.
 */
static int tzdbg_procfs_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
	return single_open(file, NULL, PDE_DATA(inode));
#else
	return single_open(file, NULL, pde_data(inode));
#endif
}
/*
 * tzdbg_procfs_release() - proc release hook; tears down the seq_file
 * state created by single_open() in tzdbg_procfs_open().
 */
static int tzdbg_procfs_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
/* proc_ops shared by every tzdbg entry; PROC_ENTRY_PERMANENT marks the
 * entries as never removed while the module is loaded. */
struct proc_ops tzdbg_fops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_read = tzdbg_fs_read,
	.proc_open = tzdbg_procfs_open,
	.proc_release = tzdbg_procfs_release,
};
  1244. static int tzdbg_init_tme_log(struct platform_device *pdev, void __iomem *virt_iobase)
  1245. {
  1246. /*
  1247. * Tme logs are dumped in tme log ddr region but that region is not
  1248. * accessible to hlos. Instead, collect logs at tme crashdump ddr
  1249. * region with tmecom interface and then display logs reading from
  1250. * crashdump region.
  1251. */
  1252. if (of_property_read_u32((&pdev->dev)->of_node, "tmecrashdump-address-offset",
  1253. &tmecrashdump_address_offset)) {
  1254. pr_err("Tme Crashdump address offset need to be defined!\n");
  1255. return -EINVAL;
  1256. }
  1257. tzdbg.tmelog_virt_iobase =
  1258. devm_ioremap(&pdev->dev, tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
  1259. if (!tzdbg.tmelog_virt_iobase) {
  1260. pr_err("ERROR: Could not ioremap: start=%#x, len=%u\n",
  1261. tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
  1262. return -ENXIO;
  1263. }
  1264. return 0;
  1265. }
  1266. /*
  1267. * Allocates log buffer from ION, registers the buffer at TZ
  1268. */
  1269. static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
  1270. {
  1271. int ret = 0;
  1272. void *buf = NULL;
  1273. uint32_t ns_vmids[] = {VMID_HLOS};
  1274. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1275. uint32_t ns_vm_nums = 1;
  1276. if (tzdbg.is_enlarged_buf) {
  1277. if (of_property_read_u32((&pdev->dev)->of_node,
  1278. "qseelog-buf-size-v2", &qseelog_buf_size)) {
  1279. pr_debug("Enlarged qseelog buf size isn't defined\n");
  1280. qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
  1281. }
  1282. } else {
  1283. qseelog_buf_size = QSEE_LOG_BUF_SIZE;
  1284. }
  1285. pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);
  1286. buf = dma_alloc_coherent(&pdev->dev,
  1287. qseelog_buf_size, &coh_pmem, GFP_KERNEL);
  1288. if (buf == NULL)
  1289. return -ENOMEM;
  1290. if (!tzdbg.is_encrypted_log_enabled) {
  1291. ret = qtee_shmbridge_register(coh_pmem,
  1292. qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1293. PERM_READ | PERM_WRITE,
  1294. &qseelog_shmbridge_handle);
  1295. if (ret) {
  1296. pr_err("failed to create bridge for qsee_log buf\n");
  1297. goto exit_free_mem;
  1298. }
  1299. }
  1300. g_qsee_log = (struct tzdbg_log_t *)buf;
  1301. g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
  1302. g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
  1303. g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;
  1304. ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
  1305. if (ret != QSEOS_RESULT_SUCCESS) {
  1306. pr_err(
  1307. "%s: scm_call to register log buf failed, resp result =%d\n",
  1308. __func__, ret);
  1309. goto exit_dereg_bridge;
  1310. }
  1311. return ret;
  1312. exit_dereg_bridge:
  1313. if (!tzdbg.is_encrypted_log_enabled)
  1314. qtee_shmbridge_deregister(qseelog_shmbridge_handle);
  1315. exit_free_mem:
  1316. dma_free_coherent(&pdev->dev, qseelog_buf_size,
  1317. (void *)g_qsee_log, coh_pmem);
  1318. return ret;
  1319. }
/*
 * tzdbg_free_qsee_log_buf() - Undo tzdbg_register_qsee_log_buf(): drop
 * the shmbridge registration (only created when encrypted logging is
 * disabled) and release the coherent DMA buffer backing g_qsee_log.
 */
static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
{
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			(void *)g_qsee_log, coh_pmem);
}
  1327. static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
  1328. {
  1329. int ret = 0;
  1330. uint32_t ns_vmids[] = {VMID_HLOS};
  1331. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1332. uint32_t ns_vm_nums = 1;
  1333. if (!tzdbg.is_encrypted_log_enabled)
  1334. return 0;
  1335. /* max encrypted qsee log buf zize (include header, and page align) */
  1336. enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;
  1337. enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1338. enc_qseelog_info.size,
  1339. &enc_qseelog_info.paddr, GFP_KERNEL);
  1340. if (enc_qseelog_info.vaddr == NULL)
  1341. return -ENOMEM;
  1342. ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
  1343. enc_qseelog_info.size, ns_vmids,
  1344. ns_vm_perms, ns_vm_nums,
  1345. PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
  1346. if (ret) {
  1347. pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
  1348. goto exit_free_qseelog;
  1349. }
  1350. pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
  1351. enc_qseelog_info.size);
  1352. enc_tzlog_info.size = debug_rw_buf_size;
  1353. enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1354. enc_tzlog_info.size,
  1355. &enc_tzlog_info.paddr, GFP_KERNEL);
  1356. if (enc_tzlog_info.vaddr == NULL)
  1357. goto exit_unreg_qseelog;
  1358. ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
  1359. enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1360. PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
  1361. if (ret) {
  1362. pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
  1363. goto exit_free_tzlog;
  1364. }
  1365. pr_debug("Alloc memory for encr_tz_log, size %zu\n",
  1366. enc_qseelog_info.size);
  1367. return 0;
  1368. exit_free_tzlog:
  1369. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1370. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1371. exit_unreg_qseelog:
  1372. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1373. exit_free_qseelog:
  1374. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1375. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1376. return -ENOMEM;
  1377. }
  1378. static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
  1379. {
  1380. qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
  1381. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1382. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1383. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1384. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1385. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1386. }
  1387. static int tzdbg_fs_init(struct platform_device *pdev)
  1388. {
  1389. int rc = 0;
  1390. int i;
  1391. struct proc_dir_entry *dent_dir;
  1392. struct proc_dir_entry *dent;
  1393. dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
  1394. if (dent_dir == NULL) {
  1395. dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
  1396. return -ENOMEM;
  1397. }
  1398. for (i = 0; i < TZDBG_STATS_MAX; i++) {
  1399. tzdbg.debug_tz[i] = i;
  1400. if (!tzdbg.stat[i].avail)
  1401. continue;
  1402. dent = proc_create_data(tzdbg.stat[i].name,
  1403. 0444, dent_dir,
  1404. &tzdbg_fops, &tzdbg.debug_tz[i]);
  1405. if (dent == NULL) {
  1406. dev_err(&pdev->dev, "TZ proc_create_data failed\n");
  1407. rc = -ENOMEM;
  1408. goto err;
  1409. }
  1410. }
  1411. platform_set_drvdata(pdev, dent_dir);
  1412. return 0;
  1413. err:
  1414. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1415. return rc;
  1416. }
  1417. static void tzdbg_fs_exit(struct platform_device *pdev)
  1418. {
  1419. struct proc_dir_entry *dent_dir;
  1420. dent_dir = platform_get_drvdata(pdev);
  1421. if (dent_dir)
  1422. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1423. }
  1424. static int __update_hypdbg_base(struct platform_device *pdev,
  1425. void __iomem *virt_iobase)
  1426. {
  1427. phys_addr_t hypdiag_phy_iobase;
  1428. uint32_t hyp_address_offset;
  1429. uint32_t hyp_size_offset;
  1430. struct hypdbg_t *hyp;
  1431. uint32_t *ptr = NULL;
  1432. if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
  1433. &hyp_address_offset)) {
  1434. dev_err(&pdev->dev, "hyplog address offset is not defined\n");
  1435. return -EINVAL;
  1436. }
  1437. if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
  1438. &hyp_size_offset)) {
  1439. dev_err(&pdev->dev, "hyplog size offset is not defined\n");
  1440. return -EINVAL;
  1441. }
  1442. hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
  1443. tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
  1444. hyp_size_offset);
  1445. tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
  1446. hypdiag_phy_iobase,
  1447. tzdbg.hyp_debug_rw_buf_size);
  1448. if (!tzdbg.hyp_virt_iobase) {
  1449. dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
  1450. &hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
  1451. return -ENXIO;
  1452. }
  1453. ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
  1454. if (!ptr)
  1455. return -ENOMEM;
  1456. tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
  1457. hyp = tzdbg.hyp_diag_buf;
  1458. hyp->log_pos.wrap = hyp->log_pos.offset = 0;
  1459. return 0;
  1460. }
  1461. static int __update_rmlog_base(struct platform_device *pdev,
  1462. void __iomem *virt_iobase)
  1463. {
  1464. uint32_t rmlog_address;
  1465. uint32_t rmlog_size;
  1466. uint32_t *ptr = NULL;
  1467. /* if we don't get the node just ignore it */
  1468. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address",
  1469. &rmlog_address)) {
  1470. dev_err(&pdev->dev, "RM log address is not defined\n");
  1471. tzdbg.rmlog_rw_buf_size = 0;
  1472. return 0;
  1473. }
  1474. /* if we don't get the node just ignore it */
  1475. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size",
  1476. &rmlog_size)) {
  1477. dev_err(&pdev->dev, "RM log size is not defined\n");
  1478. tzdbg.rmlog_rw_buf_size = 0;
  1479. return 0;
  1480. }
  1481. tzdbg.rmlog_rw_buf_size = rmlog_size;
  1482. /* Check if there is RM log to read */
  1483. if (!tzdbg.rmlog_rw_buf_size) {
  1484. tzdbg.rmlog_virt_iobase = NULL;
  1485. tzdbg.rm_diag_buf = NULL;
  1486. dev_err(&pdev->dev, "RM log size is %d\n",
  1487. tzdbg.rmlog_rw_buf_size);
  1488. return 0;
  1489. }
  1490. tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev,
  1491. rmlog_address,
  1492. rmlog_size);
  1493. if (!tzdbg.rmlog_virt_iobase) {
  1494. dev_err(&pdev->dev, "ERROR could not ioremap: start=%u, len=%u\n",
  1495. rmlog_address, tzdbg.rmlog_rw_buf_size);
  1496. return -ENXIO;
  1497. }
  1498. ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL);
  1499. if (!ptr)
  1500. return -ENOMEM;
  1501. tzdbg.rm_diag_buf = (uint8_t *)ptr;
  1502. return 0;
  1503. }
  1504. static int tzdbg_get_tz_version(void)
  1505. {
  1506. u64 version;
  1507. int ret = 0;
  1508. ret = qcom_scm_get_tz_log_feat_id(&version);
  1509. if (ret) {
  1510. pr_err("%s: scm_call to get tz version failed\n",
  1511. __func__);
  1512. return ret;
  1513. }
  1514. tzdbg.tz_version = version;
  1515. ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
  1516. if (ret) {
  1517. pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
  1518. __func__, ret);
  1519. return ret;
  1520. }
  1521. pr_warn("tz diag version is %llu\n", version);
  1522. tzdbg.tz_diag_major_version =
  1523. ((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1524. tzdbg.tz_diag_minor_version =
  1525. ((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1526. if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
  1527. switch (tzdbg.tz_diag_minor_version) {
  1528. case TZBSP_DIAG_MINOR_VERSION_V2:
  1529. case TZBSP_DIAG_MINOR_VERSION_V21:
  1530. case TZBSP_DIAG_MINOR_VERSION_V22:
  1531. tzdbg.is_enlarged_buf = true;
  1532. break;
  1533. default:
  1534. tzdbg.is_enlarged_buf = false;
  1535. }
  1536. } else {
  1537. tzdbg.is_enlarged_buf = false;
  1538. }
  1539. return ret;
  1540. }
  1541. static void tzdbg_query_encrypted_log(void)
  1542. {
  1543. int ret = 0;
  1544. uint64_t enabled;
  1545. ret = qcom_scm_query_encrypted_log_feature(&enabled);
  1546. if (ret) {
  1547. if (ret == -EIO)
  1548. pr_info("SCM_CALL : SYS CALL NOT SUPPORTED IN TZ\n");
  1549. else
  1550. pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
  1551. tzdbg.is_encrypted_log_enabled = false;
  1552. } else {
  1553. pr_warn("encrypted qseelog enabled is %llu\n", enabled);
  1554. tzdbg.is_encrypted_log_enabled = enabled;
  1555. }
  1556. }
  1557. /*
  1558. * Driver functions
  1559. */
  1560. static int tz_log_probe(struct platform_device *pdev)
  1561. {
  1562. struct resource *resource;
  1563. void __iomem *virt_iobase;
  1564. phys_addr_t tzdiag_phy_iobase;
  1565. uint32_t *ptr = NULL;
  1566. int ret = 0, i;
  1567. /*
  1568. * By default all nodes will be created.
  1569. * Mark avail as false later selectively if there's need to skip proc node creation.
  1570. */
  1571. for (i = 0; i < TZDBG_STATS_MAX; i++)
  1572. tzdbg.stat[i].avail = true;
  1573. ret = tzdbg_get_tz_version();
  1574. if (ret)
  1575. return ret;
  1576. /*
  1577. * Get address that stores the physical location diagnostic data
  1578. */
  1579. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1580. if (!resource) {
  1581. dev_err(&pdev->dev,
  1582. "%s: ERROR Missing MEM resource\n", __func__);
  1583. return -ENXIO;
  1584. }
  1585. /*
  1586. * Get the debug buffer size
  1587. */
  1588. debug_rw_buf_size = resource_size(resource);
  1589. /*
  1590. * Map address that stores the physical location diagnostic data
  1591. */
  1592. virt_iobase = devm_ioremap(&pdev->dev, resource->start,
  1593. debug_rw_buf_size);
  1594. if (!virt_iobase) {
  1595. dev_err(&pdev->dev,
  1596. "%s: ERROR could not ioremap: start=%pr, len=%u\n",
  1597. __func__, &resource->start,
  1598. (unsigned int)(debug_rw_buf_size));
  1599. return -ENXIO;
  1600. }
  1601. if (pdev->dev.of_node) {
  1602. tzdbg.is_hyplog_enabled = of_property_read_bool(
  1603. (&pdev->dev)->of_node, "qcom,hyplog-enabled");
  1604. if (tzdbg.is_hyplog_enabled) {
  1605. ret = __update_hypdbg_base(pdev, virt_iobase);
  1606. if (ret) {
  1607. dev_err(&pdev->dev,
  1608. "%s: fail to get hypdbg_base ret %d\n",
  1609. __func__, ret);
  1610. return -EINVAL;
  1611. }
  1612. ret = __update_rmlog_base(pdev, virt_iobase);
  1613. if (ret) {
  1614. dev_err(&pdev->dev,
  1615. "%s: fail to get rmlog_base ret %d\n",
  1616. __func__, ret);
  1617. return -EINVAL;
  1618. }
  1619. } else {
  1620. tzdbg.stat[TZDBG_HYP_LOG].avail = false;
  1621. tzdbg.stat[TZDBG_HYP_GENERAL].avail = false;
  1622. tzdbg.stat[TZDBG_RM_LOG].avail = false;
  1623. dev_info(&pdev->dev, "Hyp log service not support\n");
  1624. }
  1625. } else {
  1626. dev_dbg(&pdev->dev, "Device tree data is not found\n");
  1627. }
  1628. /*
  1629. * Retrieve the address of diagnostic data
  1630. */
  1631. tzdiag_phy_iobase = readl_relaxed(virt_iobase);
  1632. tzdbg_query_encrypted_log();
  1633. /*
  1634. * Map the diagnostic information area if encryption is disabled
  1635. */
  1636. if (!tzdbg.is_encrypted_log_enabled) {
  1637. tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
  1638. tzdiag_phy_iobase, debug_rw_buf_size);
  1639. if (!tzdbg.virt_iobase) {
  1640. dev_err(&pdev->dev,
  1641. "%s: could not ioremap: start=%pr, len=%u\n",
  1642. __func__, &tzdiag_phy_iobase,
  1643. debug_rw_buf_size);
  1644. return -ENXIO;
  1645. }
  1646. /* allocate diag_buf */
  1647. ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
  1648. if (ptr == NULL)
  1649. return -ENOMEM;
  1650. tzdbg.diag_buf = (struct tzdbg_t *)ptr;
  1651. } else {
  1652. if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
  1653. (tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
  1654. tzdbg.is_full_encrypted_tz_logs_supported = true;
  1655. if (pdev->dev.of_node) {
  1656. tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
  1657. (&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
  1658. }
  1659. }
  1660. /* Init for tme log */
  1661. ret = tzdbg_init_tme_log(pdev, virt_iobase);
  1662. if (ret < 0) {
  1663. tzdbg.stat[TZDBG_TME_LOG].avail = false;
  1664. pr_warn("Tme log initialization failed!\n");
  1665. }
  1666. /* register unencrypted qsee log buffer */
  1667. ret = tzdbg_register_qsee_log_buf(pdev);
  1668. if (ret)
  1669. goto exit_free_diag_buf;
  1670. /* allocate encrypted qsee and tz log buffer */
  1671. ret = tzdbg_allocate_encrypted_log_buf(pdev);
  1672. if (ret) {
  1673. dev_err(&pdev->dev,
  1674. " %s: Failed to allocate encrypted log buffer\n",
  1675. __func__);
  1676. goto exit_free_qsee_log_buf;
  1677. }
  1678. /* allocate display_buf */
  1679. if (UINT_MAX/4 < qseelog_buf_size) {
  1680. pr_err("display_buf_size integer overflow\n");
  1681. goto exit_free_qsee_log_buf;
  1682. }
  1683. display_buf_size = qseelog_buf_size * 4;
  1684. tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
  1685. &disp_buf_paddr, GFP_KERNEL);
  1686. if (tzdbg.disp_buf == NULL) {
  1687. ret = -ENOMEM;
  1688. goto exit_free_encr_log_buf;
  1689. }
  1690. if (tzdbg_fs_init(pdev))
  1691. goto exit_free_disp_buf;
  1692. return 0;
  1693. exit_free_disp_buf:
  1694. dma_free_coherent(&pdev->dev, display_buf_size,
  1695. (void *)tzdbg.disp_buf, disp_buf_paddr);
  1696. exit_free_encr_log_buf:
  1697. tzdbg_free_encrypted_log_buf(pdev);
  1698. exit_free_qsee_log_buf:
  1699. tzdbg_free_qsee_log_buf(pdev);
  1700. exit_free_diag_buf:
  1701. if (!tzdbg.is_encrypted_log_enabled)
  1702. kfree(tzdbg.diag_buf);
  1703. return -ENXIO;
  1704. }
/*
 * tz_log_remove() - platform driver remove callback.
 *
 * Undoes tz_log_probe() in reverse order: removes the proc/debugfs
 * entries first so no reader can touch the buffers being freed below.
 * Always returns 0 (platform remove callbacks cannot veto removal).
 */
static int tz_log_remove(struct platform_device *pdev)
{
	tzdbg_fs_exit(pdev);
	/* Free the display buffer allocated with dma_alloc_coherent(). */
	dma_free_coherent(&pdev->dev, display_buf_size,
		(void *)tzdbg.disp_buf, disp_buf_paddr);
	tzdbg_free_encrypted_log_buf(pdev);
	tzdbg_free_qsee_log_buf(pdev);
	/*
	 * diag_buf is only kzalloc'ed in probe when encrypted logging is
	 * disabled, so mirror that condition here (matches probe's
	 * exit_free_diag_buf error path).
	 */
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return 0;
}
  1716. static const struct of_device_id tzlog_match[] = {
  1717. {.compatible = "qcom,tz-log"},
  1718. {}
  1719. };
/* Platform driver glue for the TZ diagnostic log device. */
static struct platform_driver tz_log_driver = {
	.probe = tz_log_probe,
	.remove = tz_log_remove,
	.driver = {
		.name = "tz_log",
		.of_match_table = tzlog_match,
		/* Allow probe to run asynchronously during boot. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/* Generates module init/exit that register/unregister tz_log_driver. */
module_platform_driver(tz_log_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TZ Log driver");