tz_log.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s:[%s][%d]: " fmt, KBUILD_MODNAME, __func__, __LINE__
  7. #include <linux/debugfs.h>
  8. #include <linux/errno.h>
  9. #include <linux/delay.h>
  10. #include <linux/io.h>
  11. #include <linux/msm_ion.h>
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/slab.h>
  16. #include <linux/string.h>
  17. #include <linux/types.h>
  18. #include <linux/uaccess.h>
  19. #include <linux/of.h>
  20. #include <linux/dma-buf.h>
  21. #include <linux/qcom_scm.h>
  22. #include <linux/qtee_shmbridge.h>
  23. #include <linux/proc_fs.h>
  24. #include <linux/version.h>
  25. #if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
  26. #include <linux/tmelog.h>
  27. #endif
  28. #include "misc/qseecomi.h"
/* QSEE_LOG_BUF_SIZE = 32K */
#define QSEE_LOG_BUF_SIZE 0x8000
/* enlarged qsee log buf size is 128K by default */
#define QSEE_LOG_BUF_SIZE_V2 0x20000
/* Tme log buffer size 20K */
#define TME_LOG_BUF_SIZE 0x5000
/* TZ Diagnostic Area legacy version number */
#define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2
/*
 * TZ Diagnostic Area version number: packed as
 * major[31:22] . minor[21:12], each field 10 bits wide.
 */
#define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */
#define TZBSP_FVER_MAJOR_SHIFT 22
#define TZBSP_FVER_MINOR_SHIFT 12
#define TZBSP_DIAG_MAJOR_VERSION_V9 9
#define TZBSP_DIAG_MINOR_VERSION_V2 2
#define TZBSP_DIAG_MINOR_VERSION_V21 3
#define TZBSP_DIAG_MINOR_VERSION_V22 4
/* TZ Diag Feature Version Id (qcom_scm feature query) */
#define QCOM_SCM_FEAT_DIAG_ID 0x06
/*
 * Preprocessor Definitions and Constants
 */
#define TZBSP_MAX_CPU_COUNT 0x08
/*
 * Number of VMID Tables
 */
#define TZBSP_DIAG_NUM_OF_VMID 16
/*
 * VMID Description length
 */
#define TZBSP_DIAG_VMID_DESC_LEN 7
/*
 * Number of Interrupts
 */
#define TZBSP_DIAG_INT_NUM 32
/*
 * Length of descriptive name associated with Interrupt
 */
#define TZBSP_MAX_INT_DESC 16
/*
 * TZ 3.X version info
 */
#define QSEE_VERSION_TZ_3_X 0x800000
/*
 * TZ 4.X version info
 */
#define QSEE_VERSION_TZ_4_X 0x1000000
/* Sizes used by the encrypted-log framing (AES-256 wrapped key + GCM nonce/tag) */
#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
#define TZBSP_NONCE_LEN 12
#define TZBSP_TAG_LEN 16
#define ENCRYPTED_TZ_LOG_ID 0
#define ENCRYPTED_QSEE_LOG_ID 1
/*
 * Directory for TZ DBG logs
 */
#define TZDBG_DIR_NAME "tzdbg"
/*
 * VMID Table entry: maps one Virtual Machine Identifier to a short
 * ASCII description. Layout mirrors the secure-world diagnostic area;
 * do not reorder or resize fields.
 */
struct tzdbg_vmid_t {
	uint8_t vmid; /* Virtual Machine Identifier */
	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
};
/*
 * Boot Info Table (legacy 32-bit layout), one entry per CPU.
 * Layout mirrors the secure-world diagnostic area.
 */
struct tzdbg_boot_info_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t spare; /* Reserved for future use. */
};
/*
 * Boot Info Table for 64-bit TZ (>= QSEE_VERSION_TZ_3_X), one entry
 * per CPU. Adds PSCI counters and widens the warmboot jump address.
 */
struct tzdbg_boot_info64_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
	uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */
	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
};
/*
 * Reset Info Table entry, one per CPU.
 */
struct tzdbg_reset_info_t {
	uint32_t reset_type; /* Reset Reason */
	uint32_t reset_cnt; /* Number of resets occurred/CPU */
};
/*
 * Interrupt Info Table entry (TZ version < 4.X): per-interrupt
 * description plus a per-CPU 64-bit hit counter.
 */
struct tzdbg_int_t {
	/*
	 * Type of Interrupt/exception
	 */
	uint16_t int_info;
	/*
	 * Availability of the slot
	 */
	uint8_t avail;
	/*
	 * Reserved for future use
	 */
	uint8_t spare;
	/*
	 * Interrupt # for IRQ and FIQ
	 */
	uint32_t int_num;
	/*
	 * ASCII text describing type of interrupt e.g:
	 * Secure Timer, EBI XPU. This string is always null terminated,
	 * supporting at most TZBSP_MAX_INT_DESC characters.
	 * Any additional characters are truncated.
	 */
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};
/*
 * Interrupt Info Table entry used in tz version >= 4.X.
 * Identical to struct tzdbg_int_t except int_count narrows to
 * uint32_t, which changes the record size — hence the separate type.
 */
struct tzdbg_int_t_tz40 {
	uint16_t int_info;
	uint8_t avail;
	uint8_t spare;
	uint32_t int_num;
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
};
/* Warm boot (wakeup) reason for cores, one entry per CPU. */
struct tzbsp_diag_wakeup_info_t {
	/* Wake source info : APCS_GICC_HPPIR */
	uint32_t HPPIR;
	/* Wake source info : APCS_GICC_AHPPIR */
	uint32_t AHPPIR;
};
/*
 * Log ring buffer position: wrap-around counter + byte offset
 * within the ring (legacy 16-bit layout).
 */
struct tzdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* Ring buffer position, v2 layout: fields widened to 32 bits. */
struct tzdbg_log_pos_v2_t {
	uint32_t wrap;
	uint32_t offset;
};
/*
 * Log ring buffer: position header followed by the log bytes.
 */
struct tzdbg_log_t {
	struct tzdbg_log_pos_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* Log ring buffer, v2: same shape as tzdbg_log_t with 32-bit position. */
struct tzdbg_log_v2_t {
	struct tzdbg_log_pos_v2_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* Per-chunk encryption metadata: encrypted length, GCM nonce and tag. */
struct tzbsp_encr_info_for_log_chunk_t {
	uint32_t size_to_encr;
	uint8_t nonce[TZBSP_NONCE_LEN];
	uint8_t tag[TZBSP_TAG_LEN];
};
/*
 * Only `ENTIRE_LOG` will be used unless the
 * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
 * If this is true, the diag log will be encrypted in two
 * separate chunks: a smaller chunk containing only error
 * fatal logs and a bigger "rest of the log" chunk. In this
 * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
 * used instead of `ENTIRE_LOG`.
 * Note: ENTIRE_LOG and ERR_FATAL_LOG_CHUNK intentionally share
 * index 1 — they are mutually exclusive uses of the same slot.
 */
enum tzbsp_encr_info_for_log_chunks_idx_t {
	BIG_LOG_CHUNK = 0,
	ENTIRE_LOG = 1,
	ERR_FATAL_LOG_CHUNK = 1,
	MAX_NUM_OF_CHUNKS,
};
/* Encryption info for all log chunks plus the OEM-wrapped AES key. */
struct tzbsp_encr_info_t {
	uint32_t num_of_chunks;
	struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
};
/*
 * Diagnostic Table
 * Note: This is the reference data structure for tz diagnostic table
 * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
 * copied into buffer from i/o memory.
 *
 * The header holds byte offsets (relative to the start of this struct)
 * for each sub-table; the display helpers below navigate via those
 * offsets rather than the fixed reference layout.
 */
struct tzdbg_t {
	uint32_t magic_num;
	uint32_t version;
	/*
	 * Number of CPU's
	 */
	uint32_t cpu_count;
	/*
	 * Offset of VMID Table
	 */
	uint32_t vmid_info_off;
	/*
	 * Offset of Boot Table
	 */
	uint32_t boot_info_off;
	/*
	 * Offset of Reset info Table
	 */
	uint32_t reset_info_off;
	/*
	 * Offset of Interrupt info Table
	 */
	uint32_t int_info_off;
	/*
	 * Ring Buffer Offset
	 */
	uint32_t ring_off;
	/*
	 * Ring Buffer Length
	 */
	uint32_t ring_len;
	/* Offset for Wakeup info */
	uint32_t wakeup_info_off;
	union {
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V2
		 */
		struct {
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts;
			struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
			uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
			uint8_t nonce[TZBSP_NONCE_LEN];
			uint8_t tag[TZBSP_TAG_LEN];
		};
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V21
		 */
		struct {
			uint32_t encr_info_for_log_off;
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts_v2;
			struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
			struct tzbsp_encr_info_t encr_info_for_log;
		};
	};
	/*
	 * We need at least 2K for the ring buffer
	 */
	struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
};
/* Hypervisor log ring-buffer position (wrap counter + offset). */
struct hypdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* RM (resource manager) log header as laid out in the shared buffer. */
struct rmdbg_log_hdr_t {
	uint32_t write_idx;
	uint32_t size;
};
/* Driver-side read position into the RM log buffer. */
struct rmdbg_log_pos_t {
	uint32_t read_idx;
	uint32_t size;
};
/* Hypervisor per-CPU warm boot entry/exit counters. */
struct hypdbg_boot_info_t {
	uint32_t warm_entry_cnt;
	uint32_t warm_exit_cnt;
};
/*
 * Hypervisor diagnostic area header; mirrors the hypervisor's shared
 * memory layout. The log bytes start at ring_off / log_buf_p.
 */
struct hypdbg_t {
	/* Magic Number */
	uint32_t magic_num;
	/* Number of CPU's */
	uint32_t cpu_count;
	/* Ring Buffer Offset */
	uint32_t ring_off;
	/* Ring buffer position mgmt */
	struct hypdbg_log_pos_t log_pos;
	uint32_t log_len;
	/* S2 fault numbers */
	uint32_t s2_fault_counter;
	/* Boot Info */
	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
	/* Ring buffer pointer */
	uint8_t log_buf_p[];
};
/* Offset/size of the TME log within the crash-dump carve-out. */
struct tme_log_pos {
	uint32_t offset;
	size_t size;
};
/*
 * Enumeration of the debugfs/procfs stat entries exposed by this
 * driver; used to index tzdbg.stat[] and tzdbg.debug_tz[].
 */
enum tzdbg_stats_type {
	TZDBG_BOOT = 0,
	TZDBG_RESET,
	TZDBG_INTERRUPT,
	TZDBG_VMID,
	TZDBG_GENERAL,
	TZDBG_LOG,
	TZDBG_QSEE_LOG,
	TZDBG_HYP_GENERAL,
	TZDBG_HYP_LOG,
	TZDBG_RM_LOG,
	TZDBG_TME_LOG,
	TZDBG_STATS_MAX
};
/* One debugfs stat entry: its name and the formatted data to display. */
struct tzdbg_stat {
	size_t display_len;    /* bytes currently held in data */
	size_t display_offset; /* read cursor for partial reads */
	char *name;            /* debugfs file name */
	char *data;            /* points into tzdbg.disp_buf after a _disp_* call */
};
/*
 * Driver-global state: IO mappings of the various log areas, local
 * copies of the diagnostic buffers, and per-stat display bookkeeping.
 */
struct tzdbg {
	void __iomem *virt_iobase;        /* TZ diag area mapping */
	void __iomem *hyp_virt_iobase;    /* hypervisor diag area mapping */
	void __iomem *rmlog_virt_iobase;  /* RM log mapping */
	void __iomem *tmelog_virt_iobase; /* TME log mapping */
	struct tzdbg_t *diag_buf;         /* local copy of TZ diag area */
	struct hypdbg_t *hyp_diag_buf;    /* local copy of hyp diag area */
	uint8_t *rm_diag_buf;
	uint8_t *tme_buf;
	char *disp_buf;                   /* scratch buffer handed to readers */
	int debug_tz[TZDBG_STATS_MAX];
	struct tzdbg_stat stat[TZDBG_STATS_MAX];
	uint32_t hyp_debug_rw_buf_size;
	uint32_t rmlog_rw_buf_size;
	bool is_hyplog_enabled;
	uint32_t tz_version;
	bool is_encrypted_log_enabled;
	bool is_enlarged_buf;
	bool is_full_encrypted_tz_logs_supported;
	bool is_full_encrypted_tz_logs_enabled;
	int tz_diag_minor_version;
	int tz_diag_major_version;
};
/*
 * Framing of an encrypted log blob returned by TZ.
 * NOTE(review): log_buf uses the legacy [1] trailing-array idiom rather
 * than a C99 flexible array; size computations elsewhere presumably
 * account for that extra byte — do not change to [] without auditing them.
 */
struct tzbsp_encr_log_t {
	/* Magic Number */
	uint32_t magic_num;
	/* version NUMBER */
	uint32_t version;
	/* encrypted log size */
	uint32_t encr_log_buff_size;
	/* Wrap value*/
	uint16_t wrap_count;
	/* AES encryption key wrapped up with oem public key*/
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
	/* Nonce used for encryption*/
	uint8_t nonce[TZBSP_NONCE_LEN];
	/* Tag to be used for Validation */
	uint8_t tag[TZBSP_TAG_LEN];
	/* Encrypted log buffer */
	uint8_t log_buf[1];
};
/* Bookkeeping for a shmbridge-backed encrypted-log buffer. */
struct encrypted_log_info {
	phys_addr_t paddr;    /* physical address shared with TZ */
	void *vaddr;          /* kernel virtual address */
	size_t size;
	uint64_t shmb_handle; /* qtee_shmbridge registration handle */
};
/* Driver state; stat names double as the debugfs/procfs file names. */
static struct tzdbg tzdbg = {
	.stat[TZDBG_BOOT].name = "boot",
	.stat[TZDBG_RESET].name = "reset",
	.stat[TZDBG_INTERRUPT].name = "interrupt",
	.stat[TZDBG_VMID].name = "vmid",
	.stat[TZDBG_GENERAL].name = "general",
	.stat[TZDBG_LOG].name = "log",
	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
	.stat[TZDBG_HYP_LOG].name = "hyp_log",
	.stat[TZDBG_RM_LOG].name = "rm_log",
	.stat[TZDBG_TME_LOG].name = "tme_log",
};
/* QSEE log ring (legacy 16-bit positions / v2 32-bit positions). */
static struct tzdbg_log_t *g_qsee_log;
static struct tzdbg_log_v2_t *g_qsee_log_v2;
static dma_addr_t coh_pmem;             /* DMA handle of the QSEE log buffer */
static uint32_t debug_rw_buf_size;      /* size of the TZ diag area / disp_buf */
static uint32_t display_buf_size;
static uint32_t qseelog_buf_size;
static phys_addr_t disp_buf_paddr;
static uint32_t tmecrashdump_address_offset;
static uint64_t qseelog_shmbridge_handle;
static struct encrypted_log_info enc_qseelog_info; /* encrypted QSEE log buffer */
static struct encrypted_log_info enc_tzlog_info;   /* encrypted TZ log buffer */
  442. /*
  443. * Debugfs data structure and functions
  444. */
  445. static int _disp_tz_general_stats(void)
  446. {
  447. int len = 0;
  448. len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
  449. " Version : 0x%x\n"
  450. " Magic Number : 0x%x\n"
  451. " Number of CPU : %d\n",
  452. tzdbg.diag_buf->version,
  453. tzdbg.diag_buf->magic_num,
  454. tzdbg.diag_buf->cpu_count);
  455. tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
  456. return len;
  457. }
  458. static int _disp_tz_vmid_stats(void)
  459. {
  460. int i, num_vmid;
  461. int len = 0;
  462. struct tzdbg_vmid_t *ptr;
  463. ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
  464. tzdbg.diag_buf->vmid_info_off);
  465. num_vmid = ((tzdbg.diag_buf->boot_info_off -
  466. tzdbg.diag_buf->vmid_info_off)/
  467. (sizeof(struct tzdbg_vmid_t)));
  468. for (i = 0; i < num_vmid; i++) {
  469. if (ptr->vmid < 0xFF) {
  470. len += scnprintf(tzdbg.disp_buf + len,
  471. (debug_rw_buf_size - 1) - len,
  472. " 0x%x %s\n",
  473. (uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
  474. }
  475. if (len > (debug_rw_buf_size - 1)) {
  476. pr_warn("%s: Cannot fit all info into the buffer\n",
  477. __func__);
  478. break;
  479. }
  480. ptr++;
  481. }
  482. tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
  483. return len;
  484. }
/*
 * Format per-CPU boot statistics into tzdbg.disp_buf and publish them
 * as the "boot" stat. The boot table layout depends on the TZ version:
 * >= QSEE_VERSION_TZ_3_X uses the 64-bit record (with PSCI counters),
 * older TZ uses the legacy 32-bit record. Returns characters written.
 */
static int _disp_tz_boot_stats(void)
{
	int i;
	int len = 0;
	struct tzdbg_boot_info_t *ptr = NULL;
	struct tzdbg_boot_info64_t *ptr_64 = NULL;

	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
	/* Pick the record layout matching the TZ version. */
	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	} else {
		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	}

	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
			/* 64-bit record: includes PSCI entry/exit counters. */
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%llx\n"
					" Warmboot entry CPU counter : 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter : 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n"
					" Psci entry CPU counter : 0x%x\n"
					" Psci exit CPU counter : 0x%x\n"
					" Warmboot Jump Address Instruction : 0x%x\n",
					i, (uint64_t)ptr_64->warm_jmp_addr,
					ptr_64->wb_entry_cnt,
					ptr_64->wb_exit_cnt,
					ptr_64->pc_entry_cnt,
					ptr_64->pc_exit_cnt,
					ptr_64->psci_entry_cnt,
					ptr_64->psci_exit_cnt,
					ptr_64->warm_jmp_instr);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr_64++;
		} else {
			/* Legacy 32-bit record. */
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%x\n"
					" Warmboot entry CPU counter: 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter: 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n",
					i, ptr->warm_jmp_addr,
					ptr->wb_entry_cnt,
					ptr->wb_exit_cnt,
					ptr->pc_entry_cnt,
					ptr->pc_exit_cnt);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr++;
		}
	}
	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
	return len;
}
  551. static int _disp_tz_reset_stats(void)
  552. {
  553. int i;
  554. int len = 0;
  555. struct tzdbg_reset_info_t *ptr;
  556. ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
  557. tzdbg.diag_buf->reset_info_off);
  558. for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
  559. len += scnprintf(tzdbg.disp_buf + len,
  560. (debug_rw_buf_size - 1) - len,
  561. " CPU #: %d\n"
  562. " Reset Type (reason) : 0x%x\n"
  563. " Reset counter : 0x%x\n",
  564. i, ptr->reset_type, ptr->reset_cnt);
  565. if (len > (debug_rw_buf_size - 1)) {
  566. pr_warn("%s: Cannot fit all info into the buffer\n",
  567. __func__);
  568. break;
  569. }
  570. ptr++;
  571. }
  572. tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
  573. return len;
  574. }
  575. static int _disp_tz_interrupt_stats(void)
  576. {
  577. int i, j;
  578. int len = 0;
  579. int *num_int;
  580. void *ptr;
  581. struct tzdbg_int_t *tzdbg_ptr;
  582. struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
  583. num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
  584. (tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
  585. ptr = ((unsigned char *)tzdbg.diag_buf +
  586. tzdbg.diag_buf->int_info_off);
  587. pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
  588. if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
  589. tzdbg_ptr = ptr;
  590. for (i = 0; i < (*num_int); i++) {
  591. len += scnprintf(tzdbg.disp_buf + len,
  592. (debug_rw_buf_size - 1) - len,
  593. " Interrupt Number : 0x%x\n"
  594. " Type of Interrupt : 0x%x\n"
  595. " Description of interrupt : %s\n",
  596. tzdbg_ptr->int_num,
  597. (uint32_t)tzdbg_ptr->int_info,
  598. (uint8_t *)tzdbg_ptr->int_desc);
  599. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  600. len += scnprintf(tzdbg.disp_buf + len,
  601. (debug_rw_buf_size - 1) - len,
  602. " int_count on CPU # %d : %u\n",
  603. (uint32_t)j,
  604. (uint32_t)tzdbg_ptr->int_count[j]);
  605. }
  606. len += scnprintf(tzdbg.disp_buf + len,
  607. debug_rw_buf_size - 1, "\n");
  608. if (len > (debug_rw_buf_size - 1)) {
  609. pr_warn("%s: Cannot fit all info into buf\n",
  610. __func__);
  611. break;
  612. }
  613. tzdbg_ptr++;
  614. }
  615. } else {
  616. tzdbg_ptr_tz40 = ptr;
  617. for (i = 0; i < (*num_int); i++) {
  618. len += scnprintf(tzdbg.disp_buf + len,
  619. (debug_rw_buf_size - 1) - len,
  620. " Interrupt Number : 0x%x\n"
  621. " Type of Interrupt : 0x%x\n"
  622. " Description of interrupt : %s\n",
  623. tzdbg_ptr_tz40->int_num,
  624. (uint32_t)tzdbg_ptr_tz40->int_info,
  625. (uint8_t *)tzdbg_ptr_tz40->int_desc);
  626. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  627. len += scnprintf(tzdbg.disp_buf + len,
  628. (debug_rw_buf_size - 1) - len,
  629. " int_count on CPU # %d : %u\n",
  630. (uint32_t)j,
  631. (uint32_t)tzdbg_ptr_tz40->int_count[j]);
  632. }
  633. len += scnprintf(tzdbg.disp_buf + len,
  634. debug_rw_buf_size - 1, "\n");
  635. if (len > (debug_rw_buf_size - 1)) {
  636. pr_warn("%s: Cannot fit all info into buf\n",
  637. __func__);
  638. break;
  639. }
  640. tzdbg_ptr_tz40++;
  641. }
  642. }
  643. tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
  644. return len;
  645. }
  646. static int _disp_tz_log_stats_legacy(void)
  647. {
  648. int len = 0;
  649. unsigned char *ptr;
  650. ptr = (unsigned char *)tzdbg.diag_buf +
  651. tzdbg.diag_buf->ring_off;
  652. len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
  653. "%s\n", ptr);
  654. tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
  655. return len;
  656. }
/*
 * Copy new bytes from a TZ/QSEE ring buffer (legacy 16-bit positions)
 * into tzdbg.disp_buf, starting at the caller's saved position.
 *
 * @log:       live ring buffer (writer side updates log->log_pos).
 * @log_start: caller's persistent read position; advanced in place.
 * @log_len:   ring capacity in bytes.
 * @count:     max bytes the reader asked for.
 * @buf_idx:   stat slot to publish the result to (TZDBG_LOG refreshes
 *             the diag copy from IO memory while waiting).
 *
 * Blocks (50 ms sleep intervals) while the ring is empty; returns 0 if
 * interrupted by a signal, otherwise the number of bytes copied.
 */
static int _disp_log_stats(struct tzdbg_log_t *log,
		struct tzdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* Re-read the diag area so we observe new writes. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * Same catch-up/copy algorithm as _disp_log_stats(), but for the v2
 * ring layout whose wrap/offset fields are 32-bit
 * (struct tzdbg_log_v2_t / tzdbg_log_pos_v2_t).
 * See _disp_log_stats() for the parameter and return contract.
 */
static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
		struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* Re-read the diag area so we observe new writes. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * __disp_hyp_log_stats() - copy the hypervisor ring-buffer log into
 * tzdbg.disp_buf and publish it via tzdbg.stat[buf_idx].data.
 * @log:       start of the hypervisor log ring (raw bytes).
 * @log_start: persistent read cursor for this consumer (static in caller).
 * @log_len:   ring buffer length in bytes.
 * @count:     maximum number of bytes requested by user space.
 * @buf_idx:   tzdbg stat slot to publish under (TZDBG_HYP_LOG).
 *
 * Write position comes from tzdbg.hyp_diag_buf->log_pos, which is
 * refreshed from MMIO while waiting for data.  Sleeps in 50 ms steps while
 * the ring is empty; returns 0 if interrupted by a signal, otherwise the
 * number of bytes copied.
 */
static int __disp_hyp_log_stats(uint8_t *log,
		struct hypdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
	unsigned long t = 0;
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = hyp->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = hyp->log_pos.wrap - 1;
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(hyp->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	}

	while (log_start->offset == hyp->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		t = msleep_interruptible(50);
		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* TZDBG_HYP_LOG: re-snapshot the hyp diag area from MMIO */
		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
			tzdbg.hyp_debug_rw_buf_size);
	}

	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
		tzdbg.hyp_debug_rw_buf_size : count;

	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
  848. static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len)
  849. {
  850. uint32_t i = 0;
  851. /*
  852. * Transfer data from rm dialog buff to display buffer in user space
  853. */
  854. while ((i < max_len) && (i < display_buf_size)) {
  855. tzdbg.disp_buf[i] = log_ptr[i];
  856. i++;
  857. }
  858. if (i != max_len)
  859. pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n",
  860. i, display_buf_size);
  861. tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf;
  862. return i;
  863. }
  864. static int print_text(char *intro_message,
  865. unsigned char *text_addr,
  866. unsigned int size,
  867. char *buf, uint32_t buf_len)
  868. {
  869. unsigned int i;
  870. int len = 0;
  871. pr_debug("begin address %p, size %d\n", text_addr, size);
  872. len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
  873. for (i = 0; i < size; i++) {
  874. if (buf_len <= len + 6) {
  875. pr_err("buffer not enough, buf_len %d, len %d\n",
  876. buf_len, len);
  877. return buf_len;
  878. }
  879. len += scnprintf(buf + len, buf_len - len, "%02hhx ",
  880. text_addr[i]);
  881. if ((i & 0x1f) == 0x1f)
  882. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  883. }
  884. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  885. return len;
  886. }
  887. static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
  888. enum tzdbg_stats_type type, uint32_t log_id)
  889. {
  890. int ret = 0, len = 0;
  891. struct tzbsp_encr_log_t *encr_log_head;
  892. uint32_t size = 0;
  893. if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
  894. (tzdbg.is_full_encrypted_tz_logs_enabled))
  895. pr_info("TZ not supporting full encrypted log functionality\n");
  896. ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
  897. enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported,
  898. tzdbg.is_full_encrypted_tz_logs_enabled);
  899. if (ret)
  900. return 0;
  901. encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
  902. pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
  903. display_buf_size, encr_log_head->encr_log_buff_size);
  904. size = encr_log_head->encr_log_buff_size;
  905. len += scnprintf(tzdbg.disp_buf + len,
  906. (display_buf_size - 1) - len,
  907. "\n-------- New Encrypted %s --------\n",
  908. ((log_id == ENCRYPTED_QSEE_LOG_ID) ?
  909. "QSEE Log" : "TZ Dialog"));
  910. len += scnprintf(tzdbg.disp_buf + len,
  911. (display_buf_size - 1) - len,
  912. "\nMagic_Num :\n0x%x\n"
  913. "\nVerion :\n%d\n"
  914. "\nEncr_Log_Buff_Size :\n%d\n"
  915. "\nWrap_Count :\n%d\n",
  916. encr_log_head->magic_num,
  917. encr_log_head->version,
  918. encr_log_head->encr_log_buff_size,
  919. encr_log_head->wrap_count);
  920. len += print_text("\nKey : ", encr_log_head->key,
  921. TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
  922. tzdbg.disp_buf + len, display_buf_size);
  923. len += print_text("\nNonce : ", encr_log_head->nonce,
  924. TZBSP_NONCE_LEN,
  925. tzdbg.disp_buf + len, display_buf_size - len);
  926. len += print_text("\nTag : ", encr_log_head->tag,
  927. TZBSP_TAG_LEN,
  928. tzdbg.disp_buf + len, display_buf_size - len);
  929. if (len > display_buf_size - size)
  930. pr_warn("Cannot fit all info into the buffer\n");
  931. pr_debug("encrypted log size %d, disply buffer size %d, used len %d\n",
  932. size, display_buf_size, len);
  933. len += print_text("\nLog : ", encr_log_head->log_buf, size,
  934. tzdbg.disp_buf + len, display_buf_size - len);
  935. memset(enc_log_info->vaddr, 0, enc_log_info->size);
  936. tzdbg.stat[type].data = tzdbg.disp_buf;
  937. return len;
  938. }
  939. static int _disp_tz_log_stats(size_t count)
  940. {
  941. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  942. static struct tzdbg_log_pos_t log_start = {0};
  943. struct tzdbg_log_v2_t *log_v2_ptr;
  944. struct tzdbg_log_t *log_ptr;
  945. log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
  946. tzdbg.diag_buf->ring_off -
  947. offsetof(struct tzdbg_log_t, log_buf));
  948. log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
  949. tzdbg.diag_buf->ring_off -
  950. offsetof(struct tzdbg_log_v2_t, log_buf));
  951. if (!tzdbg.is_enlarged_buf)
  952. return _disp_log_stats(log_ptr, &log_start,
  953. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  954. return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
  955. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  956. }
  957. static int _disp_hyp_log_stats(size_t count)
  958. {
  959. static struct hypdbg_log_pos_t log_start = {0};
  960. uint8_t *log_ptr;
  961. uint32_t log_len;
  962. log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
  963. tzdbg.hyp_diag_buf->ring_off);
  964. log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;
  965. return __disp_hyp_log_stats(log_ptr, &log_start,
  966. log_len, count, TZDBG_HYP_LOG);
  967. }
/*
 * _disp_rm_log_stats() - stream the RM (resource manager) log to user space.
 * @count: maximum number of bytes requested by this read() call.
 *
 * Static state (log_start, wrap_around) drives a small state machine across
 * successive reads: the first read snapshots the whole RM log from MMIO and
 * parses its header; subsequent reads drain the snapshot from read_idx; a
 * final read returns 0 (EOF) and resets the state so the next open starts
 * over.  Returns the number of bytes copied, or 0 at EOF / when no RM log
 * region exists.
 */
static int _disp_rm_log_stats(size_t count)
{
	static struct rmdbg_log_pos_t log_start = { 0 };
	struct rmdbg_log_hdr_t *p_log_hdr = NULL;
	uint8_t *log_ptr = NULL;
	uint32_t log_len = 0;
	static bool wrap_around = { false };

	/* Return 0 to close the display file,if there is nothing else to do */
	if ((log_start.size == 0x0) && wrap_around) {
		wrap_around = false;
		return 0;
	}
	/* Copy RM log data to tzdbg diag buffer for the first time */
	/* Initialize the tracking data structure */
	if (tzdbg.rmlog_rw_buf_size != 0) {
		if (!wrap_around) {
			memcpy_fromio((void *)tzdbg.rm_diag_buf,
					tzdbg.rmlog_virt_iobase,
					tzdbg.rmlog_rw_buf_size);
			/* get RM header info first */
			p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf;
			/* Update RM log buffer index tracker and its size */
			log_start.read_idx = 0x0;
			log_start.size = p_log_hdr->size;
		}
		/* Update RM log buffer starting ptr (payload follows header) */
		log_ptr =
		    (uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf +
				 sizeof(struct rmdbg_log_hdr_t));
	} else {
		/* Return 0 to close the display file,if there is nothing else to do */
		pr_err("There is no RM log to read, size is %d!\n",
		       tzdbg.rmlog_rw_buf_size);
		return 0;
	}
	log_len = log_start.size;
	log_ptr += log_start.read_idx;
	/* Check if we exceed the max length provided by user space */
	log_len = (count > log_len) ? log_len : count;
	/* Update tracking data structure */
	log_start.size -= log_len;
	log_start.read_idx += log_len;

	/*
	 * Data remains: mark the stream as in progress so the next read
	 * continues from read_idx instead of re-snapshotting from MMIO.
	 */
	if (log_start.size)
		wrap_around = true;
	return __disp_rm_log_stats(log_ptr, log_len);
}
  1014. static int _disp_qsee_log_stats(size_t count)
  1015. {
  1016. static struct tzdbg_log_pos_t log_start = {0};
  1017. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  1018. if (!tzdbg.is_enlarged_buf)
  1019. return _disp_log_stats(g_qsee_log, &log_start,
  1020. QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
  1021. count, TZDBG_QSEE_LOG);
  1022. return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
  1023. QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
  1024. count, TZDBG_QSEE_LOG);
  1025. }
/*
 * _disp_hyp_general_stats() - render hypervisor diag header fields and
 * per-CPU warm-boot counters into tzdbg.disp_buf.
 * @count: unused here; output is bounded by hyp_debug_rw_buf_size instead.
 *
 * Returns the number of characters rendered.
 *
 * NOTE(review): output goes into tzdbg.disp_buf but the size limit used is
 * tzdbg.hyp_debug_rw_buf_size — confirm disp_buf is at least that large.
 */
static int _disp_hyp_general_stats(size_t count)
{
	int len = 0;
	int i;
	struct hypdbg_boot_info_t *ptr = NULL;

	/* header: len is 0 here, so the full buffer budget applies */
	len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
			tzdbg.hyp_debug_rw_buf_size - 1,
			"   Magic Number    : 0x%x\n"
			"   CPU Count       : 0x%x\n"
			"   S2 Fault Counter: 0x%x\n",
			tzdbg.hyp_diag_buf->magic_num,
			tzdbg.hyp_diag_buf->cpu_count,
			tzdbg.hyp_diag_buf->s2_fault_counter);

	/* per-CPU warm boot entry/exit counters */
	ptr = tzdbg.hyp_diag_buf->boot_info;
	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
		len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
				"  CPU #: %d\n"
				"     Warmboot entry CPU counter: 0x%x\n"
				"     Warmboot exit CPU counter : 0x%x\n",
				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);

		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
								__func__);
			break;
		}
		ptr++;
	}

	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
	return len;
}
  1057. #if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
  1058. static int _disp_tme_log_stats(size_t count)
  1059. {
  1060. static struct tme_log_pos log_start = { 0 };
  1061. static bool wrap_around = { false };
  1062. uint32_t buf_size;
  1063. uint8_t *log_ptr = NULL;
  1064. uint32_t log_len = 0;
  1065. int ret = 0;
  1066. /* Return 0 to close file in case some error in initialising step. */
  1067. if (!tzdbg.tmelog_virt_iobase || !tmecrashdump_address_offset)
  1068. return 0;
  1069. /* Return 0 to close the display file */
  1070. if ((log_start.size == 0x0) && wrap_around) {
  1071. wrap_around = false;
  1072. return 0;
  1073. }
  1074. /* Copy TME log data to tzdbg diag buffer for the first time */
  1075. if (!wrap_around) {
  1076. if (tmelog_process_request(tmecrashdump_address_offset,
  1077. TME_LOG_BUF_SIZE, &buf_size)) {
  1078. pr_err("Read tme log failed, ret=%d, buf_size: %#x\n", ret, buf_size);
  1079. return 0;
  1080. }
  1081. log_start.offset = 0x0;
  1082. log_start.size = buf_size;
  1083. }
  1084. log_ptr = tzdbg.tmelog_virt_iobase;
  1085. log_len = log_start.size;
  1086. log_ptr += log_start.offset;
  1087. /* Check if we exceed the max length provided by user space */
  1088. log_len = min(min((uint32_t)count, log_len), display_buf_size);
  1089. log_start.size -= log_len;
  1090. log_start.offset += log_len;
  1091. pr_debug("log_len: %d, log_start.offset: %#x, log_start.size: %#x\n",
  1092. log_len, log_start.offset, log_start.size);
  1093. if (log_start.size)
  1094. wrap_around = true;
  1095. /* Copy TME log data to display buffer */
  1096. memcpy_fromio(tzdbg.disp_buf, log_ptr, log_len);
  1097. tzdbg.stat[TZDBG_TME_LOG].data = tzdbg.disp_buf;
  1098. return log_len;
  1099. }
  1100. #else
/* Stub when CONFIG_MSM_TMECOM_QMP is disabled: no TME log to display. */
static int _disp_tme_log_stats(size_t count)
{
	return 0;
}
  1105. #endif
/*
 * tzdbg_fs_read_unencrypted() - read handler for plain-text (unencrypted)
 * stat entries.
 * @tz_id: which stat entry is being read (TZDBG_* index).
 * @buf:   user buffer.
 * @count: size of the user buffer.
 * @offp:  file position; reset to 0 for the streaming log entries so each
 *         read restarts the copy from the start of disp_buf.
 *
 * Snapshots the relevant MMIO diag region first, dispatches to the per-stat
 * renderer, then copies the rendered display buffer to user space.
 */
static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0;

	/* refresh the TZ diag snapshot for TZ-side stats */
	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	/* refresh the hypervisor diag snapshot for hyp-side stats */
	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
				tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);

	switch (tz_id) {
	case TZDBG_BOOT:
		len = _disp_tz_boot_stats();
		break;
	case TZDBG_RESET:
		len = _disp_tz_reset_stats();
		break;
	case TZDBG_INTERRUPT:
		len = _disp_tz_interrupt_stats();
		break;
	case TZDBG_GENERAL:
		len = _disp_tz_general_stats();
		break;
	case TZDBG_VMID:
		len = _disp_tz_vmid_stats();
		break;
	case TZDBG_LOG:
		/* ring-buffer renderer only exists past the legacy version */
		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
				(tzdbg.diag_buf->version >> 16)) {
			len = _disp_tz_log_stats(count);
			*offp = 0;
		} else {
			len = _disp_tz_log_stats_legacy();
		}
		break;
	case TZDBG_QSEE_LOG:
		len = _disp_qsee_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_HYP_GENERAL:
		len = _disp_hyp_general_stats(count);
		break;
	case TZDBG_HYP_LOG:
		len = _disp_hyp_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_RM_LOG:
		len = _disp_rm_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_TME_LOG:
		len = _disp_tme_log_stats(count);
		*offp = 0;
		break;
	default:
		break;
	}

	if (len > count)
		len = count;

	return simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data, len);
}
/*
 * tzdbg_fs_read_encrypted() - read handler for encrypted stat entries.
 * @tz_id: stat entry index (must be < TZDBG_STATS_MAX).
 * @buf:   user buffer.
 * @count: size of the user buffer.
 * @offp:  file position; forced to 0 so display_offset tracks progress
 *         instead of the file position.
 *
 * Renders the encrypted log once (when display_len is 0), then serves it
 * out in chunks across reads via display_offset/display_len.
 */
static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0, ret = 0;
	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);

	pr_debug("%s: tz_id = %d\n", __func__, tz_id);

	if (tz_id >= TZDBG_STATS_MAX) {
		pr_err("invalid encrypted log id %d\n", tz_id);
		return ret;
	}

	/* first read of a fresh log: render it and reset the chunk cursor */
	if (!stat->display_len) {
		if (tz_id == TZDBG_QSEE_LOG)
			stat->display_len = _disp_encrpted_log_stats(
					&enc_qseelog_info,
					tz_id, ENCRYPTED_QSEE_LOG_ID);
		else
			stat->display_len = _disp_encrpted_log_stats(
					&enc_tzlog_info,
					tz_id, ENCRYPTED_TZ_LOG_ID);
		stat->display_offset = 0;
	}

	len = stat->display_len;
	if (len > count)
		len = count;

	*offp = 0;
	/*
	 * NOTE(review): "count" is passed as the available-bytes argument to
	 * simple_read_from_buffer while only "len" bytes were requested from
	 * the display buffer — confirm this cannot read past the rendered
	 * region when count > display_len.
	 */
	ret = simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data + stat->display_offset,
				count);
	stat->display_offset += ret;
	stat->display_len -= ret;

	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
	pr_debug("display_len = %d, offset = %d\n",
			stat->display_len, stat->display_offset);
	return ret;
}
  1206. static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
  1207. size_t count, loff_t *offp)
  1208. {
  1209. struct seq_file *seq = file->private_data;
  1210. int tz_id = TZDBG_STATS_MAX;
  1211. if (seq)
  1212. tz_id = *(int *)(seq->private);
  1213. else {
  1214. pr_err("%s: Seq data null unable to proceed\n", __func__);
  1215. return 0;
  1216. }
  1217. if (!tzdbg.is_encrypted_log_enabled ||
  1218. (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
  1219. || tz_id == TZDBG_RM_LOG || tz_id == TZDBG_TME_LOG)
  1220. return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
  1221. else
  1222. return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
  1223. }
/*
 * tzdbg_procfs_open() - open hook; wires the per-entry stat index (stored
 * as proc entry data) into the seq_file private pointer.
 * PDE_DATA() was renamed pde_data() in kernel 5.17+/6.x, hence the guard.
 */
static int tzdbg_procfs_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
	return single_open(file, NULL, PDE_DATA(inode));
#else
	return single_open(file, NULL, pde_data(inode));
#endif
}
/* Release hook: tear down the seq_file set up by tzdbg_procfs_open(). */
static int tzdbg_procfs_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
/* proc_ops shared by every tzdbg procfs entry (see tzdbg_fs_init()). */
struct proc_ops tzdbg_fops = {
	.proc_flags   = PROC_ENTRY_PERMANENT,
	.proc_read    = tzdbg_fs_read,
	.proc_open    = tzdbg_procfs_open,
	.proc_release = tzdbg_procfs_release,
};
  1242. static int tzdbg_init_tme_log(struct platform_device *pdev, void __iomem *virt_iobase)
  1243. {
  1244. /*
  1245. * Tme logs are dumped in tme log ddr region but that region is not
  1246. * accessible to hlos. Instead, collect logs at tme crashdump ddr
  1247. * region with tmecom interface and then display logs reading from
  1248. * crashdump region.
  1249. */
  1250. if (of_property_read_u32((&pdev->dev)->of_node, "tmecrashdump-address-offset",
  1251. &tmecrashdump_address_offset)) {
  1252. pr_err("Tme Crashdump address offset need to be defined!\n");
  1253. return -EINVAL;
  1254. }
  1255. tzdbg.tmelog_virt_iobase =
  1256. devm_ioremap(&pdev->dev, tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
  1257. if (!tzdbg.tmelog_virt_iobase) {
  1258. pr_err("ERROR: Could not ioremap: start=%#x, len=%u\n",
  1259. tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
  1260. return -ENXIO;
  1261. }
  1262. return 0;
  1263. }
  1264. /*
  1265. * Allocates log buffer from ION, registers the buffer at TZ
  1266. */
  1267. static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
  1268. {
  1269. int ret = 0;
  1270. void *buf = NULL;
  1271. uint32_t ns_vmids[] = {VMID_HLOS};
  1272. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1273. uint32_t ns_vm_nums = 1;
  1274. if (tzdbg.is_enlarged_buf) {
  1275. if (of_property_read_u32((&pdev->dev)->of_node,
  1276. "qseelog-buf-size-v2", &qseelog_buf_size)) {
  1277. pr_debug("Enlarged qseelog buf size isn't defined\n");
  1278. qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
  1279. }
  1280. } else {
  1281. qseelog_buf_size = QSEE_LOG_BUF_SIZE;
  1282. }
  1283. pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);
  1284. buf = dma_alloc_coherent(&pdev->dev,
  1285. qseelog_buf_size, &coh_pmem, GFP_KERNEL);
  1286. if (buf == NULL)
  1287. return -ENOMEM;
  1288. if (!tzdbg.is_encrypted_log_enabled) {
  1289. ret = qtee_shmbridge_register(coh_pmem,
  1290. qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1291. PERM_READ | PERM_WRITE,
  1292. &qseelog_shmbridge_handle);
  1293. if (ret) {
  1294. pr_err("failed to create bridge for qsee_log buf\n");
  1295. goto exit_free_mem;
  1296. }
  1297. }
  1298. g_qsee_log = (struct tzdbg_log_t *)buf;
  1299. g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
  1300. g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
  1301. g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;
  1302. ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
  1303. if (ret != QSEOS_RESULT_SUCCESS) {
  1304. pr_err(
  1305. "%s: scm_call to register log buf failed, resp result =%lld\n",
  1306. __func__, ret);
  1307. goto exit_dereg_bridge;
  1308. }
  1309. return ret;
  1310. exit_dereg_bridge:
  1311. if (!tzdbg.is_encrypted_log_enabled)
  1312. qtee_shmbridge_deregister(qseelog_shmbridge_handle);
  1313. exit_free_mem:
  1314. dma_free_coherent(&pdev->dev, qseelog_buf_size,
  1315. (void *)g_qsee_log, coh_pmem);
  1316. return ret;
  1317. }
/*
 * tzdbg_free_qsee_log_buf() - undo tzdbg_register_qsee_log_buf(): drop the
 * shm bridge (if one was created) and free the DMA buffer.
 */
static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
{
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			(void *)g_qsee_log, coh_pmem);
}
  1325. static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
  1326. {
  1327. int ret = 0;
  1328. uint32_t ns_vmids[] = {VMID_HLOS};
  1329. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1330. uint32_t ns_vm_nums = 1;
  1331. if (!tzdbg.is_encrypted_log_enabled)
  1332. return 0;
  1333. /* max encrypted qsee log buf zize (include header, and page align) */
  1334. enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;
  1335. enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1336. enc_qseelog_info.size,
  1337. &enc_qseelog_info.paddr, GFP_KERNEL);
  1338. if (enc_qseelog_info.vaddr == NULL)
  1339. return -ENOMEM;
  1340. ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
  1341. enc_qseelog_info.size, ns_vmids,
  1342. ns_vm_perms, ns_vm_nums,
  1343. PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
  1344. if (ret) {
  1345. pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
  1346. goto exit_free_qseelog;
  1347. }
  1348. pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
  1349. enc_qseelog_info.size);
  1350. enc_tzlog_info.size = debug_rw_buf_size;
  1351. enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1352. enc_tzlog_info.size,
  1353. &enc_tzlog_info.paddr, GFP_KERNEL);
  1354. if (enc_tzlog_info.vaddr == NULL)
  1355. goto exit_unreg_qseelog;
  1356. ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
  1357. enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1358. PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
  1359. if (ret) {
  1360. pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
  1361. goto exit_free_tzlog;
  1362. }
  1363. pr_debug("Alloc memory for encr_tz_log, size %zu\n",
  1364. enc_qseelog_info.size);
  1365. return 0;
  1366. exit_free_tzlog:
  1367. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1368. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1369. exit_unreg_qseelog:
  1370. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1371. exit_free_qseelog:
  1372. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1373. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1374. return -ENOMEM;
  1375. }
/*
 * tzdbg_free_encrypted_log_buf() - undo tzdbg_allocate_encrypted_log_buf():
 * deregister both shm bridges and free both DMA buffers.
 */
static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
{
	qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
}
  1385. static bool is_hyp_dir(int tzdbg_stat_type)
  1386. {
  1387. switch(tzdbg_stat_type)
  1388. {
  1389. case TZDBG_HYP_GENERAL:
  1390. case TZDBG_HYP_LOG:
  1391. case TZDBG_RM_LOG:
  1392. return true;
  1393. default:
  1394. return false;
  1395. }
  1396. return false;
  1397. }
/*
 * tzdbg_fs_init() - create the tzdbg procfs directory and one entry per
 * stat, each carrying its stat index as proc entry data.
 *
 * Returns 0 on success, -ENOMEM on any creation failure (the whole
 * directory is removed on the error path).
 */
static int tzdbg_fs_init(struct platform_device *pdev)
{
	int rc = 0;
	int i;
	struct proc_dir_entry *dent_dir;
	struct proc_dir_entry *dent;

	dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
	if (dent_dir == NULL) {
		dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < TZDBG_STATS_MAX; i++) {
		tzdbg.debug_tz[i] = i;
		/*
		 * If hypervisor is disabled, do not create
		 * hyp_general, hyp_log and rm_log directories,
		 * as accessing them would give segmentation fault
		 */
		if ((!tzdbg.is_hyplog_enabled) && (is_hyp_dir(i))) {
			continue;
		}
		dent = proc_create_data(tzdbg.stat[i].name,
				0444, dent_dir,
				&tzdbg_fops, &tzdbg.debug_tz[i]);
		if (dent == NULL) {
			dev_err(&pdev->dev, "TZ proc_create_data failed\n");
			rc = -ENOMEM;
			goto err;
		}
	}

	/* remember the directory so tzdbg_fs_exit() can remove it */
	platform_set_drvdata(pdev, dent_dir);
	return 0;
err:
	remove_proc_entry(TZDBG_DIR_NAME, NULL);

	return rc;
}
  1434. static void tzdbg_fs_exit(struct platform_device *pdev)
  1435. {
  1436. struct proc_dir_entry *dent_dir;
  1437. dent_dir = platform_get_drvdata(pdev);
  1438. if (dent_dir)
  1439. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1440. }
/*
 * __update_hypdbg_base() - locate, map and shadow the hypervisor diag area.
 * @pdev:        owning platform device (DT node supplies the offsets).
 * @virt_iobase: mapped TZ register window holding the hyp diag address and
 *               size words at DT-specified offsets.
 *
 * Reads the physical base and size of the hyp diag region out of the
 * register window, ioremaps it, and allocates a same-sized kernel shadow
 * buffer (tzdbg.hyp_diag_buf) that readers snapshot into.
 *
 * Returns 0 on success, -EINVAL on missing DT properties, -ENXIO on map
 * failure, -ENOMEM on allocation failure.
 */
static int __update_hypdbg_base(struct platform_device *pdev,
			void __iomem *virt_iobase)
{
	phys_addr_t hypdiag_phy_iobase;
	uint32_t hyp_address_offset;
	uint32_t hyp_size_offset;
	struct hypdbg_t *hyp;
	uint32_t *ptr = NULL;

	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
							&hyp_address_offset)) {
		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
							&hyp_size_offset)) {
		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
		return -EINVAL;
	}

	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
					hyp_size_offset);

	tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
					hypdiag_phy_iobase,
					tzdbg.hyp_debug_rw_buf_size);
	if (!tzdbg.hyp_virt_iobase) {
		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
		return -ENXIO;
	}

	/* kernel-side shadow of the hyp diag area */
	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
	hyp = tzdbg.hyp_diag_buf;
	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
	return 0;
}
  1478. static int __update_rmlog_base(struct platform_device *pdev,
  1479. void __iomem *virt_iobase)
  1480. {
  1481. uint32_t rmlog_address;
  1482. uint32_t rmlog_size;
  1483. uint32_t *ptr = NULL;
  1484. /* if we don't get the node just ignore it */
  1485. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address",
  1486. &rmlog_address)) {
  1487. dev_err(&pdev->dev, "RM log address is not defined\n");
  1488. tzdbg.rmlog_rw_buf_size = 0;
  1489. return 0;
  1490. }
  1491. /* if we don't get the node just ignore it */
  1492. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size",
  1493. &rmlog_size)) {
  1494. dev_err(&pdev->dev, "RM log size is not defined\n");
  1495. tzdbg.rmlog_rw_buf_size = 0;
  1496. return 0;
  1497. }
  1498. tzdbg.rmlog_rw_buf_size = rmlog_size;
  1499. /* Check if there is RM log to read */
  1500. if (!tzdbg.rmlog_rw_buf_size) {
  1501. tzdbg.rmlog_virt_iobase = NULL;
  1502. tzdbg.rm_diag_buf = NULL;
  1503. dev_err(&pdev->dev, "RM log size is %d\n",
  1504. tzdbg.rmlog_rw_buf_size);
  1505. return 0;
  1506. }
  1507. tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev,
  1508. rmlog_address,
  1509. rmlog_size);
  1510. if (!tzdbg.rmlog_virt_iobase) {
  1511. dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
  1512. rmlog_address, tzdbg.rmlog_rw_buf_size);
  1513. return -ENXIO;
  1514. }
  1515. ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL);
  1516. if (!ptr)
  1517. return -ENOMEM;
  1518. tzdbg.rm_diag_buf = (uint8_t *)ptr;
  1519. return 0;
  1520. }
  1521. static int tzdbg_get_tz_version(void)
  1522. {
  1523. u64 version;
  1524. int ret = 0;
  1525. ret = qcom_scm_get_tz_log_feat_id(&version);
  1526. if (ret) {
  1527. pr_err("%s: scm_call to get tz version failed\n",
  1528. __func__);
  1529. return ret;
  1530. }
  1531. tzdbg.tz_version = version;
  1532. ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
  1533. if (ret) {
  1534. pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
  1535. __func__, ret);
  1536. return ret;
  1537. }
  1538. pr_warn("tz diag version is %x\n", version);
  1539. tzdbg.tz_diag_major_version =
  1540. ((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1541. tzdbg.tz_diag_minor_version =
  1542. ((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1543. if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
  1544. switch (tzdbg.tz_diag_minor_version) {
  1545. case TZBSP_DIAG_MINOR_VERSION_V2:
  1546. case TZBSP_DIAG_MINOR_VERSION_V21:
  1547. case TZBSP_DIAG_MINOR_VERSION_V22:
  1548. tzdbg.is_enlarged_buf = true;
  1549. break;
  1550. default:
  1551. tzdbg.is_enlarged_buf = false;
  1552. }
  1553. } else {
  1554. tzdbg.is_enlarged_buf = false;
  1555. }
  1556. return ret;
  1557. }
  1558. static void tzdbg_query_encrypted_log(void)
  1559. {
  1560. int ret = 0;
  1561. uint64_t enabled;
  1562. ret = qcom_scm_query_encrypted_log_feature(&enabled);
  1563. if (ret) {
  1564. if (ret == -EIO)
  1565. pr_info("SCM_CALL : SYS CALL NOT SUPPORTED IN TZ\n");
  1566. else
  1567. pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
  1568. tzdbg.is_encrypted_log_enabled = false;
  1569. } else {
  1570. pr_warn("encrypted qseelog enabled is %d\n", enabled);
  1571. tzdbg.is_encrypted_log_enabled = enabled;
  1572. }
  1573. }
  1574. /*
  1575. * Driver functions
  1576. */
  1577. static int tz_log_probe(struct platform_device *pdev)
  1578. {
  1579. struct resource *resource;
  1580. void __iomem *virt_iobase;
  1581. phys_addr_t tzdiag_phy_iobase;
  1582. uint32_t *ptr = NULL;
  1583. int ret = 0;
  1584. ret = tzdbg_get_tz_version();
  1585. if (ret)
  1586. return ret;
  1587. /*
  1588. * Get address that stores the physical location diagnostic data
  1589. */
  1590. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1591. if (!resource) {
  1592. dev_err(&pdev->dev,
  1593. "%s: ERROR Missing MEM resource\n", __func__);
  1594. return -ENXIO;
  1595. }
  1596. /*
  1597. * Get the debug buffer size
  1598. */
  1599. debug_rw_buf_size = resource_size(resource);
  1600. /*
  1601. * Map address that stores the physical location diagnostic data
  1602. */
  1603. virt_iobase = devm_ioremap(&pdev->dev, resource->start,
  1604. debug_rw_buf_size);
  1605. if (!virt_iobase) {
  1606. dev_err(&pdev->dev,
  1607. "%s: ERROR could not ioremap: start=%pr, len=%u\n",
  1608. __func__, &resource->start,
  1609. (unsigned int)(debug_rw_buf_size));
  1610. return -ENXIO;
  1611. }
  1612. if (pdev->dev.of_node) {
  1613. tzdbg.is_hyplog_enabled = of_property_read_bool(
  1614. (&pdev->dev)->of_node, "qcom,hyplog-enabled");
  1615. if (tzdbg.is_hyplog_enabled) {
  1616. ret = __update_hypdbg_base(pdev, virt_iobase);
  1617. if (ret) {
  1618. dev_err(&pdev->dev,
  1619. "%s: fail to get hypdbg_base ret %d\n",
  1620. __func__, ret);
  1621. return -EINVAL;
  1622. }
  1623. ret = __update_rmlog_base(pdev, virt_iobase);
  1624. if (ret) {
  1625. dev_err(&pdev->dev,
  1626. "%s: fail to get rmlog_base ret %d\n",
  1627. __func__, ret);
  1628. return -EINVAL;
  1629. }
  1630. } else {
  1631. dev_info(&pdev->dev, "Hyp log service not support\n");
  1632. }
  1633. } else {
  1634. dev_dbg(&pdev->dev, "Device tree data is not found\n");
  1635. }
  1636. /*
  1637. * Retrieve the address of diagnostic data
  1638. */
  1639. tzdiag_phy_iobase = readl_relaxed(virt_iobase);
  1640. tzdbg_query_encrypted_log();
  1641. /*
  1642. * Map the diagnostic information area if encryption is disabled
  1643. */
  1644. if (!tzdbg.is_encrypted_log_enabled) {
  1645. tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
  1646. tzdiag_phy_iobase, debug_rw_buf_size);
  1647. if (!tzdbg.virt_iobase) {
  1648. dev_err(&pdev->dev,
  1649. "%s: could not ioremap: start=%pr, len=%u\n",
  1650. __func__, &tzdiag_phy_iobase,
  1651. debug_rw_buf_size);
  1652. return -ENXIO;
  1653. }
  1654. /* allocate diag_buf */
  1655. ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
  1656. if (ptr == NULL)
  1657. return -ENOMEM;
  1658. tzdbg.diag_buf = (struct tzdbg_t *)ptr;
  1659. } else {
  1660. if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
  1661. (tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
  1662. tzdbg.is_full_encrypted_tz_logs_supported = true;
  1663. if (pdev->dev.of_node) {
  1664. tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
  1665. (&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
  1666. }
  1667. }
  1668. /* Init for tme log */
  1669. ret = tzdbg_init_tme_log(pdev, virt_iobase);
  1670. if (ret < 0)
  1671. pr_warn("Tme log initialization failed!\n");
  1672. /* register unencrypted qsee log buffer */
  1673. ret = tzdbg_register_qsee_log_buf(pdev);
  1674. if (ret)
  1675. goto exit_free_diag_buf;
  1676. /* allocate encrypted qsee and tz log buffer */
  1677. ret = tzdbg_allocate_encrypted_log_buf(pdev);
  1678. if (ret) {
  1679. dev_err(&pdev->dev,
  1680. " %s: Failed to allocate encrypted log buffer\n",
  1681. __func__);
  1682. goto exit_free_qsee_log_buf;
  1683. }
  1684. /* allocate display_buf */
  1685. if (UINT_MAX/4 < qseelog_buf_size) {
  1686. pr_err("display_buf_size integer overflow\n");
  1687. goto exit_free_qsee_log_buf;
  1688. }
  1689. display_buf_size = qseelog_buf_size * 4;
  1690. tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
  1691. &disp_buf_paddr, GFP_KERNEL);
  1692. if (tzdbg.disp_buf == NULL) {
  1693. ret = -ENOMEM;
  1694. goto exit_free_encr_log_buf;
  1695. }
  1696. if (tzdbg_fs_init(pdev))
  1697. goto exit_free_disp_buf;
  1698. return 0;
  1699. exit_free_disp_buf:
  1700. dma_free_coherent(&pdev->dev, display_buf_size,
  1701. (void *)tzdbg.disp_buf, disp_buf_paddr);
  1702. exit_free_encr_log_buf:
  1703. tzdbg_free_encrypted_log_buf(pdev);
  1704. exit_free_qsee_log_buf:
  1705. tzdbg_free_qsee_log_buf(pdev);
  1706. exit_free_diag_buf:
  1707. if (!tzdbg.is_encrypted_log_enabled)
  1708. kfree(tzdbg.diag_buf);
  1709. return -ENXIO;
  1710. }
  1711. static int tz_log_remove(struct platform_device *pdev)
  1712. {
  1713. tzdbg_fs_exit(pdev);
  1714. dma_free_coherent(&pdev->dev, display_buf_size,
  1715. (void *)tzdbg.disp_buf, disp_buf_paddr);
  1716. tzdbg_free_encrypted_log_buf(pdev);
  1717. tzdbg_free_qsee_log_buf(pdev);
  1718. if (!tzdbg.is_encrypted_log_enabled)
  1719. kfree(tzdbg.diag_buf);
  1720. return 0;
  1721. }
  1722. static const struct of_device_id tzlog_match[] = {
  1723. {.compatible = "qcom,tz-log"},
  1724. {}
  1725. };
  1726. static struct platform_driver tz_log_driver = {
  1727. .probe = tz_log_probe,
  1728. .remove = tz_log_remove,
  1729. .driver = {
  1730. .name = "tz_log",
  1731. .of_match_table = tzlog_match,
  1732. .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  1733. },
  1734. };
/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(tz_log_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TZ Log driver");