tz_log.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/errno.h>
  7. #include <linux/delay.h>
  8. #include <linux/io.h>
  9. #include <linux/msm_ion.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/slab.h>
  14. #include <linux/string.h>
  15. #include <linux/types.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/of.h>
  18. #include <linux/dma-buf.h>
  19. #include <linux/qcom_scm.h>
  20. #include <soc/qcom/qseecomi.h>
  21. #include <linux/qtee_shmbridge.h>
  22. #include <linux/proc_fs.h>
  23. #include <linux/version.h>
  24. /* QSEE_LOG_BUF_SIZE = 32K */
  25. #define QSEE_LOG_BUF_SIZE 0x8000
  26. /* enlarged qsee log buf size is 128K by default */
  27. #define QSEE_LOG_BUF_SIZE_V2 0x20000
  28. /* TZ Diagnostic Area legacy version number */
  29. #define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2
  30. /* TZ Diagnostic Area version number */
  31. #define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */
  32. #define TZBSP_FVER_MAJOR_SHIFT 22
  33. #define TZBSP_FVER_MINOR_SHIFT 12
  34. #define TZBSP_DIAG_MAJOR_VERSION_V9 9
  35. #define TZBSP_DIAG_MINOR_VERSION_V2 2
  36. #define TZBSP_DIAG_MINOR_VERSION_V21 3
  37. #define TZBSP_DIAG_MINOR_VERSION_V22 4
  38. /* TZ Diag Feature Version Id */
  39. #define QCOM_SCM_FEAT_DIAG_ID 0x06
  40. /*
  41. * Preprocessor Definitions and Constants
  42. */
  43. #define TZBSP_MAX_CPU_COUNT 0x08
  44. /*
  45. * Number of VMID Tables
  46. */
  47. #define TZBSP_DIAG_NUM_OF_VMID 16
  48. /*
  49. * VMID Description length
  50. */
  51. #define TZBSP_DIAG_VMID_DESC_LEN 7
  52. /*
  53. * Number of Interrupts
  54. */
  55. #define TZBSP_DIAG_INT_NUM 32
  56. /*
  57. * Length of descriptive name associated with Interrupt
  58. */
  59. #define TZBSP_MAX_INT_DESC 16
  60. /*
  61. * TZ 3.X version info
  62. */
  63. #define QSEE_VERSION_TZ_3_X 0x800000
  64. /*
  65. * TZ 4.X version info
  66. */
  67. #define QSEE_VERSION_TZ_4_X 0x1000000
  68. #define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
  69. #define TZBSP_NONCE_LEN 12
  70. #define TZBSP_TAG_LEN 16
  71. #define ENCRYPTED_TZ_LOG_ID 0
  72. #define ENCRYPTED_QSEE_LOG_ID 1
  73. /*
  74. * Directory for TZ DBG logs
  75. */
  76. #define TZDBG_DIR_NAME "tzdbg"
  77. /*
  78. * VMID Table
  79. */
/*
 * VMID Table entry: maps a TZ virtual-machine identifier to a short
 * human-readable label.  Layout is defined by TZ firmware — do not reorder.
 */
struct tzdbg_vmid_t {
	uint8_t vmid; /* Virtual Machine Identifier */
	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
};
/*
 * Boot Info Table (per-CPU, legacy 32-bit TZ)
 */
struct tzdbg_boot_info_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t spare; /* Reserved for future use. */
};
/*
 * Boot Info Table for 64-bit TZ (>= QSEE_VERSION_TZ_3_X); adds PSCI
 * counters and widens the warm-boot jump address to 64 bits.
 */
struct tzdbg_boot_info64_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
	uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */
	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
};
/*
 * Reset Info Table (per-CPU)
 */
struct tzdbg_reset_info_t {
	uint32_t reset_type; /* Reset Reason */
	uint32_t reset_cnt; /* Number of resets occurred/CPU */
};
  115. /*
  116. * Interrupt Info Table
  117. */
/*
 * Interrupt Info Table entry (TZ version < 4.X).
 */
struct tzdbg_int_t {
	/*
	 * Type of Interrupt/exception
	 */
	uint16_t int_info;
	/*
	 * Availability of the slot
	 */
	uint8_t avail;
	/*
	 * Reserved for future use
	 */
	uint8_t spare;
	/*
	 * Interrupt # for IRQ and FIQ
	 */
	uint32_t int_num;
	/*
	 * ASCII text describing type of interrupt e.g:
	 * Secure Timer, EBI XPU. This string is always null terminated,
	 * supporting at most TZBSP_MAX_INT_DESC characters.
	 * Any additional characters are truncated.
	 */
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};
/*
 * Interrupt Info Table used in tz version >= 4.X.
 * Identical to tzdbg_int_t except the per-CPU counters shrink to 32 bits,
 * which changes the record stride — readers must pick the right struct.
 */
struct tzdbg_int_t_tz40 {
	uint16_t int_info;
	uint8_t avail;
	uint8_t spare;
	uint32_t int_num;
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
};
/* warm boot reason for cores */
struct tzbsp_diag_wakeup_info_t {
	/* Wake source info : APCS_GICC_HPPIR */
	uint32_t HPPIR;
	/* Wake source info : APCS_GICC_AHPPIR */
	uint32_t AHPPIR;
};
  162. /*
  163. * Log ring buffer position
  164. */
/*
 * Log ring buffer position: wrap counts completed passes over the ring,
 * offset is the next byte index within it.
 */
struct tzdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* Same as tzdbg_log_pos_t but with 32-bit fields (enlarged-log TZ builds). */
struct tzdbg_log_pos_v2_t {
	uint32_t wrap;
	uint32_t offset;
};
/*
 * Log ring buffer
 */
struct tzdbg_log_t {
	struct tzdbg_log_pos_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* v2 ring buffer header, paired with tzdbg_log_pos_v2_t positions. */
struct tzdbg_log_v2_t {
	struct tzdbg_log_pos_v2_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};
/* Per-chunk encryption metadata: AES-GCM-style nonce + tag for one chunk. */
struct tzbsp_encr_info_for_log_chunk_t {
	uint32_t size_to_encr;
	uint8_t nonce[TZBSP_NONCE_LEN];
	uint8_t tag[TZBSP_TAG_LEN];
};
  191. /*
  192. * Only `ENTIRE_LOG` will be used unless the
  193. * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
  194. * If this is true, the diag log will be encrypted in two
  195. * separate chunks: a smaller chunk containing only error
  196. * fatal logs and a bigger "rest of the log" chunk. In this
  197. * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
  198. * used instead of `ENTIRE_LOG`.
  199. */
/*
 * Chunk indices into tzbsp_encr_info_t.chunks[].  Note ENTIRE_LOG and
 * ERR_FATAL_LOG_CHUNK deliberately share index 1: single-chunk devices use
 * slot 1 for the whole log, two-chunk devices use slot 0 (big) + slot 1
 * (err-fatal) — see the devcfg comment above the enum's original definition.
 */
enum tzbsp_encr_info_for_log_chunks_idx_t {
	BIG_LOG_CHUNK = 0,
	ENTIRE_LOG = 1,
	ERR_FATAL_LOG_CHUNK = 1,
	MAX_NUM_OF_CHUNKS,
};
/* Encryption info for all chunks plus the OEM-wrapped AES key. */
struct tzbsp_encr_info_t {
	uint32_t num_of_chunks;
	struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
};
  211. /*
  212. * Diagnostic Table
  213. * Note: This is the reference data structure for tz diagnostic table
  214. * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
  215. * copied into buffer from i/o memory.
  216. */
/*
 * Diagnostic Table
 * Note: This is the reference data structure for tz diagnostic table
 * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
 * copied into buffer from i/o memory.  Consumers therefore navigate via
 * the *_off fields rather than the struct members below.
 */
struct tzdbg_t {
	uint32_t magic_num;
	uint32_t version;
	/*
	 * Number of CPU's
	 */
	uint32_t cpu_count;
	/*
	 * Offset of VMID Table
	 */
	uint32_t vmid_info_off;
	/*
	 * Offset of Boot Table
	 */
	uint32_t boot_info_off;
	/*
	 * Offset of Reset info Table
	 */
	uint32_t reset_info_off;
	/*
	 * Offset of Interrupt info Table
	 */
	uint32_t int_info_off;
	/*
	 * Ring Buffer Offset
	 */
	uint32_t ring_off;
	/*
	 * Ring Buffer Length
	 */
	uint32_t ring_len;
	/* Offset for Wakeup info */
	uint32_t wakeup_info_off;
	union {
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V2
		 */
		struct {
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts;
			struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
			uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
			uint8_t nonce[TZBSP_NONCE_LEN];
			uint8_t tag[TZBSP_TAG_LEN];
		};
		/* The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V21
		 */
		struct {
			uint32_t encr_info_for_log_off;
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts_v2;
			struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
			struct tzbsp_encr_info_t encr_info_for_log;
		};
	};
	/*
	 * We need at least 2K for the ring buffer
	 */
	struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
};
/* Hypervisor log ring position (same wrap/offset scheme as the TZ log). */
struct hypdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};
/* RM log header: producer write index and total buffer size. */
struct rmdbg_log_hdr_t {
	uint32_t write_idx;
	uint32_t size;
};
/* RM log reader-side cursor. */
struct rmdbg_log_pos_t {
	uint32_t read_idx;
	uint32_t size;
};
/* Per-CPU hypervisor warm-boot entry/exit counters. */
struct hypdbg_boot_info_t {
	uint32_t warm_entry_cnt;
	uint32_t warm_exit_cnt;
};
/* Hypervisor diagnostic area header, mirrored from i/o memory. */
struct hypdbg_t {
	/* Magic Number */
	uint32_t magic_num;
	/* Number of CPU's */
	uint32_t cpu_count;
	/* Ring Buffer Offset */
	uint32_t ring_off;
	/* Ring buffer position mgmt */
	struct hypdbg_log_pos_t log_pos;
	uint32_t log_len;
	/* S2 fault numbers */
	uint32_t s2_fault_counter;
	/* Boot Info */
	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
	/* Ring buffer pointer */
	uint8_t log_buf_p[];
};
  337. /*
  338. * Enumeration order for VMID's
  339. */
/*
 * Enumeration order for VMID's — also indexes tzdbg.stat[] and therefore
 * fixes which debugfs/proc file each stat is exposed under.
 */
enum tzdbg_stats_type {
	TZDBG_BOOT = 0,
	TZDBG_RESET,
	TZDBG_INTERRUPT,
	TZDBG_VMID,
	TZDBG_GENERAL,
	TZDBG_LOG,
	TZDBG_QSEE_LOG,
	TZDBG_HYP_GENERAL,
	TZDBG_HYP_LOG,
	TZDBG_RM_LOG,
	TZDBG_STATS_MAX
};
/* One exposed statistic: file name plus the formatted text to display. */
struct tzdbg_stat {
	size_t display_len;
	size_t display_offset;
	char *name;
	char *data;
};
/* Top-level driver state shared by all the _disp_* formatters. */
struct tzdbg {
	void __iomem *virt_iobase;
	void __iomem *hyp_virt_iobase;
	void __iomem *rmlog_virt_iobase;
	struct tzdbg_t *diag_buf;
	struct hypdbg_t *hyp_diag_buf;
	uint8_t *rm_diag_buf;
	char *disp_buf;
	int debug_tz[TZDBG_STATS_MAX];
	struct tzdbg_stat stat[TZDBG_STATS_MAX];
	uint32_t hyp_debug_rw_buf_size;
	uint32_t rmlog_rw_buf_size;
	bool is_hyplog_enabled;
	uint32_t tz_version;
	bool is_encrypted_log_enabled;
	bool is_enlarged_buf;
	bool is_full_encrypted_tz_logs_supported;
	bool is_full_encrypted_tz_logs_enabled;
	int tz_diag_minor_version;
	int tz_diag_major_version;
};
/* Encrypted TZ/QSEE log as delivered by firmware.  Wire layout — keep as-is. */
struct tzbsp_encr_log_t {
	/* Magic Number */
	uint32_t magic_num;
	/* version NUMBER */
	uint32_t version;
	/* encrypted log size */
	uint32_t encr_log_buff_size;
	/* Wrap value*/
	uint16_t wrap_count;
	/* AES encryption key wrapped up with oem public key*/
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
	/* Nonce used for encryption*/
	uint8_t nonce[TZBSP_NONCE_LEN];
	/* Tag to be used for Validation */
	uint8_t tag[TZBSP_TAG_LEN];
	/* Encrypted log buffer */
	/* NOTE(review): legacy [1] instead of a C99 flexible array member;
	 * kept because sizeof() of this firmware-defined layout is ABI-visible. */
	uint8_t log_buf[1];
};
/* Bookkeeping for one shm-bridge-backed encrypted-log buffer. */
struct encrypted_log_info {
	phys_addr_t paddr;
	void *vaddr;
	size_t size;
	uint64_t shmb_handle; /* qtee_shmbridge handle for later deregistration */
};
/* Singleton driver state; each .name becomes a file under tzdbg/. */
static struct tzdbg tzdbg = {
	.stat[TZDBG_BOOT].name = "boot",
	.stat[TZDBG_RESET].name = "reset",
	.stat[TZDBG_INTERRUPT].name = "interrupt",
	.stat[TZDBG_VMID].name = "vmid",
	.stat[TZDBG_GENERAL].name = "general",
	.stat[TZDBG_LOG].name = "log",
	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
	.stat[TZDBG_HYP_LOG].name = "hyp_log",
	.stat[TZDBG_RM_LOG].name = "rm_log",
};
static struct tzdbg_log_t *g_qsee_log;       /* QSEE log ring, legacy 16-bit positions */
static struct tzdbg_log_v2_t *g_qsee_log_v2; /* QSEE log ring, enlarged 32-bit positions */
static dma_addr_t coh_pmem;                  /* DMA address of the coherent QSEE log buffer */
static uint32_t debug_rw_buf_size;           /* size of the mirrored TZ diag area */
static uint32_t display_buf_size;            /* size of tzdbg.disp_buf */
static uint32_t qseelog_buf_size;
static phys_addr_t disp_buf_paddr;
static uint64_t qseelog_shmbridge_handle;
static struct encrypted_log_info enc_qseelog_info;
static struct encrypted_log_info enc_tzlog_info;
  426. /*
  427. * Debugfs data structure and functions
  428. */
  429. static int _disp_tz_general_stats(void)
  430. {
  431. int len = 0;
  432. len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
  433. " Version : 0x%x\n"
  434. " Magic Number : 0x%x\n"
  435. " Number of CPU : %d\n",
  436. tzdbg.diag_buf->version,
  437. tzdbg.diag_buf->magic_num,
  438. tzdbg.diag_buf->cpu_count);
  439. tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
  440. return len;
  441. }
  442. static int _disp_tz_vmid_stats(void)
  443. {
  444. int i, num_vmid;
  445. int len = 0;
  446. struct tzdbg_vmid_t *ptr;
  447. ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
  448. tzdbg.diag_buf->vmid_info_off);
  449. num_vmid = ((tzdbg.diag_buf->boot_info_off -
  450. tzdbg.diag_buf->vmid_info_off)/
  451. (sizeof(struct tzdbg_vmid_t)));
  452. for (i = 0; i < num_vmid; i++) {
  453. if (ptr->vmid < 0xFF) {
  454. len += scnprintf(tzdbg.disp_buf + len,
  455. (debug_rw_buf_size - 1) - len,
  456. " 0x%x %s\n",
  457. (uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
  458. }
  459. if (len > (debug_rw_buf_size - 1)) {
  460. pr_warn("%s: Cannot fit all info into the buffer\n",
  461. __func__);
  462. break;
  463. }
  464. ptr++;
  465. }
  466. tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
  467. return len;
  468. }
/*
 * Format per-CPU warm-boot / power-collapse counters into tzdbg.disp_buf
 * and publish them as the "boot" stat.  TZ >= 3.X uses the 64-bit record
 * layout (tzdbg_boot_info64_t, with PSCI counters); older TZ uses the
 * 32-bit layout.  Returns the number of characters written.
 */
static int _disp_tz_boot_stats(void)
{
	int i;
	int len = 0;
	struct tzdbg_boot_info_t *ptr = NULL;
	struct tzdbg_boot_info64_t *ptr_64 = NULL;

	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
	/* Pick the record layout once; only one of ptr/ptr_64 is used below. */
	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
					tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	} else {
		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
					tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	}

	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%llx\n"
					" Warmboot entry CPU counter : 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter : 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n"
					" Psci entry CPU counter : 0x%x\n"
					" Psci exit CPU counter : 0x%x\n"
					" Warmboot Jump Address Instruction : 0x%x\n",
					i, (uint64_t)ptr_64->warm_jmp_addr,
					ptr_64->wb_entry_cnt,
					ptr_64->wb_exit_cnt,
					ptr_64->pc_entry_cnt,
					ptr_64->pc_exit_cnt,
					ptr_64->psci_entry_cnt,
					ptr_64->psci_exit_cnt,
					ptr_64->warm_jmp_instr);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr_64++;
		} else {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%x\n"
					" Warmboot entry CPU counter: 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter: 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n",
					i, ptr->warm_jmp_addr,
					ptr->wb_entry_cnt,
					ptr->wb_exit_cnt,
					ptr->pc_entry_cnt,
					ptr->pc_exit_cnt);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr++;
		}
	}
	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
	return len;
}
  535. static int _disp_tz_reset_stats(void)
  536. {
  537. int i;
  538. int len = 0;
  539. struct tzdbg_reset_info_t *ptr;
  540. ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
  541. tzdbg.diag_buf->reset_info_off);
  542. for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
  543. len += scnprintf(tzdbg.disp_buf + len,
  544. (debug_rw_buf_size - 1) - len,
  545. " CPU #: %d\n"
  546. " Reset Type (reason) : 0x%x\n"
  547. " Reset counter : 0x%x\n",
  548. i, ptr->reset_type, ptr->reset_cnt);
  549. if (len > (debug_rw_buf_size - 1)) {
  550. pr_warn("%s: Cannot fit all info into the buffer\n",
  551. __func__);
  552. break;
  553. }
  554. ptr++;
  555. }
  556. tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
  557. return len;
  558. }
  559. static int _disp_tz_interrupt_stats(void)
  560. {
  561. int i, j;
  562. int len = 0;
  563. int *num_int;
  564. void *ptr;
  565. struct tzdbg_int_t *tzdbg_ptr;
  566. struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
  567. num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
  568. (tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
  569. ptr = ((unsigned char *)tzdbg.diag_buf +
  570. tzdbg.diag_buf->int_info_off);
  571. pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
  572. if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
  573. tzdbg_ptr = ptr;
  574. for (i = 0; i < (*num_int); i++) {
  575. len += scnprintf(tzdbg.disp_buf + len,
  576. (debug_rw_buf_size - 1) - len,
  577. " Interrupt Number : 0x%x\n"
  578. " Type of Interrupt : 0x%x\n"
  579. " Description of interrupt : %s\n",
  580. tzdbg_ptr->int_num,
  581. (uint32_t)tzdbg_ptr->int_info,
  582. (uint8_t *)tzdbg_ptr->int_desc);
  583. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  584. len += scnprintf(tzdbg.disp_buf + len,
  585. (debug_rw_buf_size - 1) - len,
  586. " int_count on CPU # %d : %u\n",
  587. (uint32_t)j,
  588. (uint32_t)tzdbg_ptr->int_count[j]);
  589. }
  590. len += scnprintf(tzdbg.disp_buf + len,
  591. debug_rw_buf_size - 1, "\n");
  592. if (len > (debug_rw_buf_size - 1)) {
  593. pr_warn("%s: Cannot fit all info into buf\n",
  594. __func__);
  595. break;
  596. }
  597. tzdbg_ptr++;
  598. }
  599. } else {
  600. tzdbg_ptr_tz40 = ptr;
  601. for (i = 0; i < (*num_int); i++) {
  602. len += scnprintf(tzdbg.disp_buf + len,
  603. (debug_rw_buf_size - 1) - len,
  604. " Interrupt Number : 0x%x\n"
  605. " Type of Interrupt : 0x%x\n"
  606. " Description of interrupt : %s\n",
  607. tzdbg_ptr_tz40->int_num,
  608. (uint32_t)tzdbg_ptr_tz40->int_info,
  609. (uint8_t *)tzdbg_ptr_tz40->int_desc);
  610. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  611. len += scnprintf(tzdbg.disp_buf + len,
  612. (debug_rw_buf_size - 1) - len,
  613. " int_count on CPU # %d : %u\n",
  614. (uint32_t)j,
  615. (uint32_t)tzdbg_ptr_tz40->int_count[j]);
  616. }
  617. len += scnprintf(tzdbg.disp_buf + len,
  618. debug_rw_buf_size - 1, "\n");
  619. if (len > (debug_rw_buf_size - 1)) {
  620. pr_warn("%s: Cannot fit all info into buf\n",
  621. __func__);
  622. break;
  623. }
  624. tzdbg_ptr_tz40++;
  625. }
  626. }
  627. tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
  628. return len;
  629. }
  630. static int _disp_tz_log_stats_legacy(void)
  631. {
  632. int len = 0;
  633. unsigned char *ptr;
  634. ptr = (unsigned char *)tzdbg.diag_buf +
  635. tzdbg.diag_buf->ring_off;
  636. len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
  637. "%s\n", ptr);
  638. tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
  639. return len;
  640. }
/*
 * Drain new bytes from a TZ log ring buffer (legacy 16-bit positions)
 * into tzdbg.disp_buf, starting at the caller's saved position
 * *log_start and advancing it.  Blocks (interruptible, 50 ms polls)
 * while the ring is empty; for the TZ log the diag area is re-mirrored
 * from i/o memory on each poll.  Returns the number of bytes copied,
 * or 0 if the sleep was interrupted.
 *
 * @log:       ring buffer header + data (producer side, written by TZ)
 * @log_start: reader's saved wrap/offset cursor (updated in place)
 * @log_len:   ring capacity in bytes
 * @count:     max bytes the caller wants
 * @buf_idx:   which stat receives the data (TZDBG_LOG or TZDBG_QSEE_LOG)
 */
static int _disp_log_stats(struct tzdbg_log_t *log,
		struct tzdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}
		/* Only the TZ log lives in i/o memory and needs re-mirroring;
		 * the QSEE log is in DMA-coherent memory and updates in place. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
					debug_rw_buf_size);
	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * v2 variant of _disp_log_stats for enlarged log buffers: identical
 * drain logic but with 32-bit wrap/offset positions
 * (tzdbg_log_pos_v2_t), since a 128K ring overflows uint16_t offsets.
 * See _disp_log_stats for the parameter contract.
 */
static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
		struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}
		/* Only the TZ log is mirrored from i/o memory. */
		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
					debug_rw_buf_size);
	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
		log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
/*
 * Drain new bytes from the hypervisor log ring into tzdbg.disp_buf.
 * Same wrap/offset scheme as _disp_log_stats, but the producer position
 * lives in the hypdbg_t header (tzdbg.hyp_diag_buf) rather than next to
 * the data, and the whole hyp diag area is re-mirrored from i/o memory
 * on each empty-ring poll.  Returns bytes copied, or 0 if interrupted.
 *
 * @log:       start of the hyp ring data
 * @log_start: reader's saved wrap/offset cursor (updated in place)
 * @log_len:   ring capacity in bytes
 * @count:     max bytes the caller wants
 * @buf_idx:   stat slot that receives the data (TZDBG_HYP_LOG)
 */
static int __disp_hyp_log_stats(uint8_t *log,
		struct hypdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
	unsigned long t = 0;
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = hyp->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = hyp->log_pos.wrap - 1;
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
		(hyp->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	}

	while (log_start->offset == hyp->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		t = msleep_interruptible(50);
		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}
		/* TZDBG_HYP_LOG */
		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
						tzdbg.hyp_debug_rw_buf_size);
	}

	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
				tzdbg.hyp_debug_rw_buf_size : count;

	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
  832. static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len)
  833. {
  834. uint32_t i = 0;
  835. /*
  836. * Transfer data from rm dialog buff to display buffer in user space
  837. */
  838. while ((i < max_len) && (i < display_buf_size)) {
  839. tzdbg.disp_buf[i] = log_ptr[i];
  840. i++;
  841. }
  842. if (i != max_len)
  843. pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n",
  844. i, display_buf_size);
  845. tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf;
  846. return i;
  847. }
  848. static int print_text(char *intro_message,
  849. unsigned char *text_addr,
  850. unsigned int size,
  851. char *buf, uint32_t buf_len)
  852. {
  853. unsigned int i;
  854. int len = 0;
  855. pr_debug("begin address %p, size %d\n", text_addr, size);
  856. len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
  857. for (i = 0; i < size; i++) {
  858. if (buf_len <= len + 6) {
  859. pr_err("buffer not enough, buf_len %d, len %d\n",
  860. buf_len, len);
  861. return buf_len;
  862. }
  863. len += scnprintf(buf + len, buf_len - len, "%02hhx ",
  864. text_addr[i]);
  865. if ((i & 0x1f) == 0x1f)
  866. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  867. }
  868. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  869. return len;
  870. }
  871. static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
  872. enum tzdbg_stats_type type, uint32_t log_id)
  873. {
  874. int ret = 0, len = 0;
  875. struct tzbsp_encr_log_t *encr_log_head;
  876. uint32_t size = 0;
  877. if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
  878. (tzdbg.is_full_encrypted_tz_logs_enabled))
  879. pr_info("TZ not supporting full encrypted log functionality\n");
  880. ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
  881. enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported,
  882. tzdbg.is_full_encrypted_tz_logs_enabled);
  883. if (ret)
  884. return 0;
  885. encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
  886. pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
  887. display_buf_size, encr_log_head->encr_log_buff_size);
  888. size = encr_log_head->encr_log_buff_size;
  889. len += scnprintf(tzdbg.disp_buf + len,
  890. (display_buf_size - 1) - len,
  891. "\n-------- New Encrypted %s --------\n",
  892. ((log_id == ENCRYPTED_QSEE_LOG_ID) ?
  893. "QSEE Log" : "TZ Dialog"));
  894. len += scnprintf(tzdbg.disp_buf + len,
  895. (display_buf_size - 1) - len,
  896. "\nMagic_Num :\n0x%x\n"
  897. "\nVerion :\n%d\n"
  898. "\nEncr_Log_Buff_Size :\n%d\n"
  899. "\nWrap_Count :\n%d\n",
  900. encr_log_head->magic_num,
  901. encr_log_head->version,
  902. encr_log_head->encr_log_buff_size,
  903. encr_log_head->wrap_count);
  904. len += print_text("\nKey : ", encr_log_head->key,
  905. TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
  906. tzdbg.disp_buf + len, display_buf_size);
  907. len += print_text("\nNonce : ", encr_log_head->nonce,
  908. TZBSP_NONCE_LEN,
  909. tzdbg.disp_buf + len, display_buf_size - len);
  910. len += print_text("\nTag : ", encr_log_head->tag,
  911. TZBSP_TAG_LEN,
  912. tzdbg.disp_buf + len, display_buf_size - len);
  913. if (len > display_buf_size - size)
  914. pr_warn("Cannot fit all info into the buffer\n");
  915. pr_debug("encrypted log size %d, disply buffer size %d, used len %d\n",
  916. size, display_buf_size, len);
  917. len += print_text("\nLog : ", encr_log_head->log_buf, size,
  918. tzdbg.disp_buf + len, display_buf_size - len);
  919. memset(enc_log_info->vaddr, 0, enc_log_info->size);
  920. tzdbg.stat[type].data = tzdbg.disp_buf;
  921. return len;
  922. }
/*
 * Display the TZ diagnostic ring log.
 *
 * The diag area stores ring_off as the offset of the ring's byte array;
 * subtracting offsetof(..., log_buf) yields a pointer positioned so that
 * ->log_buf lands on the ring data.  Both v1 and v2 views are computed,
 * then one is used depending on whether the enlarged (v2) buffer layout
 * is active.  The static position structs persist the reader offset
 * across successive reads of the proc file.
 */
static int _disp_tz_log_stats(size_t count)
{
	static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
	static struct tzdbg_log_pos_t log_start = {0};
	struct tzdbg_log_v2_t *log_v2_ptr;
	struct tzdbg_log_t *log_ptr;

	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->ring_off -
			offsetof(struct tzdbg_log_t, log_buf));
	log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->ring_off -
			offsetof(struct tzdbg_log_v2_t, log_buf));

	if (!tzdbg.is_enlarged_buf)
		return _disp_log_stats(log_ptr, &log_start,
				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);

	return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
			tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
}
  941. static int _disp_hyp_log_stats(size_t count)
  942. {
  943. static struct hypdbg_log_pos_t log_start = {0};
  944. uint8_t *log_ptr;
  945. uint32_t log_len;
  946. log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
  947. tzdbg.hyp_diag_buf->ring_off);
  948. log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;
  949. return __disp_hyp_log_stats(log_ptr, &log_start,
  950. log_len, count, TZDBG_HYP_LOG);
  951. }
/*
 * Display the Resource Manager (RM) log.
 *
 * The RM log is read as one snapshot and then handed to the reader in
 * count-sized chunks across successive calls.  The static `log_start`
 * tracks how much of the snapshot remains; `wrap_around` flags that a
 * multi-chunk read is in progress (despite its name it marks "snapshot
 * already taken", not a ring wrap).  When the snapshot is exhausted the
 * next call returns 0 to close the display file.
 */
static int _disp_rm_log_stats(size_t count)
{
	static struct rmdbg_log_pos_t log_start = { 0 };
	struct rmdbg_log_hdr_t *p_log_hdr = NULL;
	uint8_t *log_ptr = NULL;
	uint32_t log_len = 0;
	static bool wrap_around = { false };

	/* Return 0 to close the display file,if there is nothing else to do */
	if ((log_start.size == 0x0) && wrap_around) {
		wrap_around = false;
		return 0;
	}
	/* Copy RM log data to tzdbg diag buffer for the first time */
	/* Initialize the tracking data structure */
	if (tzdbg.rmlog_rw_buf_size != 0) {
		if (!wrap_around) {
			memcpy_fromio((void *)tzdbg.rm_diag_buf,
					tzdbg.rmlog_virt_iobase,
					tzdbg.rmlog_rw_buf_size);
			/* get RM header info first */
			p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf;
			/* Update RM log buffer index tracker and its size */
			log_start.read_idx = 0x0;
			log_start.size = p_log_hdr->size;
		}
		/* Update RM log buffer starting ptr (data follows the header) */
		log_ptr =
			(uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf +
					sizeof(struct rmdbg_log_hdr_t));
	} else {
		/* Return 0 to close the display file,if there is nothing else to do */
		pr_err("There is no RM log to read, size is %d!\n",
			tzdbg.rmlog_rw_buf_size);
		return 0;
	}
	log_len = log_start.size;
	log_ptr += log_start.read_idx;
	/* Check if we exceed the max length provided by user space */
	log_len = (count > log_len) ? log_len : count;
	/* Update tracking data structure */
	log_start.size -= log_len;
	log_start.read_idx += log_len;

	/* More data pending: remember that the snapshot is mid-read */
	if (log_start.size)
		wrap_around = true;
	return __disp_rm_log_stats(log_ptr, log_len);
}
  998. static int _disp_qsee_log_stats(size_t count)
  999. {
  1000. static struct tzdbg_log_pos_t log_start = {0};
  1001. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  1002. if (!tzdbg.is_enlarged_buf)
  1003. return _disp_log_stats(g_qsee_log, &log_start,
  1004. QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
  1005. count, TZDBG_QSEE_LOG);
  1006. return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
  1007. QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
  1008. count, TZDBG_QSEE_LOG);
  1009. }
/*
 * Render hypervisor general statistics (magic, CPU count, S2 fault count
 * and per-CPU warmboot counters) as text into tzdbg.disp_buf.  Returns
 * the number of characters written; stops early with a warning if the
 * buffer fills up.
 */
static int _disp_hyp_general_stats(size_t count)
{
	int len = 0;
	int i;
	struct hypdbg_boot_info_t *ptr = NULL;

	/* len is 0 here, so passing buf_size-1 (not -len) is equivalent */
	len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
			tzdbg.hyp_debug_rw_buf_size - 1,
			"   Magic Number    : 0x%x\n"
			"   CPU Count       : 0x%x\n"
			"   S2 Fault Counter: 0x%x\n",
			tzdbg.hyp_diag_buf->magic_num,
			tzdbg.hyp_diag_buf->cpu_count,
			tzdbg.hyp_diag_buf->s2_fault_counter);

	ptr = tzdbg.hyp_diag_buf->boot_info;
	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
		len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
				"  CPU #: %d\n"
				"     Warmboot entry CPU counter: 0x%x\n"
				"     Warmboot exit CPU counter : 0x%x\n",
				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);

		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
				__func__);
			break;
		}
		ptr++;
	}

	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
	return len;
}
/*
 * Read handler for the unencrypted-log case.
 *
 * Snapshots the relevant diag region from IO memory, dispatches to the
 * per-stat display routine (which fills tzdbg.disp_buf and sets
 * tzdbg.stat[tz_id].data), then copies the rendered text to user space.
 * For the streaming stats (TZ/QSEE/HYP/RM logs) *offp is reset to 0 so
 * simple_read_from_buffer() always copies from the start of the freshly
 * rendered chunk.
 */
static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0;

	/* Refresh the TZ diag snapshot for TZ-sourced stats */
	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	/* Refresh the hypervisor diag snapshot for hyp-sourced stats */
	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
				tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);

	switch (tz_id) {
	case TZDBG_BOOT:
		len = _disp_tz_boot_stats();
		break;
	case TZDBG_RESET:
		len = _disp_tz_reset_stats();
		break;
	case TZDBG_INTERRUPT:
		len = _disp_tz_interrupt_stats();
		break;
	case TZDBG_GENERAL:
		len = _disp_tz_general_stats();
		break;
	case TZDBG_VMID:
		len = _disp_tz_vmid_stats();
		break;
	case TZDBG_LOG:
		/* Ring-style log only exists past the legacy major version */
		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
				(tzdbg.diag_buf->version >> 16)) {
			len = _disp_tz_log_stats(count);
			*offp = 0;
		} else {
			len = _disp_tz_log_stats_legacy();
		}
		break;
	case TZDBG_QSEE_LOG:
		len = _disp_qsee_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_HYP_GENERAL:
		len = _disp_hyp_general_stats(count);
		break;
	case TZDBG_HYP_LOG:
		len = _disp_hyp_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_RM_LOG:
		len = _disp_rm_log_stats(count);
		*offp = 0;
		break;
	default:
		break;
	}

	if (len > count)
		len = count;

	return simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data, len);
}
/*
 * Read handler for the encrypted-log case.
 *
 * On the first read (display_len == 0) the encrypted log is fetched from
 * TZ and rendered into the display buffer; subsequent reads stream the
 * remainder via display_offset/display_len until it is exhausted.
 */
static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0, ret = 0;
	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);

	pr_debug("%s: tz_id = %d\n", __func__, tz_id);

	/* Out-of-range id: return 0 (ret is still 0 here) */
	if (tz_id >= TZDBG_STATS_MAX) {
		pr_err("invalid encrypted log id %d\n", tz_id);
		return ret;
	}

	if (!stat->display_len) {
		if (tz_id == TZDBG_QSEE_LOG)
			stat->display_len = _disp_encrpted_log_stats(
					&enc_qseelog_info,
					tz_id, ENCRYPTED_QSEE_LOG_ID);
		else
			stat->display_len = _disp_encrpted_log_stats(
					&enc_tzlog_info,
					tz_id, ENCRYPTED_TZ_LOG_ID);
		stat->display_offset = 0;
	}
	len = stat->display_len;
	if (len > count)
		len = count;

	/* Always copy from offset 0 of the current window */
	*offp = 0;
	/*
	 * NOTE(review): `len` (clamped) is passed as the user-copy size while
	 * `count` is passed as the available length of the source window —
	 * looks like these should both be `len`; confirm against upstream.
	 */
	ret = simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data + stat->display_offset,
				count);
	stat->display_offset += ret;
	stat->display_len -= ret;
	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
	pr_debug("display_len = %d, offset = %d\n",
			stat->display_len, stat->display_offset);
	return ret;
}
  1137. static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
  1138. size_t count, loff_t *offp)
  1139. {
  1140. struct seq_file *seq = file->private_data;
  1141. int tz_id = TZDBG_STATS_MAX;
  1142. if (seq)
  1143. tz_id = *(int *)(seq->private);
  1144. else {
  1145. pr_err("%s: Seq data null unable to proceed\n", __func__);
  1146. return 0;
  1147. }
  1148. if (!tzdbg.is_encrypted_log_enabled ||
  1149. (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
  1150. || tz_id == TZDBG_RM_LOG)
  1151. return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
  1152. else
  1153. return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
  1154. }
/*
 * proc open: bind the entry's private data (the stat id) to a seq_file.
 * PDE_DATA() was renamed pde_data() in v5.17 and removed later, hence
 * the kernel-version gate.
 */
static int tzdbg_procfs_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
	return single_open(file, NULL, PDE_DATA(inode));
#else
	return single_open(file, NULL, pde_data(inode));
#endif
}
/* proc release: tear down the seq_file created by tzdbg_procfs_open(). */
static int tzdbg_procfs_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
/* proc_ops shared by every tzdbg stat entry; read is the custom handler. */
struct proc_ops tzdbg_fops = {
	.proc_flags   = PROC_ENTRY_PERMANENT,
	.proc_read    = tzdbg_fs_read,
	.proc_open    = tzdbg_procfs_open,
	.proc_release = tzdbg_procfs_release,
};
  1173. /*
  1174. * Allocates log buffer from ION, registers the buffer at TZ
  1175. */
  1176. static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
  1177. {
  1178. int ret = 0;
  1179. void *buf = NULL;
  1180. uint32_t ns_vmids[] = {VMID_HLOS};
  1181. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1182. uint32_t ns_vm_nums = 1;
  1183. if (tzdbg.is_enlarged_buf) {
  1184. if (of_property_read_u32((&pdev->dev)->of_node,
  1185. "qseelog-buf-size-v2", &qseelog_buf_size)) {
  1186. pr_debug("Enlarged qseelog buf size isn't defined\n");
  1187. qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
  1188. }
  1189. } else {
  1190. qseelog_buf_size = QSEE_LOG_BUF_SIZE;
  1191. }
  1192. pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);
  1193. buf = dma_alloc_coherent(&pdev->dev,
  1194. qseelog_buf_size, &coh_pmem, GFP_KERNEL);
  1195. if (buf == NULL)
  1196. return -ENOMEM;
  1197. if (!tzdbg.is_encrypted_log_enabled) {
  1198. ret = qtee_shmbridge_register(coh_pmem,
  1199. qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1200. PERM_READ | PERM_WRITE,
  1201. &qseelog_shmbridge_handle);
  1202. if (ret) {
  1203. pr_err("failed to create bridge for qsee_log buf\n");
  1204. goto exit_free_mem;
  1205. }
  1206. }
  1207. g_qsee_log = (struct tzdbg_log_t *)buf;
  1208. g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
  1209. g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
  1210. g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;
  1211. ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
  1212. if (ret != QSEOS_RESULT_SUCCESS) {
  1213. pr_err(
  1214. "%s: scm_call to register log buf failed, resp result =%lld\n",
  1215. __func__, ret);
  1216. goto exit_dereg_bridge;
  1217. }
  1218. return ret;
  1219. exit_dereg_bridge:
  1220. if (!tzdbg.is_encrypted_log_enabled)
  1221. qtee_shmbridge_deregister(qseelog_shmbridge_handle);
  1222. exit_free_mem:
  1223. dma_free_coherent(&pdev->dev, qseelog_buf_size,
  1224. (void *)g_qsee_log, coh_pmem);
  1225. return ret;
  1226. }
/*
 * Release the QSEE log buffer: drop the shm bridge (only created when
 * logs are unencrypted) and free the coherent allocation.
 */
static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
{
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			(void *)g_qsee_log, coh_pmem);
}
  1234. static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
  1235. {
  1236. int ret = 0;
  1237. uint32_t ns_vmids[] = {VMID_HLOS};
  1238. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1239. uint32_t ns_vm_nums = 1;
  1240. if (!tzdbg.is_encrypted_log_enabled)
  1241. return 0;
  1242. /* max encrypted qsee log buf zize (include header, and page align) */
  1243. enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;
  1244. enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1245. enc_qseelog_info.size,
  1246. &enc_qseelog_info.paddr, GFP_KERNEL);
  1247. if (enc_qseelog_info.vaddr == NULL)
  1248. return -ENOMEM;
  1249. ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
  1250. enc_qseelog_info.size, ns_vmids,
  1251. ns_vm_perms, ns_vm_nums,
  1252. PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
  1253. if (ret) {
  1254. pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
  1255. goto exit_free_qseelog;
  1256. }
  1257. pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
  1258. enc_qseelog_info.size);
  1259. enc_tzlog_info.size = debug_rw_buf_size;
  1260. enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1261. enc_tzlog_info.size,
  1262. &enc_tzlog_info.paddr, GFP_KERNEL);
  1263. if (enc_tzlog_info.vaddr == NULL)
  1264. goto exit_unreg_qseelog;
  1265. ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
  1266. enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1267. PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
  1268. if (ret) {
  1269. pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
  1270. goto exit_free_tzlog;
  1271. }
  1272. pr_debug("Alloc memory for encr_tz_log, size %zu\n",
  1273. enc_qseelog_info.size);
  1274. return 0;
  1275. exit_free_tzlog:
  1276. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1277. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1278. exit_unreg_qseelog:
  1279. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1280. exit_free_qseelog:
  1281. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1282. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1283. return -ENOMEM;
  1284. }
/*
 * Unwind tzdbg_allocate_encrypted_log_buf(): deregister both shm bridges
 * and free both coherent buffers.
 *
 * NOTE(review): also reached from probe error paths where the buffers may
 * never have been allocated (encrypted logging disabled) — presumably the
 * deregister/free calls tolerate that; verify.
 */
static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
{
	qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
}
  1294. static bool is_hyp_dir(int tzdbg_stat_type)
  1295. {
  1296. switch(tzdbg_stat_type)
  1297. {
  1298. case TZDBG_HYP_GENERAL:
  1299. case TZDBG_HYP_LOG:
  1300. case TZDBG_RM_LOG:
  1301. return true;
  1302. default:
  1303. return false;
  1304. }
  1305. return false;
  1306. }
  1307. static int tzdbg_fs_init(struct platform_device *pdev)
  1308. {
  1309. int rc = 0;
  1310. int i;
  1311. struct proc_dir_entry *dent_dir;
  1312. struct proc_dir_entry *dent;
  1313. dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
  1314. if (dent_dir == NULL) {
  1315. dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
  1316. return -ENOMEM;
  1317. }
  1318. for (i = 0; i < TZDBG_STATS_MAX; i++) {
  1319. tzdbg.debug_tz[i] = i;
  1320. /*
  1321. * If hypervisor is disabled, do not create
  1322. * hyp_general, hyp_log and rm_log directories,
  1323. * as accessing them would give segmentation fault
  1324. */
  1325. if ((!tzdbg.is_hyplog_enabled) && (is_hyp_dir(i))) {
  1326. continue;
  1327. }
  1328. dent = proc_create_data(tzdbg.stat[i].name,
  1329. 0444, dent_dir,
  1330. &tzdbg_fops, &tzdbg.debug_tz[i]);
  1331. if (dent == NULL) {
  1332. dev_err(&pdev->dev, "TZ proc_create_data failed\n");
  1333. rc = -ENOMEM;
  1334. goto err;
  1335. }
  1336. }
  1337. platform_set_drvdata(pdev, dent_dir);
  1338. return 0;
  1339. err:
  1340. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1341. return rc;
  1342. }
/*
 * Remove the tzdbg proc directory created by tzdbg_fs_init(), if it was
 * recorded in the platform drvdata.
 */
static void tzdbg_fs_exit(struct platform_device *pdev)
{
	struct proc_dir_entry *dent_dir;

	dent_dir = platform_get_drvdata(pdev);
	if (dent_dir)
		remove_proc_entry(TZDBG_DIR_NAME, NULL);
}
/*
 * Locate and map the hypervisor diag region.
 *
 * The TZ diag header (already mapped at @virt_iobase) stores the physical
 * address and size of the hyp region at DT-specified offsets.  The region
 * is ioremapped and a zeroed kernel-side snapshot buffer allocated for it.
 * Returns 0 on success, -EINVAL/-ENXIO/-ENOMEM on failure.
 */
static int __update_hypdbg_base(struct platform_device *pdev,
			void __iomem *virt_iobase)
{
	phys_addr_t hypdiag_phy_iobase;
	uint32_t hyp_address_offset;
	uint32_t hyp_size_offset;
	struct hypdbg_t *hyp;
	uint32_t *ptr = NULL;

	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
							&hyp_address_offset)) {
		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
							&hyp_size_offset)) {
		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
		return -EINVAL;
	}

	/* The TZ diag header publishes where the hyp diag region lives */
	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
					hyp_size_offset);

	tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
					hypdiag_phy_iobase,
					tzdbg.hyp_debug_rw_buf_size);
	if (!tzdbg.hyp_virt_iobase) {
		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
		return -ENXIO;
	}

	/* Kernel-side snapshot buffer for the hyp diag region */
	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
	hyp = tzdbg.hyp_diag_buf;
	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
	return 0;
}
  1387. static int __update_rmlog_base(struct platform_device *pdev,
  1388. void __iomem *virt_iobase)
  1389. {
  1390. uint32_t rmlog_address;
  1391. uint32_t rmlog_size;
  1392. uint32_t *ptr = NULL;
  1393. /* if we don't get the node just ignore it */
  1394. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address",
  1395. &rmlog_address)) {
  1396. dev_err(&pdev->dev, "RM log address is not defined\n");
  1397. tzdbg.rmlog_rw_buf_size = 0;
  1398. return 0;
  1399. }
  1400. /* if we don't get the node just ignore it */
  1401. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size",
  1402. &rmlog_size)) {
  1403. dev_err(&pdev->dev, "RM log size is not defined\n");
  1404. tzdbg.rmlog_rw_buf_size = 0;
  1405. return 0;
  1406. }
  1407. tzdbg.rmlog_rw_buf_size = rmlog_size;
  1408. /* Check if there is RM log to read */
  1409. if (!tzdbg.rmlog_rw_buf_size) {
  1410. tzdbg.rmlog_virt_iobase = NULL;
  1411. tzdbg.rm_diag_buf = NULL;
  1412. dev_err(&pdev->dev, "RM log size is %d\n",
  1413. tzdbg.rmlog_rw_buf_size);
  1414. return 0;
  1415. }
  1416. tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev,
  1417. rmlog_address,
  1418. rmlog_size);
  1419. if (!tzdbg.rmlog_virt_iobase) {
  1420. dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
  1421. rmlog_address, tzdbg.rmlog_rw_buf_size);
  1422. return -ENXIO;
  1423. }
  1424. ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL);
  1425. if (!ptr)
  1426. return -ENOMEM;
  1427. tzdbg.rm_diag_buf = (uint8_t *)ptr;
  1428. return 0;
  1429. }
  1430. static int tzdbg_get_tz_version(void)
  1431. {
  1432. u64 version;
  1433. int ret = 0;
  1434. ret = qcom_scm_get_tz_log_feat_id(&version);
  1435. if (ret) {
  1436. pr_err("%s: scm_call to get tz version failed\n",
  1437. __func__);
  1438. return ret;
  1439. }
  1440. tzdbg.tz_version = version;
  1441. ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
  1442. if (ret) {
  1443. pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
  1444. __func__, ret);
  1445. return ret;
  1446. }
  1447. pr_warn("tz diag version is %x\n", version);
  1448. tzdbg.tz_diag_major_version =
  1449. ((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1450. tzdbg.tz_diag_minor_version =
  1451. ((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1452. if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
  1453. switch (tzdbg.tz_diag_minor_version) {
  1454. case TZBSP_DIAG_MINOR_VERSION_V2:
  1455. case TZBSP_DIAG_MINOR_VERSION_V21:
  1456. case TZBSP_DIAG_MINOR_VERSION_V22:
  1457. tzdbg.is_enlarged_buf = true;
  1458. break;
  1459. default:
  1460. tzdbg.is_enlarged_buf = false;
  1461. }
  1462. } else {
  1463. tzdbg.is_enlarged_buf = false;
  1464. }
  1465. return ret;
  1466. }
  1467. static void tzdbg_query_encrypted_log(void)
  1468. {
  1469. int ret = 0;
  1470. uint64_t enabled;
  1471. ret = qcom_scm_query_encrypted_log_feature(&enabled);
  1472. if (ret) {
  1473. if (ret == -EIO)
  1474. pr_info("SCM_CALL : SYS CALL NOT SUPPORTED IN TZ\n");
  1475. else
  1476. pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
  1477. tzdbg.is_encrypted_log_enabled = false;
  1478. } else {
  1479. pr_warn("encrypted qseelog enabled is %d\n", enabled);
  1480. tzdbg.is_encrypted_log_enabled = enabled;
  1481. }
  1482. }
/*
 * Driver functions
 */

/*
 * Probe: set up every resource the tzdbg proc interface depends on, in
 * order: TZ version query, MEM resource mapping, optional hyp/RM log
 * bases, encrypted-log query, diag buffer (plain or encrypted variant),
 * QSEE log registration, display buffer, and finally the proc entries.
 * Each later step unwinds the earlier ones on failure.
 */
static int tz_log_probe(struct platform_device *pdev)
{
	struct resource *resource;
	void __iomem *virt_iobase;
	phys_addr_t tzdiag_phy_iobase;
	uint32_t *ptr = NULL;
	int ret = 0;

	ret = tzdbg_get_tz_version();
	if (ret)
		return ret;

	/*
	 * Get address that stores the physical location diagnostic data
	 */
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev,
				"%s: ERROR Missing MEM resource\n", __func__);
		return -ENXIO;
	}

	/*
	 * Get the debug buffer size
	 */
	debug_rw_buf_size = resource_size(resource);

	/*
	 * Map address that stores the physical location diagnostic data
	 */
	virt_iobase = devm_ioremap(&pdev->dev, resource->start,
				debug_rw_buf_size);
	if (!virt_iobase) {
		dev_err(&pdev->dev,
			"%s: ERROR could not ioremap: start=%pr, len=%u\n",
			__func__, &resource->start,
			(unsigned int)(debug_rw_buf_size));
		return -ENXIO;
	}

	/* Hypervisor/RM log regions only exist when DT says hyplog is on */
	if (pdev->dev.of_node) {
		tzdbg.is_hyplog_enabled = of_property_read_bool(
			(&pdev->dev)->of_node, "qcom,hyplog-enabled");
		if (tzdbg.is_hyplog_enabled) {
			ret = __update_hypdbg_base(pdev, virt_iobase);
			if (ret) {
				dev_err(&pdev->dev,
					"%s: fail to get hypdbg_base ret %d\n",
					__func__, ret);
				return -EINVAL;
			}
			ret = __update_rmlog_base(pdev, virt_iobase);
			if (ret) {
				dev_err(&pdev->dev,
					"%s: fail to get rmlog_base ret %d\n",
					__func__, ret);
				return -EINVAL;
			}
		} else {
			dev_info(&pdev->dev, "Hyp log service not support\n");
		}
	} else {
		dev_dbg(&pdev->dev, "Device tree data is not found\n");
	}

	/*
	 * Retrieve the address of diagnostic data
	 */
	tzdiag_phy_iobase = readl_relaxed(virt_iobase);

	tzdbg_query_encrypted_log();

	/*
	 * Map the diagnostic information area if encryption is disabled
	 */
	if (!tzdbg.is_encrypted_log_enabled) {
		tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
				tzdiag_phy_iobase, debug_rw_buf_size);
		if (!tzdbg.virt_iobase) {
			dev_err(&pdev->dev,
				"%s: could not ioremap: start=%pr, len=%u\n",
				__func__, &tzdiag_phy_iobase,
				debug_rw_buf_size);
			return -ENXIO;
		}
		/* allocate diag_buf */
		ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
		tzdbg.diag_buf = (struct tzdbg_t *)ptr;
	} else {
		/* Full encryption support only exists on diag v9.22+ */
		if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
			(tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
			tzdbg.is_full_encrypted_tz_logs_supported = true;
		if (pdev->dev.of_node) {
			tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
				(&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
		}
	}

	/* register unencrypted qsee log buffer */
	ret = tzdbg_register_qsee_log_buf(pdev);
	if (ret)
		goto exit_free_diag_buf;

	/* allocate encrypted qsee and tz log buffer */
	ret = tzdbg_allocate_encrypted_log_buf(pdev);
	if (ret) {
		dev_err(&pdev->dev,
			" %s: Failed to allocate encrypted log buffer\n",
			__func__);
		goto exit_free_qsee_log_buf;
	}

	/* allocate display_buf; guard the *4 multiplication first */
	if (UINT_MAX/4 < qseelog_buf_size) {
		pr_err("display_buf_size integer overflow\n");
		goto exit_free_qsee_log_buf;
	}
	display_buf_size = qseelog_buf_size * 4;
	tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
			&disp_buf_paddr, GFP_KERNEL);
	if (tzdbg.disp_buf == NULL) {
		ret = -ENOMEM;
		goto exit_free_encr_log_buf;
	}

	if (tzdbg_fs_init(pdev))
		goto exit_free_disp_buf;
	return 0;

	/* NOTE(review): all error paths below collapse to -ENXIO, losing
	 * the specific error code — intentional? */
exit_free_disp_buf:
	dma_free_coherent(&pdev->dev, display_buf_size,
			(void *)tzdbg.disp_buf, disp_buf_paddr);
exit_free_encr_log_buf:
	tzdbg_free_encrypted_log_buf(pdev);
exit_free_qsee_log_buf:
	tzdbg_free_qsee_log_buf(pdev);
exit_free_diag_buf:
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return -ENXIO;
}
/*
 * Remove: tear down everything probe set up, in reverse order — proc
 * entries, display buffer, encrypted log buffers, QSEE log buffer, and
 * (when unencrypted) the diag snapshot buffer.
 */
static int tz_log_remove(struct platform_device *pdev)
{
	tzdbg_fs_exit(pdev);
	dma_free_coherent(&pdev->dev, display_buf_size,
			(void *)tzdbg.disp_buf, disp_buf_paddr);
	tzdbg_free_encrypted_log_buf(pdev);
	tzdbg_free_qsee_log_buf(pdev);
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return 0;
}
/* Device-tree match table: binds this driver to "qcom,tz-log" nodes. */
static const struct of_device_id tzlog_match[] = {
	{.compatible = "qcom,tz-log"},
	{}
};

static struct platform_driver tz_log_driver = {
	.probe = tz_log_probe,
	.remove = tz_log_remove,
	.driver = {
		.name = "tz_log",
		.of_match_table = tzlog_match,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(tz_log_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TZ Log driver");