tz_log.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/debugfs.h>
  6. #include <linux/errno.h>
  7. #include <linux/delay.h>
  8. #include <linux/io.h>
  9. #include <linux/msm_ion.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/slab.h>
  14. #include <linux/string.h>
  15. #include <linux/types.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/of.h>
  18. #include <linux/dma-buf.h>
  19. #include <linux/qcom_scm.h>
  20. #include <soc/qcom/qseecomi.h>
  21. #include <linux/qtee_shmbridge.h>
  22. #include <linux/proc_fs.h>
/* QSEE log ring buffer size: 32 KB (legacy) */
#define QSEE_LOG_BUF_SIZE 0x8000
/* Enlarged QSEE log buffer size: 128 KB by default (used with is_enlarged_buf) */
#define QSEE_LOG_BUF_SIZE_V2 0x20000
/* TZ Diagnostic Area legacy version number */
#define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2
/*
 * TZ Diagnostic Area version fields: the feature-version word packs
 * major and minor numbers, 10 bits each, at the shifts below.
 */
#define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */
#define TZBSP_FVER_MAJOR_SHIFT 22
#define TZBSP_FVER_MINOR_SHIFT 12
#define TZBSP_DIAG_MAJOR_VERSION_V9 9
#define TZBSP_DIAG_MINOR_VERSION_V2 2
#define TZBSP_DIAG_MINOR_VERSION_V21 3
#define TZBSP_DIAG_MINOR_VERSION_V22 4
/* TZ Diag Feature Version Id */
#define QCOM_SCM_FEAT_DIAG_ID 0x06
/*
 * Maximum number of CPUs covered by the per-CPU diagnostic tables
 */
#define TZBSP_MAX_CPU_COUNT 0x08
/*
 * Number of VMID table entries
 */
#define TZBSP_DIAG_NUM_OF_VMID 16
/*
 * VMID description length (ASCII text)
 */
#define TZBSP_DIAG_VMID_DESC_LEN 7
/*
 * Number of interrupt-info slots
 */
#define TZBSP_DIAG_INT_NUM 32
/*
 * Length of descriptive name associated with an interrupt
 */
#define TZBSP_MAX_INT_DESC 16
/*
 * TZ 3.X version info
 */
#define QSEE_VERSION_TZ_3_X 0x800000
/*
 * TZ 4.X version info
 */
#define QSEE_VERSION_TZ_4_X 0x1000000
/* Sizes used by the encrypted-log format: wrapped AES key, nonce, tag */
#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
#define TZBSP_NONCE_LEN 12
#define TZBSP_TAG_LEN 16
/* Ids selecting which encrypted log (TZ or QSEE) is requested */
#define ENCRYPTED_TZ_LOG_ID 0
#define ENCRYPTED_QSEE_LOG_ID 1
/*
 * Directory name for TZ DBG log entries
 */
#define TZDBG_DIR_NAME "tzdbg"
/*
 * VMID Table entry: maps a Virtual Machine Identifier to a short
 * human-readable tag.
 */
struct tzdbg_vmid_t {
    uint8_t vmid; /* Virtual Machine Identifier */
    uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
};
/*
 * Boot Info Table (32-bit layout, one record per CPU)
 */
struct tzdbg_boot_info_t {
    uint32_t wb_entry_cnt;  /* Warmboot entry CPU Counter */
    uint32_t wb_exit_cnt;   /* Warmboot exit CPU Counter */
    uint32_t pc_entry_cnt;  /* Power Collapse entry CPU Counter */
    uint32_t pc_exit_cnt;   /* Power Collapse exit CPU counter */
    uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
    uint32_t spare;         /* Reserved for future use. */
};
/*
 * Boot Info Table for 64-bit TZ (tz_version >= QSEE_VERSION_TZ_3_X),
 * one record per CPU; adds PSCI counters and a 64-bit jump address.
 */
struct tzdbg_boot_info64_t {
    uint32_t wb_entry_cnt;   /* Warmboot entry CPU Counter */
    uint32_t wb_exit_cnt;    /* Warmboot exit CPU Counter */
    uint32_t pc_entry_cnt;   /* Power Collapse entry CPU Counter */
    uint32_t pc_exit_cnt;    /* Power Collapse exit CPU counter */
    uint32_t psci_entry_cnt; /* PSCI syscall entry CPU Counter */
    uint32_t psci_exit_cnt;  /* PSCI syscall exit CPU Counter */
    uint64_t warm_jmp_addr;  /* Last Warmboot Jump Address */
    uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
};
/*
 * Reset Info Table (one record per CPU)
 */
struct tzdbg_reset_info_t {
    uint32_t reset_type; /* Reset Reason */
    uint32_t reset_cnt;  /* Number of resets occurred/CPU */
};
/*
 * Interrupt Info Table entry (TZ version < QSEE_VERSION_TZ_4_X)
 */
struct tzdbg_int_t {
    /*
     * Type of Interrupt/exception
     */
    uint16_t int_info;
    /*
     * Availability of the slot
     */
    uint8_t avail;
    /*
     * Reserved for future use
     */
    uint8_t spare;
    /*
     * Interrupt # for IRQ and FIQ
     */
    uint32_t int_num;
    /*
     * ASCII text describing type of interrupt e.g:
     * Secure Timer, EBI XPU. This string is always null terminated,
     * supporting at most TZBSP_MAX_INT_DESC characters.
     * Any additional characters are truncated.
     */
    uint8_t int_desc[TZBSP_MAX_INT_DESC];
    uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};
/*
 * Interrupt Info Table entry used in tz version >= 4.X.
 * Identical to struct tzdbg_int_t except the per-CPU counters
 * shrink from uint64_t to uint32_t.
 */
struct tzdbg_int_t_tz40 {
    uint16_t int_info;
    uint8_t avail;
    uint8_t spare;
    uint32_t int_num;
    uint8_t int_desc[TZBSP_MAX_INT_DESC];
    uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
};
/* Warm boot (wakeup) reason, one record per core */
struct tzbsp_diag_wakeup_info_t {
    /* Wake source info : APCS_GICC_HPPIR */
    uint32_t HPPIR;
    /* Wake source info : APCS_GICC_AHPPIR */
    uint32_t AHPPIR;
};
/*
 * Log ring buffer position (legacy layout, 16-bit fields).
 * `wrap` counts completed wrap-arounds; `offset` is the position
 * within the ring.
 */
struct tzdbg_log_pos_t {
    uint16_t wrap;
    uint16_t offset;
};
/* Log ring buffer position, v2 layout (32-bit fields) */
struct tzdbg_log_pos_v2_t {
    uint32_t wrap;
    uint32_t offset;
};
/*
 * Log ring buffer
 */
struct tzdbg_log_t {
    struct tzdbg_log_pos_t log_pos;
    /* open ended array to the end of the 4K IMEM buffer */
    uint8_t log_buf[];
};
/* Log ring buffer, v2 position layout */
struct tzdbg_log_v2_t {
    struct tzdbg_log_pos_v2_t log_pos;
    /* open ended array to the end of the 4K IMEM buffer */
    uint8_t log_buf[];
};
/* Per-chunk metadata for an encrypted diag log */
struct tzbsp_encr_info_for_log_chunk_t {
    uint32_t size_to_encr;          /* size of the chunk to encrypt */
    uint8_t nonce[TZBSP_NONCE_LEN];
    uint8_t tag[TZBSP_TAG_LEN];
};
/*
 * Only `ENTIRE_LOG` will be used unless the
 * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
 * If this is true, the diag log will be encrypted in two
 * separate chunks: a smaller chunk containing only error
 * fatal logs and a bigger "rest of the log" chunk. In this
 * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
 * used instead of `ENTIRE_LOG`.
 */
enum tzbsp_encr_info_for_log_chunks_idx_t {
    BIG_LOG_CHUNK = 0,
    ENTIRE_LOG = 1,
    ERR_FATAL_LOG_CHUNK = 1,
    MAX_NUM_OF_CHUNKS,
};
/* Encryption metadata for all chunks plus the wrapped AES key */
struct tzbsp_encr_info_t {
    uint32_t num_of_chunks;
    struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
    uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
};
/*
 * Diagnostic Table
 * Note: This is the reference data structure for tz diagnostic table
 * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
 * copied into buffer from i/o memory.
 */
struct tzdbg_t {
    uint32_t magic_num;
    uint32_t version;
    /*
     * Number of CPU's
     */
    uint32_t cpu_count;
    /*
     * Offset of VMID Table
     */
    uint32_t vmid_info_off;
    /*
     * Offset of Boot Table
     */
    uint32_t boot_info_off;
    /*
     * Offset of Reset info Table
     */
    uint32_t reset_info_off;
    /*
     * Offset of Interrupt info Table. The uint32_t interrupt count is
     * stored immediately before this offset (see _disp_tz_interrupt_stats).
     */
    uint32_t int_info_off;
    /*
     * Ring Buffer Offset
     */
    uint32_t ring_off;
    /*
     * Ring Buffer Length
     */
    uint32_t ring_len;
    /* Offset for Wakeup info */
    uint32_t wakeup_info_off;
    union {
        /* The elements in below structure have to be used for TZ where
         * diag version = TZBSP_DIAG_MINOR_VERSION_V2
         */
        struct {
            /*
             * VMID to EE Mapping
             */
            struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
            /*
             * Boot Info
             */
            struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
            /*
             * Reset Info
             */
            struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
            uint32_t num_interrupts;
            struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
            /* Wake up info */
            struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
            uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
            uint8_t nonce[TZBSP_NONCE_LEN];
            uint8_t tag[TZBSP_TAG_LEN];
        };
        /* The elements in below structure have to be used for TZ where
         * diag version = TZBSP_DIAG_MINOR_VERSION_V21
         */
        struct {
            uint32_t encr_info_for_log_off;
            /*
             * VMID to EE Mapping
             */
            struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
            /*
             * Boot Info
             */
            struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT];
            /*
             * Reset Info
             */
            struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
            uint32_t num_interrupts_v2;
            struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM];
            /* Wake up info */
            struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
            struct tzbsp_encr_info_t encr_info_for_log;
        };
    };
    /*
     * We need at least 2K for the ring buffer
     */
    struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
};
/* Hypervisor log ring position (16-bit wrap/offset) */
struct hypdbg_log_pos_t {
    uint16_t wrap;
    uint16_t offset;
};
/* RM (Resource Manager) log header; layout defined by the RM producer */
struct rmdbg_log_hdr_t {
    uint32_t write_idx;
    uint32_t size;
};
/* Reader-side position bookkeeping for the RM log */
struct rmdbg_log_pos_t {
    uint32_t read_idx;
    uint32_t size;
};
/* Per-CPU warm boot counters for the hypervisor */
struct hypdbg_boot_info_t {
    uint32_t warm_entry_cnt;
    uint32_t warm_exit_cnt;
};
/* Hypervisor diagnostic area header followed by its log ring */
struct hypdbg_t {
    /* Magic Number */
    uint32_t magic_num;
    /* Number of CPU's */
    uint32_t cpu_count;
    /* Ring Buffer Offset */
    uint32_t ring_off;
    /* Ring buffer position mgmt */
    struct hypdbg_log_pos_t log_pos;
    uint32_t log_len;
    /* S2 fault numbers */
    uint32_t s2_fault_counter;
    /* Boot Info */
    struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
    /* Ring buffer pointer */
    uint8_t log_buf_p[];
};
/*
 * Enumeration order for VMID's; also indexes tzdbg.stat[] and
 * tzdbg.debug_tz[].
 */
enum tzdbg_stats_type {
    TZDBG_BOOT = 0,
    TZDBG_RESET,
    TZDBG_INTERRUPT,
    TZDBG_VMID,
    TZDBG_GENERAL,
    TZDBG_LOG,
    TZDBG_QSEE_LOG,
    TZDBG_HYP_GENERAL,
    TZDBG_HYP_LOG,
    TZDBG_RM_LOG,
    TZDBG_STATS_MAX
};
/* One stat entry: its name and the last-formatted display text */
struct tzdbg_stat {
    size_t display_len;    /* display bookkeeping (managed by read path, not in this chunk) */
    size_t display_offset;
    char *name;            /* entry name, e.g. "boot", "qsee_log" */
    char *data;            /* points at tzdbg.disp_buf after a _disp_* call */
};
/* Driver-wide state */
struct tzdbg {
    void __iomem *virt_iobase;        /* mapped TZ diag region */
    void __iomem *hyp_virt_iobase;    /* mapped hypervisor diag region */
    void __iomem *rmlog_virt_iobase;  /* mapped RM log region */
    struct tzdbg_t *diag_buf;         /* kernel snapshot of the TZ diag region */
    struct hypdbg_t *hyp_diag_buf;    /* kernel snapshot of the hyp diag region */
    uint8_t *rm_diag_buf;
    char *disp_buf;                   /* shared formatting buffer for all stats */
    int debug_tz[TZDBG_STATS_MAX];
    struct tzdbg_stat stat[TZDBG_STATS_MAX];
    uint32_t hyp_debug_rw_buf_size;
    uint32_t rmlog_rw_buf_size;
    bool is_hyplog_enabled;
    uint32_t tz_version;
    bool is_encrypted_log_enabled;
    bool is_enlarged_buf;
    bool is_full_encrypted_tz_logs_supported;
    bool is_full_encrypted_tz_logs_enabled;
    int tz_diag_minor_version;
    int tz_diag_major_version;
};
/* Header of an encrypted log blob as returned by TZ */
struct tzbsp_encr_log_t {
    /* Magic Number */
    uint32_t magic_num;
    /* version NUMBER */
    uint32_t version;
    /* encrypted log size */
    uint32_t encr_log_buff_size;
    /* Wrap value*/
    uint16_t wrap_count;
    /* AES encryption key wrapped up with oem public key*/
    uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
    /* Nonce used for encryption*/
    uint8_t nonce[TZBSP_NONCE_LEN];
    /* Tag to be used for Validation */
    uint8_t tag[TZBSP_TAG_LEN];
    /* Encrypted log buffer */
    uint8_t log_buf[1];
};
/* Bookkeeping for one encrypted-log staging buffer (shmbridge-backed) */
struct encrypted_log_info {
    phys_addr_t paddr;
    void *vaddr;
    size_t size;
    uint64_t shmb_handle; /* qtee_shmbridge handle */
};
/*
 * Driver singleton; the .name strings identify each stat entry
 * (used as entry names by setup code outside this chunk).
 */
static struct tzdbg tzdbg = {
    .stat[TZDBG_BOOT].name = "boot",
    .stat[TZDBG_RESET].name = "reset",
    .stat[TZDBG_INTERRUPT].name = "interrupt",
    .stat[TZDBG_VMID].name = "vmid",
    .stat[TZDBG_GENERAL].name = "general",
    .stat[TZDBG_LOG].name = "log",
    .stat[TZDBG_QSEE_LOG].name = "qsee_log",
    .stat[TZDBG_HYP_GENERAL].name = "hyp_general",
    .stat[TZDBG_HYP_LOG].name = "hyp_log",
    .stat[TZDBG_RM_LOG].name = "rm_log",
};
/* QSEE log ring buffer; legacy vs v2 layout — only one is active at runtime */
static struct tzdbg_log_t *g_qsee_log;
static struct tzdbg_log_v2_t *g_qsee_log_v2;
/* DMA handle backing the QSEE log allocation (assigned outside this chunk) */
static dma_addr_t coh_pmem;
/* Size of the TZ diag region snapshot copied in from virt_iobase */
static uint32_t debug_rw_buf_size;
/* Capacity bound used when filling tzdbg.disp_buf (see __disp_rm_log_stats) */
static uint32_t display_buf_size;
/* Size of the QSEE log buffer */
static uint32_t qseelog_buf_size;
/* Physical address of the display buffer */
static phys_addr_t disp_buf_paddr;
/* shmbridge handle for the QSEE log buffer */
static uint64_t qseelog_shmbridge_handle;
/* Encrypted-log staging buffers for the QSEE and TZ logs */
static struct encrypted_log_info enc_qseelog_info;
static struct encrypted_log_info enc_tzlog_info;
  425. /*
  426. * Debugfs data structure and functions
  427. */
  428. static int _disp_tz_general_stats(void)
  429. {
  430. int len = 0;
  431. len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
  432. " Version : 0x%x\n"
  433. " Magic Number : 0x%x\n"
  434. " Number of CPU : %d\n",
  435. tzdbg.diag_buf->version,
  436. tzdbg.diag_buf->magic_num,
  437. tzdbg.diag_buf->cpu_count);
  438. tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
  439. return len;
  440. }
  441. static int _disp_tz_vmid_stats(void)
  442. {
  443. int i, num_vmid;
  444. int len = 0;
  445. struct tzdbg_vmid_t *ptr;
  446. ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
  447. tzdbg.diag_buf->vmid_info_off);
  448. num_vmid = ((tzdbg.diag_buf->boot_info_off -
  449. tzdbg.diag_buf->vmid_info_off)/
  450. (sizeof(struct tzdbg_vmid_t)));
  451. for (i = 0; i < num_vmid; i++) {
  452. if (ptr->vmid < 0xFF) {
  453. len += scnprintf(tzdbg.disp_buf + len,
  454. (debug_rw_buf_size - 1) - len,
  455. " 0x%x %s\n",
  456. (uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
  457. }
  458. if (len > (debug_rw_buf_size - 1)) {
  459. pr_warn("%s: Cannot fit all info into the buffer\n",
  460. __func__);
  461. break;
  462. }
  463. ptr++;
  464. }
  465. tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
  466. return len;
  467. }
  468. static int _disp_tz_boot_stats(void)
  469. {
  470. int i;
  471. int len = 0;
  472. struct tzdbg_boot_info_t *ptr = NULL;
  473. struct tzdbg_boot_info64_t *ptr_64 = NULL;
  474. pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
  475. if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
  476. ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
  477. tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
  478. } else {
  479. ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
  480. tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
  481. }
  482. for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
  483. if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
  484. len += scnprintf(tzdbg.disp_buf + len,
  485. (debug_rw_buf_size - 1) - len,
  486. " CPU #: %d\n"
  487. " Warmboot jump address : 0x%llx\n"
  488. " Warmboot entry CPU counter : 0x%x\n"
  489. " Warmboot exit CPU counter : 0x%x\n"
  490. " Power Collapse entry CPU counter : 0x%x\n"
  491. " Power Collapse exit CPU counter : 0x%x\n"
  492. " Psci entry CPU counter : 0x%x\n"
  493. " Psci exit CPU counter : 0x%x\n"
  494. " Warmboot Jump Address Instruction : 0x%x\n",
  495. i, (uint64_t)ptr_64->warm_jmp_addr,
  496. ptr_64->wb_entry_cnt,
  497. ptr_64->wb_exit_cnt,
  498. ptr_64->pc_entry_cnt,
  499. ptr_64->pc_exit_cnt,
  500. ptr_64->psci_entry_cnt,
  501. ptr_64->psci_exit_cnt,
  502. ptr_64->warm_jmp_instr);
  503. if (len > (debug_rw_buf_size - 1)) {
  504. pr_warn("%s: Cannot fit all info into the buffer\n",
  505. __func__);
  506. break;
  507. }
  508. ptr_64++;
  509. } else {
  510. len += scnprintf(tzdbg.disp_buf + len,
  511. (debug_rw_buf_size - 1) - len,
  512. " CPU #: %d\n"
  513. " Warmboot jump address : 0x%x\n"
  514. " Warmboot entry CPU counter: 0x%x\n"
  515. " Warmboot exit CPU counter : 0x%x\n"
  516. " Power Collapse entry CPU counter: 0x%x\n"
  517. " Power Collapse exit CPU counter : 0x%x\n",
  518. i, ptr->warm_jmp_addr,
  519. ptr->wb_entry_cnt,
  520. ptr->wb_exit_cnt,
  521. ptr->pc_entry_cnt,
  522. ptr->pc_exit_cnt);
  523. if (len > (debug_rw_buf_size - 1)) {
  524. pr_warn("%s: Cannot fit all info into the buffer\n",
  525. __func__);
  526. break;
  527. }
  528. ptr++;
  529. }
  530. }
  531. tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
  532. return len;
  533. }
  534. static int _disp_tz_reset_stats(void)
  535. {
  536. int i;
  537. int len = 0;
  538. struct tzdbg_reset_info_t *ptr;
  539. ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
  540. tzdbg.diag_buf->reset_info_off);
  541. for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
  542. len += scnprintf(tzdbg.disp_buf + len,
  543. (debug_rw_buf_size - 1) - len,
  544. " CPU #: %d\n"
  545. " Reset Type (reason) : 0x%x\n"
  546. " Reset counter : 0x%x\n",
  547. i, ptr->reset_type, ptr->reset_cnt);
  548. if (len > (debug_rw_buf_size - 1)) {
  549. pr_warn("%s: Cannot fit all info into the buffer\n",
  550. __func__);
  551. break;
  552. }
  553. ptr++;
  554. }
  555. tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
  556. return len;
  557. }
  558. static int _disp_tz_interrupt_stats(void)
  559. {
  560. int i, j;
  561. int len = 0;
  562. int *num_int;
  563. void *ptr;
  564. struct tzdbg_int_t *tzdbg_ptr;
  565. struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
  566. num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
  567. (tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
  568. ptr = ((unsigned char *)tzdbg.diag_buf +
  569. tzdbg.diag_buf->int_info_off);
  570. pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
  571. if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
  572. tzdbg_ptr = ptr;
  573. for (i = 0; i < (*num_int); i++) {
  574. len += scnprintf(tzdbg.disp_buf + len,
  575. (debug_rw_buf_size - 1) - len,
  576. " Interrupt Number : 0x%x\n"
  577. " Type of Interrupt : 0x%x\n"
  578. " Description of interrupt : %s\n",
  579. tzdbg_ptr->int_num,
  580. (uint32_t)tzdbg_ptr->int_info,
  581. (uint8_t *)tzdbg_ptr->int_desc);
  582. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  583. len += scnprintf(tzdbg.disp_buf + len,
  584. (debug_rw_buf_size - 1) - len,
  585. " int_count on CPU # %d : %u\n",
  586. (uint32_t)j,
  587. (uint32_t)tzdbg_ptr->int_count[j]);
  588. }
  589. len += scnprintf(tzdbg.disp_buf + len,
  590. debug_rw_buf_size - 1, "\n");
  591. if (len > (debug_rw_buf_size - 1)) {
  592. pr_warn("%s: Cannot fit all info into buf\n",
  593. __func__);
  594. break;
  595. }
  596. tzdbg_ptr++;
  597. }
  598. } else {
  599. tzdbg_ptr_tz40 = ptr;
  600. for (i = 0; i < (*num_int); i++) {
  601. len += scnprintf(tzdbg.disp_buf + len,
  602. (debug_rw_buf_size - 1) - len,
  603. " Interrupt Number : 0x%x\n"
  604. " Type of Interrupt : 0x%x\n"
  605. " Description of interrupt : %s\n",
  606. tzdbg_ptr_tz40->int_num,
  607. (uint32_t)tzdbg_ptr_tz40->int_info,
  608. (uint8_t *)tzdbg_ptr_tz40->int_desc);
  609. for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
  610. len += scnprintf(tzdbg.disp_buf + len,
  611. (debug_rw_buf_size - 1) - len,
  612. " int_count on CPU # %d : %u\n",
  613. (uint32_t)j,
  614. (uint32_t)tzdbg_ptr_tz40->int_count[j]);
  615. }
  616. len += scnprintf(tzdbg.disp_buf + len,
  617. debug_rw_buf_size - 1, "\n");
  618. if (len > (debug_rw_buf_size - 1)) {
  619. pr_warn("%s: Cannot fit all info into buf\n",
  620. __func__);
  621. break;
  622. }
  623. tzdbg_ptr_tz40++;
  624. }
  625. }
  626. tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
  627. return len;
  628. }
  629. static int _disp_tz_log_stats_legacy(void)
  630. {
  631. int len = 0;
  632. unsigned char *ptr;
  633. ptr = (unsigned char *)tzdbg.diag_buf +
  634. tzdbg.diag_buf->ring_off;
  635. len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
  636. "%s\n", ptr);
  637. tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
  638. return len;
  639. }
/*
 * Stream new bytes from a TZ log ring buffer (legacy 16-bit positions)
 * into tzdbg.disp_buf.
 *
 * @log:       live ring buffer; its write position is advanced by TZ
 * @log_start: reader's saved position, advanced as bytes are consumed
 * @log_len:   ring buffer length in bytes
 * @count:     maximum number of bytes the caller requested
 * @buf_idx:   stat index being serviced (e.g. TZDBG_LOG, TZDBG_QSEE_LOG)
 *
 * If the writer has lapped the reader, the read position is resynced to
 * just past the writer. When the buffer is empty the function naps in
 * 50 ms intervals until data arrives, returning 0 if interrupted by a
 * signal. Returns the number of bytes copied into the display buffer.
 */
static int _disp_log_stats(struct tzdbg_log_t *log,
    struct tzdbg_log_pos_t *log_start, uint32_t log_len,
    size_t count, uint32_t buf_idx)
{
    uint32_t wrap_start;
    uint32_t wrap_end;
    uint32_t wrap_cnt;
    int max_len;
    int len = 0;
    int i = 0;

    wrap_start = log_start->wrap;
    wrap_end = log->log_pos.wrap;
    /* Calculate difference in # of buffer wrap-arounds */
    if (wrap_end >= wrap_start)
        wrap_cnt = wrap_end - wrap_start;
    else {
        /* wrap counter has wrapped around, invalidate start position */
        wrap_cnt = 2;
    }
    if (wrap_cnt > 1) {
        /* end position has wrapped around more than once, */
        /* current start no longer valid */
        log_start->wrap = log->log_pos.wrap - 1;
        log_start->offset = (log->log_pos.offset + 1) % log_len;
    } else if ((wrap_cnt == 1) &&
        (log->log_pos.offset > log_start->offset)) {
        /* end position has overwritten start */
        log_start->offset = (log->log_pos.offset + 1) % log_len;
    }
    pr_debug("diag_buf wrap = %u, offset = %u\n",
        log->log_pos.wrap, log->log_pos.offset);
    while (log_start->offset == log->log_pos.offset) {
        /*
         * No data in ring buffer,
         * so we'll hang around until something happens
         */
        unsigned long t = msleep_interruptible(50);
        if (t != 0) {
            /* Some event woke us up, so let's quit */
            return 0;
        }
        /* re-snapshot the TZ diag region so we see new writes */
        if (buf_idx == TZDBG_LOG)
            memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
                debug_rw_buf_size);
    }
    /* clamp the transfer to the size of one diag-region snapshot */
    max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
    pr_debug("diag_buf wrap = %u, offset = %u\n",
        log->log_pos.wrap, log->log_pos.offset);
    /*
     * Read from ring buff while there is data and space in return buff
     */
    while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
        tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
        log_start->offset = (log_start->offset + 1) % log_len;
        /* advancing past the end of the ring counts as one more wrap */
        if (log_start->offset == 0)
            ++log_start->wrap;
        ++len;
    }
    /*
     * return buffer to caller
     */
    tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
    return len;
}
/*
 * v2 counterpart of _disp_log_stats(): identical ring-reader logic, but
 * operating on the v2 log layout whose wrap/offset fields are 32-bit
 * (struct tzdbg_log_v2_t / struct tzdbg_log_pos_v2_t).
 *
 * @log:       live ring buffer; its write position is advanced by TZ
 * @log_start: reader's saved position, advanced as bytes are consumed
 * @log_len:   ring buffer length in bytes
 * @count:     maximum number of bytes the caller requested
 * @buf_idx:   stat index being serviced (e.g. TZDBG_LOG, TZDBG_QSEE_LOG)
 *
 * Blocks in 50 ms naps while the buffer is empty; returns 0 if the
 * sleep is interrupted by a signal, otherwise the number of bytes
 * copied into tzdbg.disp_buf.
 */
static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
    struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
    size_t count, uint32_t buf_idx)
{
    uint32_t wrap_start;
    uint32_t wrap_end;
    uint32_t wrap_cnt;
    int max_len;
    int len = 0;
    int i = 0;

    wrap_start = log_start->wrap;
    wrap_end = log->log_pos.wrap;
    /* Calculate difference in # of buffer wrap-arounds */
    if (wrap_end >= wrap_start)
        wrap_cnt = wrap_end - wrap_start;
    else {
        /* wrap counter has wrapped around, invalidate start position */
        wrap_cnt = 2;
    }
    if (wrap_cnt > 1) {
        /* end position has wrapped around more than once, */
        /* current start no longer valid */
        log_start->wrap = log->log_pos.wrap - 1;
        log_start->offset = (log->log_pos.offset + 1) % log_len;
    } else if ((wrap_cnt == 1) &&
        (log->log_pos.offset > log_start->offset)) {
        /* end position has overwritten start */
        log_start->offset = (log->log_pos.offset + 1) % log_len;
    }
    pr_debug("diag_buf wrap = %u, offset = %u\n",
        log->log_pos.wrap, log->log_pos.offset);
    while (log_start->offset == log->log_pos.offset) {
        /*
         * No data in ring buffer,
         * so we'll hang around until something happens
         */
        unsigned long t = msleep_interruptible(50);
        if (t != 0) {
            /* Some event woke us up, so let's quit */
            return 0;
        }
        /* re-snapshot the TZ diag region so we see new writes */
        if (buf_idx == TZDBG_LOG)
            memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
                debug_rw_buf_size);
    }
    /* clamp the transfer to the size of one diag-region snapshot */
    max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
    pr_debug("diag_buf wrap = %u, offset = %u\n",
        log->log_pos.wrap, log->log_pos.offset);
    /*
     * Read from ring buff while there is data and space in return buff
     */
    while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
        tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
        log_start->offset = (log_start->offset + 1) % log_len;
        /* advancing past the end of the ring counts as one more wrap */
        if (log_start->offset == 0)
            ++log_start->wrap;
        ++len;
    }
    /*
     * return buffer to caller
     */
    tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
    return len;
}
/*
 * Hypervisor counterpart of _disp_log_stats(): streams new bytes from
 * the hypervisor log ring into tzdbg.disp_buf. Position state lives in
 * tzdbg.hyp_diag_buf->log_pos; the snapshot is refreshed from
 * hyp_virt_iobase on every empty-buffer wait (no buf_idx check, unlike
 * the TZ variant).
 *
 * @log:       ring-buffer byte array within the hyp snapshot
 * @log_start: reader's saved position, advanced as bytes are consumed
 * @log_len:   ring buffer length in bytes
 * @count:     maximum number of bytes the caller requested
 * @buf_idx:   stat index being serviced (e.g. TZDBG_HYP_LOG)
 *
 * Blocks in 50 ms naps while the buffer is empty; returns 0 if the
 * sleep is interrupted by a signal, otherwise the number of bytes
 * copied into the display buffer.
 */
static int __disp_hyp_log_stats(uint8_t *log,
    struct hypdbg_log_pos_t *log_start, uint32_t log_len,
    size_t count, uint32_t buf_idx)
{
    struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
    unsigned long t = 0;
    uint32_t wrap_start;
    uint32_t wrap_end;
    uint32_t wrap_cnt;
    int max_len;
    int len = 0;
    int i = 0;

    wrap_start = log_start->wrap;
    wrap_end = hyp->log_pos.wrap;
    /* Calculate difference in # of buffer wrap-arounds */
    if (wrap_end >= wrap_start)
        wrap_cnt = wrap_end - wrap_start;
    else {
        /* wrap counter has wrapped around, invalidate start position */
        wrap_cnt = 2;
    }
    if (wrap_cnt > 1) {
        /* end position has wrapped around more than once, */
        /* current start no longer valid */
        log_start->wrap = hyp->log_pos.wrap - 1;
        log_start->offset = (hyp->log_pos.offset + 1) % log_len;
    } else if ((wrap_cnt == 1) &&
        (hyp->log_pos.offset > log_start->offset)) {
        /* end position has overwritten start */
        log_start->offset = (hyp->log_pos.offset + 1) % log_len;
    }
    while (log_start->offset == hyp->log_pos.offset) {
        /*
         * No data in ring buffer,
         * so we'll hang around until something happens
         */
        t = msleep_interruptible(50);
        if (t != 0) {
            /* Some event woke us up, so let's quit */
            return 0;
        }
        /* TZDBG_HYP_LOG */
        memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
            tzdbg.hyp_debug_rw_buf_size);
    }
    /* clamp the transfer to the size of one hyp-region snapshot */
    max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
        tzdbg.hyp_debug_rw_buf_size : count;
    /*
     * Read from ring buff while there is data and space in return buff
     */
    while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
        tzdbg.disp_buf[i++] = log[log_start->offset];
        log_start->offset = (log_start->offset + 1) % log_len;
        /* advancing past the end of the ring counts as one more wrap */
        if (log_start->offset == 0)
            ++log_start->wrap;
        ++len;
    }
    /*
     * return buffer to caller
     */
    tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
    return len;
}
  831. static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len)
  832. {
  833. uint32_t i = 0;
  834. /*
  835. * Transfer data from rm dialog buff to display buffer in user space
  836. */
  837. while ((i < max_len) && (i < display_buf_size)) {
  838. tzdbg.disp_buf[i] = log_ptr[i];
  839. i++;
  840. }
  841. if (i != max_len)
  842. pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n",
  843. i, display_buf_size);
  844. tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf;
  845. return i;
  846. }
  847. static int print_text(char *intro_message,
  848. unsigned char *text_addr,
  849. unsigned int size,
  850. char *buf, uint32_t buf_len)
  851. {
  852. unsigned int i;
  853. int len = 0;
  854. pr_debug("begin address %p, size %d\n", text_addr, size);
  855. len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
  856. for (i = 0; i < size; i++) {
  857. if (buf_len <= len + 6) {
  858. pr_err("buffer not enough, buf_len %d, len %d\n",
  859. buf_len, len);
  860. return buf_len;
  861. }
  862. len += scnprintf(buf + len, buf_len - len, "%02hhx ",
  863. text_addr[i]);
  864. if ((i & 0x1f) == 0x1f)
  865. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  866. }
  867. len += scnprintf(buf + len, buf_len - len, "%c", '\n');
  868. return len;
  869. }
  870. static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
  871. enum tzdbg_stats_type type, uint32_t log_id)
  872. {
  873. int ret = 0, len = 0;
  874. struct tzbsp_encr_log_t *encr_log_head;
  875. uint32_t size = 0;
  876. if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
  877. (tzdbg.is_full_encrypted_tz_logs_enabled))
  878. pr_info("TZ not supporting full encrypted log functionality\n");
  879. ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
  880. enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported,
  881. tzdbg.is_full_encrypted_tz_logs_enabled);
  882. if (ret)
  883. return 0;
  884. encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
  885. pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
  886. display_buf_size, encr_log_head->encr_log_buff_size);
  887. size = encr_log_head->encr_log_buff_size;
  888. len += scnprintf(tzdbg.disp_buf + len,
  889. (display_buf_size - 1) - len,
  890. "\n-------- New Encrypted %s --------\n",
  891. ((log_id == ENCRYPTED_QSEE_LOG_ID) ?
  892. "QSEE Log" : "TZ Dialog"));
  893. len += scnprintf(tzdbg.disp_buf + len,
  894. (display_buf_size - 1) - len,
  895. "\nMagic_Num :\n0x%x\n"
  896. "\nVerion :\n%d\n"
  897. "\nEncr_Log_Buff_Size :\n%d\n"
  898. "\nWrap_Count :\n%d\n",
  899. encr_log_head->magic_num,
  900. encr_log_head->version,
  901. encr_log_head->encr_log_buff_size,
  902. encr_log_head->wrap_count);
  903. len += print_text("\nKey : ", encr_log_head->key,
  904. TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
  905. tzdbg.disp_buf + len, display_buf_size);
  906. len += print_text("\nNonce : ", encr_log_head->nonce,
  907. TZBSP_NONCE_LEN,
  908. tzdbg.disp_buf + len, display_buf_size - len);
  909. len += print_text("\nTag : ", encr_log_head->tag,
  910. TZBSP_TAG_LEN,
  911. tzdbg.disp_buf + len, display_buf_size - len);
  912. if (len > display_buf_size - size)
  913. pr_warn("Cannot fit all info into the buffer\n");
  914. pr_debug("encrypted log size %d, disply buffer size %d, used len %d\n",
  915. size, display_buf_size, len);
  916. len += print_text("\nLog : ", encr_log_head->log_buf, size,
  917. tzdbg.disp_buf + len, display_buf_size - len);
  918. memset(enc_log_info->vaddr, 0, enc_log_info->size);
  919. tzdbg.stat[type].data = tzdbg.disp_buf;
  920. return len;
  921. }
  922. static int _disp_tz_log_stats(size_t count)
  923. {
  924. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  925. static struct tzdbg_log_pos_t log_start = {0};
  926. struct tzdbg_log_v2_t *log_v2_ptr;
  927. struct tzdbg_log_t *log_ptr;
  928. log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
  929. tzdbg.diag_buf->ring_off -
  930. offsetof(struct tzdbg_log_t, log_buf));
  931. log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
  932. tzdbg.diag_buf->ring_off -
  933. offsetof(struct tzdbg_log_v2_t, log_buf));
  934. if (!tzdbg.is_enlarged_buf)
  935. return _disp_log_stats(log_ptr, &log_start,
  936. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  937. return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
  938. tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
  939. }
  940. static int _disp_hyp_log_stats(size_t count)
  941. {
  942. static struct hypdbg_log_pos_t log_start = {0};
  943. uint8_t *log_ptr;
  944. uint32_t log_len;
  945. log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
  946. tzdbg.hyp_diag_buf->ring_off);
  947. log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;
  948. return __disp_hyp_log_stats(log_ptr, &log_start,
  949. log_len, count, TZDBG_HYP_LOG);
  950. }
/*
 * Display the RM (resource manager) log in chunks of at most @count bytes.
 * The static log_start/wrap_around pair tracks progress across successive
 * reads of the same open file: the IO buffer is snapshotted only on the
 * first read, and a final 0 is returned once everything has been consumed.
 */
static int _disp_rm_log_stats(size_t count)
{
	static struct rmdbg_log_pos_t log_start = { 0 };
	struct rmdbg_log_hdr_t *p_log_hdr = NULL;
	uint8_t *log_ptr = NULL;
	uint32_t log_len = 0;
	static bool wrap_around = { false };

	/* Return 0 to close the display file,if there is nothing else to do */
	if ((log_start.size == 0x0) && wrap_around) {
		wrap_around = false;
		return 0;
	}
	/* Copy RM log data to tzdbg diag buffer for the first time */
	/* Initialize the tracking data structure */
	if (tzdbg.rmlog_rw_buf_size != 0) {
		if (!wrap_around) {
			memcpy_fromio((void *)tzdbg.rm_diag_buf,
					tzdbg.rmlog_virt_iobase,
					tzdbg.rmlog_rw_buf_size);
			/* get RM header info first */
			p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf;
			/* Update RM log buffer index tracker and its size */
			log_start.read_idx = 0x0;
			log_start.size = p_log_hdr->size;
		}
		/* Update RM log buffer starting ptr */
		log_ptr =
		    (uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf +
				 sizeof(struct rmdbg_log_hdr_t));
	} else {
		/* Return 0 to close the display file,if there is nothing else to do */
		pr_err("There is no RM log to read, size is %d!\n",
			tzdbg.rmlog_rw_buf_size);
		return 0;
	}
	log_len = log_start.size;
	log_ptr += log_start.read_idx;
	/* Check if we exceed the max length provided by user space */
	log_len = (count > log_len) ? log_len : count;
	/* Update tracking data structure */
	log_start.size -= log_len;
	log_start.read_idx += log_len;

	/* More data remains: mark so the next read continues the chunking */
	if (log_start.size)
		wrap_around = true;
	return __disp_rm_log_stats(log_ptr, log_len);
}
  997. static int _disp_qsee_log_stats(size_t count)
  998. {
  999. static struct tzdbg_log_pos_t log_start = {0};
  1000. static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
  1001. if (!tzdbg.is_enlarged_buf)
  1002. return _disp_log_stats(g_qsee_log, &log_start,
  1003. QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
  1004. count, TZDBG_QSEE_LOG);
  1005. return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
  1006. QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
  1007. count, TZDBG_QSEE_LOG);
  1008. }
/*
 * Format hypervisor general stats (magic number, CPU count, stage-2 fault
 * counter and per-CPU warmboot counters) into tzdbg.disp_buf.
 * @count is accepted for interface symmetry with the other _disp_* helpers
 * but output is bounded by tzdbg.hyp_debug_rw_buf_size instead.
 * Returns the number of characters written.
 */
static int _disp_hyp_general_stats(size_t count)
{
	int len = 0;
	int i;
	struct hypdbg_boot_info_t *ptr = NULL;

	len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
			tzdbg.hyp_debug_rw_buf_size - 1,
			"   Magic Number    : 0x%x\n"
			"   CPU Count       : 0x%x\n"
			"   S2 Fault Counter: 0x%x\n",
			tzdbg.hyp_diag_buf->magic_num,
			tzdbg.hyp_diag_buf->cpu_count,
			tzdbg.hyp_diag_buf->s2_fault_counter);

	/* One boot-info record per CPU follows the fixed header */
	ptr = tzdbg.hyp_diag_buf->boot_info;
	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
		len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
				"  CPU #: %d\n"
				"     Warmboot entry CPU counter: 0x%x\n"
				"     Warmboot exit CPU counter : 0x%x\n",
				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);

		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
								__func__);
			break;
		}
		ptr++;
	}

	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
	return len;
}
/*
 * Read handler for the unencrypted-log path: snapshot the relevant IO
 * region into the local diag buffer, dispatch to the per-stat formatter,
 * then copy the formatted text to user space.
 *
 * @tz_id: which stats file is being read (index into tzdbg.stat[])
 * @buf:   user buffer
 * @count: user buffer size
 * @offp:  file position; reset to 0 for the streaming log files so each
 *         read returns the next chunk from position tracking, not *offp
 *
 * Returns bytes copied to user space, or a negative errno.
 */
static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0;

	/* TZ diag stats all live in the main diag region: refresh it */
	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
						debug_rw_buf_size);

	/* Hypervisor stats live in their own region */
	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
				tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);

	switch (tz_id) {
	case TZDBG_BOOT:
		len = _disp_tz_boot_stats();
		break;
	case TZDBG_RESET:
		len = _disp_tz_reset_stats();
		break;
	case TZDBG_INTERRUPT:
		len = _disp_tz_interrupt_stats();
		break;
	case TZDBG_GENERAL:
		len = _disp_tz_general_stats();
		break;
	case TZDBG_VMID:
		len = _disp_tz_vmid_stats();
		break;
	case TZDBG_LOG:
		/* Ring-buffer format only exists past the legacy version */
		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
				(tzdbg.diag_buf->version >> 16)) {
			len = _disp_tz_log_stats(count);
			*offp = 0;
		} else {
			len = _disp_tz_log_stats_legacy();
		}
		break;
	case TZDBG_QSEE_LOG:
		len = _disp_qsee_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_HYP_GENERAL:
		len = _disp_hyp_general_stats(count);
		break;
	case TZDBG_HYP_LOG:
		len = _disp_hyp_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_RM_LOG:
		len = _disp_rm_log_stats(count);
		*offp = 0;
		break;
	default:
		break;
	}

	/* Never hand userspace more than it asked for */
	if (len > count)
		len = count;

	return simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data, len);
}
/*
 * Read handler for the encrypted-log path. The formatted log is produced
 * once (when display_len is 0) and then drained across successive reads
 * using display_offset/display_len, so large logs survive short reads.
 *
 * Returns bytes copied to user space, 0 on invalid id or when drained.
 */
static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0, ret = 0;
	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);

	pr_debug("%s: tz_id = %d\n", __func__, tz_id);

	if (tz_id >= TZDBG_STATS_MAX) {
		pr_err("invalid encrypted log id %d\n", tz_id);
		return ret;
	}

	/* First read of this file: fetch and format the whole log once */
	if (!stat->display_len) {
		if (tz_id == TZDBG_QSEE_LOG)
			stat->display_len = _disp_encrpted_log_stats(
					&enc_qseelog_info,
					tz_id, ENCRYPTED_QSEE_LOG_ID);
		else
			stat->display_len = _disp_encrpted_log_stats(
					&enc_tzlog_info,
					tz_id, ENCRYPTED_TZ_LOG_ID);
		stat->display_offset = 0;
	}
	len = stat->display_len;
	if (len > count)
		len = count;

	*offp = 0;
	/*
	 * NOTE(review): simple_read_from_buffer() is invoked with @len as
	 * the "to copy" size and @count as "available" — reversed relative
	 * to the usual (count, ..., available) usage. The clamp above keeps
	 * len <= count so at most len bytes move, but confirm the intent.
	 */
	ret = simple_read_from_buffer(buf, len, offp,
				tzdbg.stat[tz_id].data + stat->display_offset,
				count);
	/* Advance the drain position by what was actually copied */
	stat->display_offset += ret;
	stat->display_len -= ret;

	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
	pr_debug("display_len = %d, offset = %d\n",
			stat->display_len, stat->display_offset);
	return ret;
}
  1136. static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
  1137. size_t count, loff_t *offp)
  1138. {
  1139. struct seq_file *seq = file->private_data;
  1140. int tz_id = TZDBG_STATS_MAX;
  1141. if (seq)
  1142. tz_id = *(int *)(seq->private);
  1143. else {
  1144. pr_err("%s: Seq data null unable to proceed\n", __func__);
  1145. return 0;
  1146. }
  1147. if (!tzdbg.is_encrypted_log_enabled ||
  1148. (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
  1149. || tz_id == TZDBG_RM_LOG)
  1150. return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
  1151. else
  1152. return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
  1153. }
/*
 * proc open: wrap in single_open() so tzdbg_fs_read() can recover the
 * per-file stat id (stored via proc_create_data) from seq->private.
 */
static int tzdbg_procfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, pde_data(inode));
}
/* proc release: undo the single_open() performed in tzdbg_procfs_open() */
static int tzdbg_procfs_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
/* proc_ops shared by every tzdbg stats file created in tzdbg_fs_init() */
struct proc_ops tzdbg_fops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_read = tzdbg_fs_read,
	.proc_open = tzdbg_procfs_open,
	.proc_release = tzdbg_procfs_release,
};
  1168. /*
  1169. * Allocates log buffer from ION, registers the buffer at TZ
  1170. */
  1171. static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
  1172. {
  1173. int ret = 0;
  1174. void *buf = NULL;
  1175. uint32_t ns_vmids[] = {VMID_HLOS};
  1176. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1177. uint32_t ns_vm_nums = 1;
  1178. if (tzdbg.is_enlarged_buf) {
  1179. if (of_property_read_u32((&pdev->dev)->of_node,
  1180. "qseelog-buf-size-v2", &qseelog_buf_size)) {
  1181. pr_debug("Enlarged qseelog buf size isn't defined\n");
  1182. qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
  1183. }
  1184. } else {
  1185. qseelog_buf_size = QSEE_LOG_BUF_SIZE;
  1186. }
  1187. pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);
  1188. buf = dma_alloc_coherent(&pdev->dev,
  1189. qseelog_buf_size, &coh_pmem, GFP_KERNEL);
  1190. if (buf == NULL)
  1191. return -ENOMEM;
  1192. if (!tzdbg.is_encrypted_log_enabled) {
  1193. ret = qtee_shmbridge_register(coh_pmem,
  1194. qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1195. PERM_READ | PERM_WRITE,
  1196. &qseelog_shmbridge_handle);
  1197. if (ret) {
  1198. pr_err("failed to create bridge for qsee_log buf\n");
  1199. goto exit_free_mem;
  1200. }
  1201. }
  1202. g_qsee_log = (struct tzdbg_log_t *)buf;
  1203. g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
  1204. g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
  1205. g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;
  1206. ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
  1207. if (ret != QSEOS_RESULT_SUCCESS) {
  1208. pr_err(
  1209. "%s: scm_call to register log buf failed, resp result =%lld\n",
  1210. __func__, ret);
  1211. goto exit_dereg_bridge;
  1212. }
  1213. return ret;
  1214. exit_dereg_bridge:
  1215. if (!tzdbg.is_encrypted_log_enabled)
  1216. qtee_shmbridge_deregister(qseelog_shmbridge_handle);
  1217. exit_free_mem:
  1218. dma_free_coherent(&pdev->dev, qseelog_buf_size,
  1219. (void *)g_qsee_log, coh_pmem);
  1220. return ret;
  1221. }
/*
 * Undo tzdbg_register_qsee_log_buf(): drop the shm bridge (only created
 * when logs are unencrypted) and free the QSEE log DMA buffer.
 */
static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
{
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			(void *)g_qsee_log, coh_pmem);
}
  1229. static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
  1230. {
  1231. int ret = 0;
  1232. uint32_t ns_vmids[] = {VMID_HLOS};
  1233. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  1234. uint32_t ns_vm_nums = 1;
  1235. if (!tzdbg.is_encrypted_log_enabled)
  1236. return 0;
  1237. /* max encrypted qsee log buf zize (include header, and page align) */
  1238. enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;
  1239. enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1240. enc_qseelog_info.size,
  1241. &enc_qseelog_info.paddr, GFP_KERNEL);
  1242. if (enc_qseelog_info.vaddr == NULL)
  1243. return -ENOMEM;
  1244. ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
  1245. enc_qseelog_info.size, ns_vmids,
  1246. ns_vm_perms, ns_vm_nums,
  1247. PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
  1248. if (ret) {
  1249. pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
  1250. goto exit_free_qseelog;
  1251. }
  1252. pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
  1253. enc_qseelog_info.size);
  1254. enc_tzlog_info.size = debug_rw_buf_size;
  1255. enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
  1256. enc_tzlog_info.size,
  1257. &enc_tzlog_info.paddr, GFP_KERNEL);
  1258. if (enc_tzlog_info.vaddr == NULL)
  1259. goto exit_unreg_qseelog;
  1260. ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
  1261. enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
  1262. PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
  1263. if (ret) {
  1264. pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
  1265. goto exit_free_tzlog;
  1266. }
  1267. pr_debug("Alloc memory for encr_tz_log, size %zu\n",
  1268. enc_qseelog_info.size);
  1269. return 0;
  1270. exit_free_tzlog:
  1271. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1272. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1273. exit_unreg_qseelog:
  1274. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1275. exit_free_qseelog:
  1276. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1277. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1278. return -ENOMEM;
  1279. }
  1280. static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
  1281. {
  1282. qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
  1283. dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
  1284. enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
  1285. qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
  1286. dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
  1287. enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
  1288. }
  1289. static int tzdbg_fs_init(struct platform_device *pdev)
  1290. {
  1291. int rc = 0;
  1292. int i;
  1293. struct proc_dir_entry *dent_dir;
  1294. struct proc_dir_entry *dent;
  1295. dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
  1296. if (dent_dir == NULL) {
  1297. dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
  1298. return -ENOMEM;
  1299. }
  1300. for (i = 0; i < TZDBG_STATS_MAX; i++) {
  1301. tzdbg.debug_tz[i] = i;
  1302. dent = proc_create_data(tzdbg.stat[i].name,
  1303. 0444, dent_dir,
  1304. &tzdbg_fops, &tzdbg.debug_tz[i]);
  1305. if (dent == NULL) {
  1306. dev_err(&pdev->dev, "TZ proc_create_data failed\n");
  1307. rc = -ENOMEM;
  1308. goto err;
  1309. }
  1310. }
  1311. platform_set_drvdata(pdev, dent_dir);
  1312. return 0;
  1313. err:
  1314. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1315. return rc;
  1316. }
  1317. static void tzdbg_fs_exit(struct platform_device *pdev)
  1318. {
  1319. struct proc_dir_entry *dent_dir;
  1320. dent_dir = platform_get_drvdata(pdev);
  1321. if (dent_dir)
  1322. remove_proc_entry(TZDBG_DIR_NAME, NULL);
  1323. }
/*
 * Locate and map the hypervisor diag area. The TZ diag region holds, at
 * DT-specified offsets, the physical address and size of the hyp log;
 * read them, ioremap the area and allocate a kernel-side snapshot buffer.
 * Returns 0 on success or a negative errno.
 */
static int __update_hypdbg_base(struct platform_device *pdev,
			void __iomem *virt_iobase)
{
	phys_addr_t hypdiag_phy_iobase;
	uint32_t hyp_address_offset;
	uint32_t hyp_size_offset;
	struct hypdbg_t *hyp;
	uint32_t *ptr = NULL;

	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
							&hyp_address_offset)) {
		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
							&hyp_size_offset)) {
		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
		return -EINVAL;
	}

	/* The hyp area's address/size are stored inside the TZ diag region */
	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
					hyp_size_offset);

	tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
					hypdiag_phy_iobase,
					tzdbg.hyp_debug_rw_buf_size);
	if (!tzdbg.hyp_virt_iobase) {
		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
		return -ENXIO;
	}

	/* Snapshot buffer; freed implicitly at module teardown */
	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
	hyp = tzdbg.hyp_diag_buf;
	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
	return 0;
}
  1361. static int __update_rmlog_base(struct platform_device *pdev,
  1362. void __iomem *virt_iobase)
  1363. {
  1364. uint32_t rmlog_address;
  1365. uint32_t rmlog_size;
  1366. uint32_t *ptr = NULL;
  1367. /* if we don't get the node just ignore it */
  1368. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address",
  1369. &rmlog_address)) {
  1370. dev_err(&pdev->dev, "RM log address is not defined\n");
  1371. tzdbg.rmlog_rw_buf_size = 0;
  1372. return 0;
  1373. }
  1374. /* if we don't get the node just ignore it */
  1375. if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size",
  1376. &rmlog_size)) {
  1377. dev_err(&pdev->dev, "RM log size is not defined\n");
  1378. tzdbg.rmlog_rw_buf_size = 0;
  1379. return 0;
  1380. }
  1381. tzdbg.rmlog_rw_buf_size = rmlog_size;
  1382. /* Check if there is RM log to read */
  1383. if (!tzdbg.rmlog_rw_buf_size) {
  1384. tzdbg.rmlog_virt_iobase = NULL;
  1385. tzdbg.rm_diag_buf = NULL;
  1386. dev_err(&pdev->dev, "RM log size is %d\n",
  1387. tzdbg.rmlog_rw_buf_size);
  1388. return 0;
  1389. }
  1390. tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev,
  1391. rmlog_address,
  1392. rmlog_size);
  1393. if (!tzdbg.rmlog_virt_iobase) {
  1394. dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
  1395. rmlog_address, tzdbg.rmlog_rw_buf_size);
  1396. return -ENXIO;
  1397. }
  1398. ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL);
  1399. if (!ptr)
  1400. return -ENOMEM;
  1401. tzdbg.rm_diag_buf = (uint8_t *)ptr;
  1402. return 0;
  1403. }
  1404. static int tzdbg_get_tz_version(void)
  1405. {
  1406. u64 version;
  1407. int ret = 0;
  1408. ret = qcom_scm_get_tz_log_feat_id(&version);
  1409. if (ret) {
  1410. pr_err("%s: scm_call to get tz version failed\n",
  1411. __func__);
  1412. return ret;
  1413. }
  1414. tzdbg.tz_version = version;
  1415. ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
  1416. if (ret) {
  1417. pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
  1418. __func__, ret);
  1419. return ret;
  1420. }
  1421. pr_warn("tz diag version is %x\n", version);
  1422. tzdbg.tz_diag_major_version =
  1423. ((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1424. tzdbg.tz_diag_minor_version =
  1425. ((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
  1426. if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
  1427. switch (tzdbg.tz_diag_minor_version) {
  1428. case TZBSP_DIAG_MINOR_VERSION_V2:
  1429. case TZBSP_DIAG_MINOR_VERSION_V21:
  1430. case TZBSP_DIAG_MINOR_VERSION_V22:
  1431. tzdbg.is_enlarged_buf = true;
  1432. break;
  1433. default:
  1434. tzdbg.is_enlarged_buf = false;
  1435. }
  1436. } else {
  1437. tzdbg.is_enlarged_buf = false;
  1438. }
  1439. return ret;
  1440. }
  1441. static void tzdbg_query_encrypted_log(void)
  1442. {
  1443. int ret = 0;
  1444. uint64_t enabled;
  1445. ret = qcom_scm_query_encrypted_log_feature(&enabled);
  1446. if (ret) {
  1447. if (ret == -EIO)
  1448. pr_info("SCM_CALL : SYS CALL NOT SUPPORTED IN TZ\n");
  1449. else
  1450. pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
  1451. tzdbg.is_encrypted_log_enabled = false;
  1452. } else {
  1453. pr_warn("encrypted qseelog enabled is %d\n", enabled);
  1454. tzdbg.is_encrypted_log_enabled = enabled;
  1455. }
  1456. }
  1457. /*
  1458. * Driver functions
  1459. */
  1460. static int tz_log_probe(struct platform_device *pdev)
  1461. {
  1462. struct resource *resource;
  1463. void __iomem *virt_iobase;
  1464. phys_addr_t tzdiag_phy_iobase;
  1465. uint32_t *ptr = NULL;
  1466. int ret = 0;
  1467. ret = tzdbg_get_tz_version();
  1468. if (ret)
  1469. return ret;
  1470. /*
  1471. * Get address that stores the physical location diagnostic data
  1472. */
  1473. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1474. if (!resource) {
  1475. dev_err(&pdev->dev,
  1476. "%s: ERROR Missing MEM resource\n", __func__);
  1477. return -ENXIO;
  1478. }
  1479. /*
  1480. * Get the debug buffer size
  1481. */
  1482. debug_rw_buf_size = resource_size(resource);
  1483. /*
  1484. * Map address that stores the physical location diagnostic data
  1485. */
  1486. virt_iobase = devm_ioremap(&pdev->dev, resource->start,
  1487. debug_rw_buf_size);
  1488. if (!virt_iobase) {
  1489. dev_err(&pdev->dev,
  1490. "%s: ERROR could not ioremap: start=%pr, len=%u\n",
  1491. __func__, &resource->start,
  1492. (unsigned int)(debug_rw_buf_size));
  1493. return -ENXIO;
  1494. }
  1495. if (pdev->dev.of_node) {
  1496. tzdbg.is_hyplog_enabled = of_property_read_bool(
  1497. (&pdev->dev)->of_node, "qcom,hyplog-enabled");
  1498. if (tzdbg.is_hyplog_enabled) {
  1499. ret = __update_hypdbg_base(pdev, virt_iobase);
  1500. if (ret) {
  1501. dev_err(&pdev->dev,
  1502. "%s: fail to get hypdbg_base ret %d\n",
  1503. __func__, ret);
  1504. return -EINVAL;
  1505. }
  1506. ret = __update_rmlog_base(pdev, virt_iobase);
  1507. if (ret) {
  1508. dev_err(&pdev->dev,
  1509. "%s: fail to get rmlog_base ret %d\n",
  1510. __func__, ret);
  1511. return -EINVAL;
  1512. }
  1513. } else {
  1514. dev_info(&pdev->dev, "Hyp log service not support\n");
  1515. }
  1516. } else {
  1517. dev_dbg(&pdev->dev, "Device tree data is not found\n");
  1518. }
  1519. /*
  1520. * Retrieve the address of diagnostic data
  1521. */
  1522. tzdiag_phy_iobase = readl_relaxed(virt_iobase);
  1523. tzdbg_query_encrypted_log();
  1524. /*
  1525. * Map the diagnostic information area if encryption is disabled
  1526. */
  1527. if (!tzdbg.is_encrypted_log_enabled) {
  1528. tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
  1529. tzdiag_phy_iobase, debug_rw_buf_size);
  1530. if (!tzdbg.virt_iobase) {
  1531. dev_err(&pdev->dev,
  1532. "%s: could not ioremap: start=%pr, len=%u\n",
  1533. __func__, &tzdiag_phy_iobase,
  1534. debug_rw_buf_size);
  1535. return -ENXIO;
  1536. }
  1537. /* allocate diag_buf */
  1538. ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
  1539. if (ptr == NULL)
  1540. return -ENOMEM;
  1541. tzdbg.diag_buf = (struct tzdbg_t *)ptr;
  1542. } else {
  1543. if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
  1544. (tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
  1545. tzdbg.is_full_encrypted_tz_logs_supported = true;
  1546. if (pdev->dev.of_node) {
  1547. tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
  1548. (&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
  1549. }
  1550. }
  1551. /* register unencrypted qsee log buffer */
  1552. ret = tzdbg_register_qsee_log_buf(pdev);
  1553. if (ret)
  1554. goto exit_free_diag_buf;
  1555. /* allocate encrypted qsee and tz log buffer */
  1556. ret = tzdbg_allocate_encrypted_log_buf(pdev);
  1557. if (ret) {
  1558. dev_err(&pdev->dev,
  1559. "Failed to allocate encrypted log buffer\n",
  1560. __func__);
  1561. goto exit_free_qsee_log_buf;
  1562. }
  1563. /* allocate display_buf */
  1564. if (UINT_MAX/4 < qseelog_buf_size) {
  1565. pr_err("display_buf_size integer overflow\n");
  1566. goto exit_free_qsee_log_buf;
  1567. }
  1568. display_buf_size = qseelog_buf_size * 4;
  1569. tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
  1570. &disp_buf_paddr, GFP_KERNEL);
  1571. if (tzdbg.disp_buf == NULL) {
  1572. ret = -ENOMEM;
  1573. goto exit_free_encr_log_buf;
  1574. }
  1575. if (tzdbg_fs_init(pdev))
  1576. goto exit_free_disp_buf;
  1577. return 0;
  1578. exit_free_disp_buf:
  1579. dma_free_coherent(&pdev->dev, display_buf_size,
  1580. (void *)tzdbg.disp_buf, disp_buf_paddr);
  1581. exit_free_encr_log_buf:
  1582. tzdbg_free_encrypted_log_buf(pdev);
  1583. exit_free_qsee_log_buf:
  1584. tzdbg_free_qsee_log_buf(pdev);
  1585. exit_free_diag_buf:
  1586. if (!tzdbg.is_encrypted_log_enabled)
  1587. kfree(tzdbg.diag_buf);
  1588. return -ENXIO;
  1589. }
/*
 * Remove: tear down the proc files and release every buffer acquired in
 * probe, in reverse order of acquisition.
 */
static int tz_log_remove(struct platform_device *pdev)
{
	tzdbg_fs_exit(pdev);
	dma_free_coherent(&pdev->dev, display_buf_size,
			(void *)tzdbg.disp_buf, disp_buf_paddr);
	tzdbg_free_encrypted_log_buf(pdev);
	tzdbg_free_qsee_log_buf(pdev);
	/* diag_buf is only kzalloc'd on the unencrypted path */
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return 0;
}
/*
 * Device-tree match table.
 * NOTE(review): no MODULE_DEVICE_TABLE(of, tzlog_match) is visible in this
 * file, so module autoloading by compatible string may not work — confirm.
 */
static const struct of_device_id tzlog_match[] = {
	{.compatible = "qcom,tz-log"},
	{}
};
/* Platform driver binding probe/remove to the "qcom,tz-log" DT node */
static struct platform_driver tz_log_driver = {
	.probe = tz_log_probe,
	.remove = tz_log_remove,
	.driver = {
		.name = "tz_log",
		.of_match_table = tzlog_match,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/* Register the driver and declare module metadata */
module_platform_driver(tz_log_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TZ Log driver");