tz_log.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/dma-buf.h>
#include <linux/qcom_scm.h>
#include <soc/qcom/qseecomi.h>
#include <linux/qtee_shmbridge.h>
#include <linux/proc_fs.h>
/* QSEE_LOG_BUF_SIZE = 32K */
#define QSEE_LOG_BUF_SIZE 0x8000

/* enlarged qsee log buf size is 128K by default */
#define QSEE_LOG_BUF_SIZE_V2 0x20000

/* TZ Diagnostic Area legacy version number */
#define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2

/* TZ Diagnostic Area version number */
#define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */
#define TZBSP_FVER_MAJOR_SHIFT 22
#define TZBSP_FVER_MINOR_SHIFT 12
#define TZBSP_DIAG_MAJOR_VERSION_V9 9
#define TZBSP_DIAG_MINOR_VERSION_V2 2
#define TZBSP_DIAG_MINOR_VERSION_V21 3
#define TZBSP_DIAG_MINOR_VERSION_V22 4

/* TZ Diag Feature Version Id */
#define QCOM_SCM_FEAT_DIAG_ID 0x06

/*
 * Preprocessor Definitions and Constants
 */
#define TZBSP_MAX_CPU_COUNT 0x08

/*
 * Number of VMID Tables
 */
#define TZBSP_DIAG_NUM_OF_VMID 16

/*
 * VMID Description length
 */
#define TZBSP_DIAG_VMID_DESC_LEN 7

/*
 * Number of Interrupts
 */
#define TZBSP_DIAG_INT_NUM 32

/*
 * Length of descriptive name associated with Interrupt
 */
#define TZBSP_MAX_INT_DESC 16

/*
 * TZ 3.X version info
 */
#define QSEE_VERSION_TZ_3_X 0x800000

/*
 * TZ 4.X version info
 */
#define QSEE_VERSION_TZ_4_X 0x1000000

#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
#define TZBSP_NONCE_LEN 12
#define TZBSP_TAG_LEN 16

#define ENCRYPTED_TZ_LOG_ID 0
#define ENCRYPTED_QSEE_LOG_ID 1

/*
 * Directory for TZ DBG logs
 */
#define TZDBG_DIR_NAME "tzdbg"
/*
 * VMID Table
 */
struct tzdbg_vmid_t {
	uint8_t vmid; /* Virtual Machine Identifier */
	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
};

/*
 * Boot Info Table
 */
struct tzdbg_boot_info_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t spare; /* Reserved for future use. */
};

/*
 * Boot Info Table for 64-bit
 */
struct tzdbg_boot_info64_t {
	uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
	uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
	uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
	uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
	uint32_t psci_entry_cnt; /* PSCI syscall entry CPU Counter */
	uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */
	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
};

/*
 * Reset Info Table
 */
struct tzdbg_reset_info_t {
	uint32_t reset_type; /* Reset Reason */
	uint32_t reset_cnt; /* Number of resets occurred/CPU */
};

/*
 * Interrupt Info Table
 */
struct tzdbg_int_t {
	/*
	 * Type of Interrupt/exception
	 */
	uint16_t int_info;
	/*
	 * Availability of the slot
	 */
	uint8_t avail;
	/*
	 * Reserved for future use
	 */
	uint8_t spare;
	/*
	 * Interrupt # for IRQ and FIQ
	 */
	uint32_t int_num;
	/*
	 * ASCII text describing type of interrupt e.g:
	 * Secure Timer, EBI XPU. This string is always null terminated,
	 * supporting at most TZBSP_MAX_INT_DESC characters.
	 * Any additional characters are truncated.
	 */
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};

/*
 * Interrupt Info Table used in tz version >=4.X
 */
struct tzdbg_int_t_tz40 {
	uint16_t int_info;
	uint8_t avail;
	uint8_t spare;
	uint32_t int_num;
	uint8_t int_desc[TZBSP_MAX_INT_DESC];
	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x */
};

/* warm boot reason for cores */
struct tzbsp_diag_wakeup_info_t {
	/* Wake source info : APCS_GICC_HPPIR */
	uint32_t HPPIR;
	/* Wake source info : APCS_GICC_AHPPIR */
	uint32_t AHPPIR;
};

/*
 * Log ring buffer position
 */
struct tzdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};

struct tzdbg_log_pos_v2_t {
	uint32_t wrap;
	uint32_t offset;
};

/*
 * Log ring buffer
 */
struct tzdbg_log_t {
	struct tzdbg_log_pos_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};

struct tzdbg_log_v2_t {
	struct tzdbg_log_pos_v2_t log_pos;
	/* open ended array to the end of the 4K IMEM buffer */
	uint8_t log_buf[];
};

struct tzbsp_encr_info_for_log_chunk_t {
	uint32_t size_to_encr;
	uint8_t nonce[TZBSP_NONCE_LEN];
	uint8_t tag[TZBSP_TAG_LEN];
};

/*
 * Only `ENTIRE_LOG` will be used unless the
 * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
 * If this is true, the diag log will be encrypted in two
 * separate chunks: a smaller chunk containing only error
 * fatal logs and a bigger "rest of the log" chunk. In this
 * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
 * used instead of `ENTIRE_LOG`.
 */
enum tzbsp_encr_info_for_log_chunks_idx_t {
	BIG_LOG_CHUNK = 0,
	ENTIRE_LOG = 1,
	ERR_FATAL_LOG_CHUNK = 1,
	MAX_NUM_OF_CHUNKS,
};

struct tzbsp_encr_info_t {
	uint32_t num_of_chunks;
	struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
};
/*
 * Diagnostic Table
 * Note: This is the reference data structure for tz diagnostic table
 * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
 * copied into buffer from i/o memory.
 */
struct tzdbg_t {
	uint32_t magic_num;
	uint32_t version;
	/*
	 * Number of CPU's
	 */
	uint32_t cpu_count;
	/*
	 * Offset of VMID Table
	 */
	uint32_t vmid_info_off;
	/*
	 * Offset of Boot Table
	 */
	uint32_t boot_info_off;
	/*
	 * Offset of Reset info Table
	 */
	uint32_t reset_info_off;
	/*
	 * Offset of Interrupt info Table
	 */
	uint32_t int_info_off;
	/*
	 * Ring Buffer Offset
	 */
	uint32_t ring_off;
	/*
	 * Ring Buffer Length
	 */
	uint32_t ring_len;
	/* Offset for Wakeup info */
	uint32_t wakeup_info_off;
	union {
		/*
		 * The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V2
		 */
		struct {
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts;
			struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
			uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
			uint8_t nonce[TZBSP_NONCE_LEN];
			uint8_t tag[TZBSP_TAG_LEN];
		};
		/*
		 * The elements in below structure have to be used for TZ where
		 * diag version = TZBSP_DIAG_MINOR_VERSION_V21
		 */
		struct {
			uint32_t encr_info_for_log_off;
			/*
			 * VMID to EE Mapping
			 */
			struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
			/*
			 * Boot Info
			 */
			struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT];
			/*
			 * Reset Info
			 */
			struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
			uint32_t num_interrupts_v2;
			struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM];
			/* Wake up info */
			struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
			struct tzbsp_encr_info_t encr_info_for_log;
		};
	};
	/*
	 * We need at least 2K for the ring buffer
	 */
	struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
};

struct hypdbg_log_pos_t {
	uint16_t wrap;
	uint16_t offset;
};

struct hypdbg_boot_info_t {
	uint32_t warm_entry_cnt;
	uint32_t warm_exit_cnt;
};

struct hypdbg_t {
	/* Magic Number */
	uint32_t magic_num;
	/* Number of CPU's */
	uint32_t cpu_count;
	/* Ring Buffer Offset */
	uint32_t ring_off;
	/* Ring buffer position mgmt */
	struct hypdbg_log_pos_t log_pos;
	uint32_t log_len;
	/* S2 fault numbers */
	uint32_t s2_fault_counter;
	/* Boot Info */
	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
	/* Ring buffer pointer */
	uint8_t log_buf_p[];
};
/*
 * Enumeration of the tzdbg stat entries; this is also the order in which
 * the procfs entries are created.
 */
enum tzdbg_stats_type {
	TZDBG_BOOT = 0,
	TZDBG_RESET,
	TZDBG_INTERRUPT,
	TZDBG_VMID,
	TZDBG_GENERAL,
	TZDBG_LOG,
	TZDBG_QSEE_LOG,
	TZDBG_HYP_GENERAL,
	TZDBG_HYP_LOG,
	TZDBG_STATS_MAX
};
struct tzdbg_stat {
	size_t display_len;
	size_t display_offset;
	char *name;
	char *data;
};

struct tzdbg {
	void __iomem *virt_iobase;
	void __iomem *hyp_virt_iobase;
	struct tzdbg_t *diag_buf;
	struct hypdbg_t *hyp_diag_buf;
	char *disp_buf;
	int debug_tz[TZDBG_STATS_MAX];
	struct tzdbg_stat stat[TZDBG_STATS_MAX];
	uint32_t hyp_debug_rw_buf_size;
	bool is_hyplog_enabled;
	uint32_t tz_version;
	bool is_encrypted_log_enabled;
	bool is_enlarged_buf;
	bool is_full_encrypted_tz_logs_supported;
	bool is_full_encrypted_tz_logs_enabled;
	int tz_diag_minor_version;
	int tz_diag_major_version;
};

struct tzbsp_encr_log_t {
	/* Magic Number */
	uint32_t magic_num;
	/* version NUMBER */
	uint32_t version;
	/* encrypted log size */
	uint32_t encr_log_buff_size;
	/* Wrap value */
	uint16_t wrap_count;
	/* AES encryption key wrapped up with oem public key */
	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
	/* Nonce used for encryption */
	uint8_t nonce[TZBSP_NONCE_LEN];
	/* Tag to be used for Validation */
	uint8_t tag[TZBSP_TAG_LEN];
	/* Encrypted log buffer */
	uint8_t log_buf[1];
};

struct encrypted_log_info {
	phys_addr_t paddr;
	void *vaddr;
	size_t size;
	uint64_t shmb_handle;
};

static struct tzdbg tzdbg = {
	.stat[TZDBG_BOOT].name = "boot",
	.stat[TZDBG_RESET].name = "reset",
	.stat[TZDBG_INTERRUPT].name = "interrupt",
	.stat[TZDBG_VMID].name = "vmid",
	.stat[TZDBG_GENERAL].name = "general",
	.stat[TZDBG_LOG].name = "log",
	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
	.stat[TZDBG_HYP_LOG].name = "hyp_log",
};

static struct tzdbg_log_t *g_qsee_log;
static struct tzdbg_log_v2_t *g_qsee_log_v2;
static dma_addr_t coh_pmem;
static uint32_t debug_rw_buf_size;
static uint32_t display_buf_size;
static uint32_t qseelog_buf_size;
static phys_addr_t disp_buf_paddr;
static uint64_t qseelog_shmbridge_handle;
static struct encrypted_log_info enc_qseelog_info;
static struct encrypted_log_info enc_tzlog_info;
/*
 * Procfs data structures and display functions
 */
static int _disp_tz_general_stats(void)
{
	int len = 0;

	len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
			" Version : 0x%x\n"
			" Magic Number : 0x%x\n"
			" Number of CPU : %d\n",
			tzdbg.diag_buf->version,
			tzdbg.diag_buf->magic_num,
			tzdbg.diag_buf->cpu_count);
	tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
	return len;
}
static int _disp_tz_vmid_stats(void)
{
	int i, num_vmid;
	int len = 0;
	struct tzdbg_vmid_t *ptr;

	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->vmid_info_off);
	num_vmid = ((tzdbg.diag_buf->boot_info_off -
			tzdbg.diag_buf->vmid_info_off) /
			(sizeof(struct tzdbg_vmid_t)));

	for (i = 0; i < num_vmid; i++) {
		if (ptr->vmid < 0xFF) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" 0x%x %s\n",
					(uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
		}
		if (len > (debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
					__func__);
			break;
		}
		ptr++;
	}

	tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
	return len;
}
static int _disp_tz_boot_stats(void)
{
	int i;
	int len = 0;
	struct tzdbg_boot_info_t *ptr = NULL;
	struct tzdbg_boot_info64_t *ptr_64 = NULL;

	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
				tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	} else {
		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
				tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
	}

	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%llx\n"
					" Warmboot entry CPU counter : 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter : 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n"
					" Psci entry CPU counter : 0x%x\n"
					" Psci exit CPU counter : 0x%x\n"
					" Warmboot Jump Address Instruction : 0x%x\n",
					i, (uint64_t)ptr_64->warm_jmp_addr,
					ptr_64->wb_entry_cnt,
					ptr_64->wb_exit_cnt,
					ptr_64->pc_entry_cnt,
					ptr_64->pc_exit_cnt,
					ptr_64->psci_entry_cnt,
					ptr_64->psci_exit_cnt,
					ptr_64->warm_jmp_instr);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr_64++;
		} else {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" CPU #: %d\n"
					" Warmboot jump address : 0x%x\n"
					" Warmboot entry CPU counter: 0x%x\n"
					" Warmboot exit CPU counter : 0x%x\n"
					" Power Collapse entry CPU counter: 0x%x\n"
					" Power Collapse exit CPU counter : 0x%x\n",
					i, ptr->warm_jmp_addr,
					ptr->wb_entry_cnt,
					ptr->wb_exit_cnt,
					ptr->pc_entry_cnt,
					ptr->pc_exit_cnt);
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into the buffer\n",
						__func__);
				break;
			}
			ptr++;
		}
	}
	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
	return len;
}
static int _disp_tz_reset_stats(void)
{
	int i;
	int len = 0;
	struct tzdbg_reset_info_t *ptr;

	ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->reset_info_off);

	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
		len += scnprintf(tzdbg.disp_buf + len,
				(debug_rw_buf_size - 1) - len,
				" CPU #: %d\n"
				" Reset Type (reason) : 0x%x\n"
				" Reset counter : 0x%x\n",
				i, ptr->reset_type, ptr->reset_cnt);
		if (len > (debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
					__func__);
			break;
		}
		ptr++;
	}
	tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
	return len;
}
static int _disp_tz_interrupt_stats(void)
{
	int i, j;
	int len = 0;
	uint32_t *num_int;
	void *ptr;
	struct tzdbg_int_t *tzdbg_ptr;
	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;

	num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
	ptr = ((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->int_info_off);

	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
		tzdbg_ptr = ptr;
		for (i = 0; i < (*num_int); i++) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" Interrupt Number : 0x%x\n"
					" Type of Interrupt : 0x%x\n"
					" Description of interrupt : %s\n",
					tzdbg_ptr->int_num,
					(uint32_t)tzdbg_ptr->int_info,
					(uint8_t *)tzdbg_ptr->int_desc);
			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
				len += scnprintf(tzdbg.disp_buf + len,
						(debug_rw_buf_size - 1) - len,
						" int_count on CPU # %d : %u\n",
						(uint32_t)j,
						(uint32_t)tzdbg_ptr->int_count[j]);
			}
			len += scnprintf(tzdbg.disp_buf + len,
					debug_rw_buf_size - 1, "\n");
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into buf\n",
						__func__);
				break;
			}
			tzdbg_ptr++;
		}
	} else {
		tzdbg_ptr_tz40 = ptr;
		for (i = 0; i < (*num_int); i++) {
			len += scnprintf(tzdbg.disp_buf + len,
					(debug_rw_buf_size - 1) - len,
					" Interrupt Number : 0x%x\n"
					" Type of Interrupt : 0x%x\n"
					" Description of interrupt : %s\n",
					tzdbg_ptr_tz40->int_num,
					(uint32_t)tzdbg_ptr_tz40->int_info,
					(uint8_t *)tzdbg_ptr_tz40->int_desc);
			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
				len += scnprintf(tzdbg.disp_buf + len,
						(debug_rw_buf_size - 1) - len,
						" int_count on CPU # %d : %u\n",
						(uint32_t)j,
						(uint32_t)tzdbg_ptr_tz40->int_count[j]);
			}
			len += scnprintf(tzdbg.disp_buf + len,
					debug_rw_buf_size - 1, "\n");
			if (len > (debug_rw_buf_size - 1)) {
				pr_warn("%s: Cannot fit all info into buf\n",
						__func__);
				break;
			}
			tzdbg_ptr_tz40++;
		}
	}

	tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
	return len;
}
static int _disp_tz_log_stats_legacy(void)
{
	int len = 0;
	unsigned char *ptr;

	ptr = (unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->ring_off;
	len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
			"%s\n", ptr);

	tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
	return len;
}
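
/*
 * Copy new bytes from a TZ ring-buffer log into tzdbg.disp_buf.
 *
 * @log_start keeps the reader position (wrap count + offset) across calls.
 * The wrap counters are compared to work out how far the writer has moved:
 * if it has wrapped more than once (or the comparison is no longer
 * meaningful), the reader position is resynchronised to just past the
 * writer. When the reader has caught up, the function sleeps in 50 ms
 * steps until new data shows up or the sleep is interrupted; for the
 * TZ log (TZDBG_LOG) it re-snapshots the IMEM diag region on each pass,
 * while the QSEE log buffer is coherently shared and needs no copy.
 */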
static int _disp_log_stats(struct tzdbg_log_t *log,
		struct tzdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
			(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
			log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
					debug_rw_buf_size);
	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
			log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
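
/*
 * Same algorithm as _disp_log_stats(), but for the enlarged (v2) log layout
 * whose wrap/offset fields are 32-bit instead of 16-bit.
 */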
static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
		struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = log->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = log->log_pos.wrap - 1;
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
			(log->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (log->log_pos.offset + 1) % log_len;
	}

	pr_debug("diag_buf wrap = %u, offset = %u\n",
			log->log_pos.wrap, log->log_pos.offset);
	while (log_start->offset == log->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		unsigned long t = msleep_interruptible(50);

		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		if (buf_idx == TZDBG_LOG)
			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
					debug_rw_buf_size);
	}

	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;

	pr_debug("diag_buf wrap = %u, offset = %u\n",
			log->log_pos.wrap, log->log_pos.offset);
	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
static int __disp_hyp_log_stats(uint8_t *log,
		struct hypdbg_log_pos_t *log_start, uint32_t log_len,
		size_t count, uint32_t buf_idx)
{
	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
	unsigned long t = 0;
	uint32_t wrap_start;
	uint32_t wrap_end;
	uint32_t wrap_cnt;
	int max_len;
	int len = 0;
	int i = 0;

	wrap_start = log_start->wrap;
	wrap_end = hyp->log_pos.wrap;

	/* Calculate difference in # of buffer wrap-arounds */
	if (wrap_end >= wrap_start)
		wrap_cnt = wrap_end - wrap_start;
	else {
		/* wrap counter has wrapped around, invalidate start position */
		wrap_cnt = 2;
	}

	if (wrap_cnt > 1) {
		/* end position has wrapped around more than once, */
		/* current start no longer valid */
		log_start->wrap = hyp->log_pos.wrap - 1;
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	} else if ((wrap_cnt == 1) &&
			(hyp->log_pos.offset > log_start->offset)) {
		/* end position has overwritten start */
		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
	}

	while (log_start->offset == hyp->log_pos.offset) {
		/*
		 * No data in ring buffer,
		 * so we'll hang around until something happens
		 */
		t = msleep_interruptible(50);
		if (t != 0) {
			/* Some event woke us up, so let's quit */
			return 0;
		}

		/* TZDBG_HYP_LOG */
		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);
	}

	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
			tzdbg.hyp_debug_rw_buf_size : count;

	/*
	 * Read from ring buff while there is data and space in return buff
	 */
	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
		tzdbg.disp_buf[i++] = log[log_start->offset];
		log_start->offset = (log_start->offset + 1) % log_len;
		if (log_start->offset == 0)
			++log_start->wrap;
		++len;
	}

	/*
	 * return buffer to caller
	 */
	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
	return len;
}
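
/*
 * Hex-dump @size bytes from @text_addr into @buf, prefixed by
 * @intro_message. Bytes are printed as "%02hhx " with a line break every
 * 32 bytes; the "len + 6" check keeps headroom for one more byte plus the
 * trailing newline before the output buffer would run out.
 */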
static int print_text(char *intro_message,
		unsigned char *text_addr,
		unsigned int size,
		char *buf, uint32_t buf_len)
{
	unsigned int i;
	int len = 0;

	pr_debug("begin address %p, size %d\n", text_addr, size);
	len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
	for (i = 0; i < size; i++) {
		if (buf_len <= len + 6) {
			pr_err("buffer not enough, buf_len %d, len %d\n",
					buf_len, len);
			return buf_len;
		}
		len += scnprintf(buf + len, buf_len - len, "%02hhx ",
				text_addr[i]);
		if ((i & 0x1f) == 0x1f)
			len += scnprintf(buf + len, buf_len - len, "%c", '\n');
	}
	len += scnprintf(buf + len, buf_len - len, "%c", '\n');
	return len;
}
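
/*
 * Fetch an encrypted log (TZ diag or QSEE) from the secure world via
 * qcom_scm_request_encrypted_log() and render it into tzdbg.disp_buf:
 * the header fields first, then hex dumps of the wrapped key, nonce, tag
 * and the encrypted payload. The shared buffer is cleared afterwards.
 */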
static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
		enum tzdbg_stats_type type, uint32_t log_id)
{
	int ret = 0, len = 0;
	struct tzbsp_encr_log_t *encr_log_head;
	uint32_t size = 0;

	if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
			(tzdbg.is_full_encrypted_tz_logs_enabled))
		pr_info("TZ does not support full encrypted log functionality\n");
	ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
			enc_log_info->size, log_id,
			tzdbg.is_full_encrypted_tz_logs_supported,
			tzdbg.is_full_encrypted_tz_logs_enabled);
	if (ret)
		return 0;
	encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
	pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
			display_buf_size, encr_log_head->encr_log_buff_size);
	size = encr_log_head->encr_log_buff_size;

	len += scnprintf(tzdbg.disp_buf + len,
			(display_buf_size - 1) - len,
			"\n-------- New Encrypted %s --------\n",
			((log_id == ENCRYPTED_QSEE_LOG_ID) ?
				"QSEE Log" : "TZ Diag Log"));
	len += scnprintf(tzdbg.disp_buf + len,
			(display_buf_size - 1) - len,
			"\nMagic_Num :\n0x%x\n"
			"\nVersion :\n%d\n"
			"\nEncr_Log_Buff_Size :\n%d\n"
			"\nWrap_Count :\n%d\n",
			encr_log_head->magic_num,
			encr_log_head->version,
			encr_log_head->encr_log_buff_size,
			encr_log_head->wrap_count);

	len += print_text("\nKey : ", encr_log_head->key,
			TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
			tzdbg.disp_buf + len, display_buf_size - len);
	len += print_text("\nNonce : ", encr_log_head->nonce,
			TZBSP_NONCE_LEN,
			tzdbg.disp_buf + len, display_buf_size - len);
	len += print_text("\nTag : ", encr_log_head->tag,
			TZBSP_TAG_LEN,
			tzdbg.disp_buf + len, display_buf_size - len);

	if (len > display_buf_size - size)
		pr_warn("Cannot fit all info into the buffer\n");
	pr_debug("encrypted log size %d, display buffer size %d, used len %d\n",
			size, display_buf_size, len);

	len += print_text("\nLog : ", encr_log_head->log_buf, size,
			tzdbg.disp_buf + len, display_buf_size - len);
	memset(enc_log_info->vaddr, 0, enc_log_info->size);
	tzdbg.stat[type].data = tzdbg.disp_buf;
	return len;
}
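
/*
 * Locate the TZ ring buffer inside the diag snapshot: ring_off is treated
 * as pointing at the log data itself, so stepping back by
 * offsetof(..., log_buf) recovers the enclosing tzdbg_log_t (or its v2
 * variant when the enlarged layout is in use). The static log_start
 * positions preserve the read cursor across successive reads of
 * /proc/tzdbg/log.
 */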
static int _disp_tz_log_stats(size_t count)
{
	static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
	static struct tzdbg_log_pos_t log_start = {0};
	struct tzdbg_log_v2_t *log_v2_ptr;
	struct tzdbg_log_t *log_ptr;

	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->ring_off -
			offsetof(struct tzdbg_log_t, log_buf));
	log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
			tzdbg.diag_buf->ring_off -
			offsetof(struct tzdbg_log_v2_t, log_buf));

	if (!tzdbg.is_enlarged_buf)
		return _disp_log_stats(log_ptr, &log_start,
				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);

	return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
			tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
}

static int _disp_hyp_log_stats(size_t count)
{
	static struct hypdbg_log_pos_t log_start = {0};
	uint8_t *log_ptr;
	uint32_t log_len;

	log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
			tzdbg.hyp_diag_buf->ring_off);
	log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;

	return __disp_hyp_log_stats(log_ptr, &log_start,
			log_len, count, TZDBG_HYP_LOG);
}
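
/*
 * The QSEE log lives in the coherent DMA buffer registered with TZ at
 * probe time (g_qsee_log / g_qsee_log_v2); only the space after the
 * position header is usable ring space, hence the sizeof() subtraction.
 */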
static int _disp_qsee_log_stats(size_t count)
{
	static struct tzdbg_log_pos_t log_start = {0};
	static struct tzdbg_log_pos_v2_t log_start_v2 = {0};

	if (!tzdbg.is_enlarged_buf)
		return _disp_log_stats(g_qsee_log, &log_start,
				QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
				count, TZDBG_QSEE_LOG);

	return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
			QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
			count, TZDBG_QSEE_LOG);
}

static int _disp_hyp_general_stats(size_t count)
{
	int len = 0;
	int i;
	struct hypdbg_boot_info_t *ptr = NULL;

	len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
			tzdbg.hyp_debug_rw_buf_size - 1,
			" Magic Number : 0x%x\n"
			" CPU Count : 0x%x\n"
			" S2 Fault Counter: 0x%x\n",
			tzdbg.hyp_diag_buf->magic_num,
			tzdbg.hyp_diag_buf->cpu_count,
			tzdbg.hyp_diag_buf->s2_fault_counter);

	ptr = tzdbg.hyp_diag_buf->boot_info;
	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
		len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
				" CPU #: %d\n"
				" Warmboot entry CPU counter: 0x%x\n"
				" Warmboot exit CPU counter : 0x%x\n",
				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
			pr_warn("%s: Cannot fit all info into the buffer\n",
					__func__);
			break;
		}
		ptr++;
	}

	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
	return len;
}
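
/*
 * Read handler for the unencrypted case: take a fresh snapshot of the TZ
 * (or hypervisor) diag region with memcpy_fromio(), format the requested
 * stat into tzdbg.disp_buf and copy it to user space. The file offset is
 * reset for the streaming log entries so that successive reads return new
 * data instead of re-reading from the start.
 */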
static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0;

	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
				debug_rw_buf_size);

	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
				tzdbg.hyp_virt_iobase,
				tzdbg.hyp_debug_rw_buf_size);

	switch (tz_id) {
	case TZDBG_BOOT:
		len = _disp_tz_boot_stats();
		break;
	case TZDBG_RESET:
		len = _disp_tz_reset_stats();
		break;
	case TZDBG_INTERRUPT:
		len = _disp_tz_interrupt_stats();
		break;
	case TZDBG_GENERAL:
		len = _disp_tz_general_stats();
		break;
	case TZDBG_VMID:
		len = _disp_tz_vmid_stats();
		break;
	case TZDBG_LOG:
		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
				(tzdbg.diag_buf->version >> 16)) {
			len = _disp_tz_log_stats(count);
			*offp = 0;
		} else {
			len = _disp_tz_log_stats_legacy();
		}
		break;
	case TZDBG_QSEE_LOG:
		len = _disp_qsee_log_stats(count);
		*offp = 0;
		break;
	case TZDBG_HYP_GENERAL:
		len = _disp_hyp_general_stats(count);
		break;
	case TZDBG_HYP_LOG:
		len = _disp_hyp_log_stats(count);
		*offp = 0;
		break;
	default:
		break;
	}

	if (len > count)
		len = count;

	return simple_read_from_buffer(buf, len, offp,
			tzdbg.stat[tz_id].data, len);
}
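
/*
 * Read handler for the encrypted case: the formatted dump is regenerated
 * whenever the previous one has been fully consumed (display_len == 0) and
 * is then handed out in pieces, with display_offset/display_len tracking
 * how much of it user space has read so far.
 */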
static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
	size_t count, loff_t *offp)
{
	int len = 0, ret = 0;
	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);

	pr_debug("%s: tz_id = %d\n", __func__, tz_id);

	if (tz_id >= TZDBG_STATS_MAX) {
		pr_err("invalid encrypted log id %d\n", tz_id);
		return ret;
	}

	if (!stat->display_len) {
		if (tz_id == TZDBG_QSEE_LOG)
			stat->display_len = _disp_encrpted_log_stats(
					&enc_qseelog_info,
					tz_id, ENCRYPTED_QSEE_LOG_ID);
		else
			stat->display_len = _disp_encrpted_log_stats(
					&enc_tzlog_info,
					tz_id, ENCRYPTED_TZ_LOG_ID);
		stat->display_offset = 0;
	}
	len = stat->display_len;
	if (len > count)
		len = count;

	*offp = 0;
	ret = simple_read_from_buffer(buf, len, offp,
			tzdbg.stat[tz_id].data + stat->display_offset,
			count);
	stat->display_offset += ret;
	stat->display_len -= ret;
	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
	pr_debug("display_len = %zu, offset = %zu\n",
			stat->display_len, stat->display_offset);
	return ret;
}

static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
	size_t count, loff_t *offp)
{
	struct seq_file *seq = file->private_data;
	int tz_id = TZDBG_STATS_MAX;

	if (seq)
		tz_id = *(int *)(seq->private);
	else {
		pr_err("%s: Seq data null unable to proceed\n", __func__);
		return 0;
	}

	if (!tzdbg.is_encrypted_log_enabled ||
		(tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG))
		return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
	else
		return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
}

static int tzdbg_procfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, PDE_DATA(inode));
}

static int tzdbg_procfs_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}

struct proc_ops tzdbg_fops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_read = tzdbg_fs_read,
	.proc_open = tzdbg_procfs_open,
	.proc_release = tzdbg_procfs_release,
};
/*
 * Allocates a coherent DMA log buffer and registers it with TZ as the QSEE
 * log buffer (a shmbridge is created first when encrypted logging is off).
 */
static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
{
	int ret = 0;
	void *buf = NULL;
	uint32_t ns_vmids[] = {VMID_HLOS};
	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
	uint32_t ns_vm_nums = 1;

	if (tzdbg.is_enlarged_buf) {
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qseelog-buf-size-v2", &qseelog_buf_size)) {
			pr_debug("Enlarged qseelog buf size isn't defined\n");
			qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
		}
	} else {
		qseelog_buf_size = QSEE_LOG_BUF_SIZE;
	}
	pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);

	buf = dma_alloc_coherent(&pdev->dev,
			qseelog_buf_size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (!tzdbg.is_encrypted_log_enabled) {
		ret = qtee_shmbridge_register(coh_pmem,
				qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
				PERM_READ | PERM_WRITE,
				&qseelog_shmbridge_handle);
		if (ret) {
			pr_err("failed to create bridge for qsee_log buf\n");
			goto exit_free_mem;
		}
	}

	g_qsee_log = (struct tzdbg_log_t *)buf;
	g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
	g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
	g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;

	ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
	if (ret != QSEOS_RESULT_SUCCESS) {
		pr_err("%s: scm_call to register log buf failed, resp result = %d\n",
				__func__, ret);
		goto exit_dereg_bridge;
	}

	return ret;

exit_dereg_bridge:
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
exit_free_mem:
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			buf, coh_pmem);
	return ret;
}
static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
{
	if (!tzdbg.is_encrypted_log_enabled)
		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
	dma_free_coherent(&pdev->dev, qseelog_buf_size,
			(void *)g_qsee_log, coh_pmem);
}
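
/*
 * When encrypted logging is enabled, allocate the two shared buffers that
 * TZ fills with encrypted logs (one for the QSEE log, one for the TZ diag
 * log) and register a shmbridge for each of them.
 */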
static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
{
	int ret = 0;
	uint32_t ns_vmids[] = {VMID_HLOS};
	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
	uint32_t ns_vm_nums = 1;

	if (!tzdbg.is_encrypted_log_enabled)
		return 0;

	/* max encrypted qsee log buf size (includes header, page aligned) */
	enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;

	enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
			enc_qseelog_info.size,
			&enc_qseelog_info.paddr, GFP_KERNEL);
	if (enc_qseelog_info.vaddr == NULL)
		return -ENOMEM;

	ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
			enc_qseelog_info.size, ns_vmids,
			ns_vm_perms, ns_vm_nums,
			PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
	if (ret) {
		pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
		goto exit_free_qseelog;
	}
	pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
			enc_qseelog_info.size);

	enc_tzlog_info.size = debug_rw_buf_size;
	enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
			enc_tzlog_info.size,
			&enc_tzlog_info.paddr, GFP_KERNEL);
	if (enc_tzlog_info.vaddr == NULL)
		goto exit_unreg_qseelog;

	ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
			enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
			PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
	if (ret) {
		pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
		goto exit_free_tzlog;
	}
	pr_debug("Alloc memory for encr_tz_log, size %zu\n",
			enc_tzlog_info.size);

	return 0;

exit_free_tzlog:
	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
exit_unreg_qseelog:
	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
exit_free_qseelog:
	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
	return -ENOMEM;
}

static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
{
	qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
}
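
/*
 * Create one read-only procfs entry per stat under /proc/tzdbg/
 * (e.g. /proc/tzdbg/log, /proc/tzdbg/qsee_log), each carrying its
 * tzdbg_stats_type index as private data; e.g. `cat /proc/tzdbg/log`
 * dumps the TZ ring buffer.
 */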
static int tzdbg_fs_init(struct platform_device *pdev)
{
	int rc = 0;
	int i;
	struct proc_dir_entry *dent_dir;
	struct proc_dir_entry *dent;

	dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
	if (dent_dir == NULL) {
		dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < TZDBG_STATS_MAX; i++) {
		tzdbg.debug_tz[i] = i;
		dent = proc_create_data(tzdbg.stat[i].name,
				0444, dent_dir,
				&tzdbg_fops, &tzdbg.debug_tz[i]);
		if (dent == NULL) {
			dev_err(&pdev->dev, "TZ proc_create_data failed\n");
			rc = -ENOMEM;
			goto err;
		}
	}
	platform_set_drvdata(pdev, dent_dir);
	return 0;
err:
	remove_proc_entry(TZDBG_DIR_NAME, NULL);

	return rc;
}

static void tzdbg_fs_exit(struct platform_device *pdev)
{
	struct proc_dir_entry *dent_dir;

	dent_dir = platform_get_drvdata(pdev);
	if (dent_dir)
		remove_proc_entry(TZDBG_DIR_NAME, NULL);
}
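
/*
 * The hypervisor diag region is discovered indirectly: the DT properties
 * "hyplog-address-offset" and "hyplog-size-offset" give offsets into the
 * I/O region mapped in tz_log_probe() where the hyp log's physical address
 * and size can be read. That region is then ioremapped and a kernel-side
 * copy buffer is allocated for it.
 */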
static int __update_hypdbg_base(struct platform_device *pdev,
		void __iomem *virt_iobase)
{
	phys_addr_t hypdiag_phy_iobase;
	uint32_t hyp_address_offset;
	uint32_t hyp_size_offset;
	struct hypdbg_t *hyp;
	uint32_t *ptr = NULL;

	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
			&hyp_address_offset)) {
		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
			&hyp_size_offset)) {
		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
		return -EINVAL;
	}

	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
			hyp_size_offset);

	tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
			hypdiag_phy_iobase,
			tzdbg.hyp_debug_rw_buf_size);
	if (!tzdbg.hyp_virt_iobase) {
		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pa, len=%u\n",
				&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
		return -ENXIO;
	}

	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
	hyp = tzdbg.hyp_diag_buf;
	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
	return 0;
}
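
/*
 * Query the TZ feature versions over SCM: the log feature id supplies the
 * overall QSEE/TZ version, while the diag feature id is decoded into
 * 10-bit major/minor fields (TZBSP_FVER_*). Major version 9 with one of
 * the known minor versions (V2/V21/V22) selects the enlarged log buffer
 * layout.
 */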
static int tzdbg_get_tz_version(void)
{
	u64 version;
	int ret = 0;

	ret = qcom_scm_get_tz_log_feat_id(&version);
	if (ret) {
		pr_err("%s: scm_call to get tz version failed\n",
				__func__);
		return ret;
	}
	tzdbg.tz_version = version;

	ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
	if (ret) {
		pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
				__func__, ret);
		return ret;
	}
	pr_warn("tz diag version is %llx\n", version);
	tzdbg.tz_diag_major_version =
		((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
	tzdbg.tz_diag_minor_version =
		((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);

	if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
		switch (tzdbg.tz_diag_minor_version) {
		case TZBSP_DIAG_MINOR_VERSION_V2:
		case TZBSP_DIAG_MINOR_VERSION_V21:
		case TZBSP_DIAG_MINOR_VERSION_V22:
			tzdbg.is_enlarged_buf = true;
			break;
		default:
			tzdbg.is_enlarged_buf = false;
		}
	} else {
		tzdbg.is_enlarged_buf = false;
	}
	return ret;
}
static void tzdbg_query_encrypted_log(void)
{
	int ret = 0;
	uint64_t enabled;

	ret = qcom_scm_query_encrypted_log_feature(&enabled);
	if (ret) {
		pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
		tzdbg.is_encrypted_log_enabled = false;
	} else {
		pr_warn("encrypted qseelog enabled is %llu\n", enabled);
		tzdbg.is_encrypted_log_enabled = enabled;
	}
}
/*
 * Driver functions
 */
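
/*
 * Probe flow: query the TZ/diag versions, map the register that holds the
 * physical address of the diag area, optionally set up hypervisor logging,
 * then either map the diag area directly (unencrypted) or flag support for
 * fully encrypted logs. Finally the QSEE log buffer is registered with TZ,
 * the encrypted log buffers and the display buffer are allocated, and the
 * procfs entries are created.
 */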
static int tz_log_probe(struct platform_device *pdev)
{
	struct resource *resource;
	void __iomem *virt_iobase;
	phys_addr_t tzdiag_phy_iobase;
	uint32_t *ptr = NULL;
	int ret = 0;

	ret = tzdbg_get_tz_version();
	if (ret)
		return ret;

	/*
	 * Get address that stores the physical location of diagnostic data
	 */
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev,
				"%s: ERROR Missing MEM resource\n", __func__);
		return -ENXIO;
	}

	/*
	 * Get the debug buffer size
	 */
	debug_rw_buf_size = resource_size(resource);

	/*
	 * Map address that stores the physical location of diagnostic data
	 */
	virt_iobase = devm_ioremap(&pdev->dev, resource->start,
			debug_rw_buf_size);
	if (!virt_iobase) {
		dev_err(&pdev->dev,
			"%s: ERROR could not ioremap: start=%pa, len=%u\n",
			__func__, &resource->start,
			(unsigned int)(debug_rw_buf_size));
		return -ENXIO;
	}

	if (pdev->dev.of_node) {
		tzdbg.is_hyplog_enabled = of_property_read_bool(
			(&pdev->dev)->of_node, "qcom,hyplog-enabled");
		if (tzdbg.is_hyplog_enabled) {
			ret = __update_hypdbg_base(pdev, virt_iobase);
			if (ret) {
				dev_err(&pdev->dev,
					"%s: failed to get hypdbg_base, ret %d\n",
					__func__, ret);
				return -EINVAL;
			}
		} else {
			dev_info(&pdev->dev, "Hyp log service not supported\n");
		}
	} else {
		dev_dbg(&pdev->dev, "Device tree data is not found\n");
	}

	/*
	 * Retrieve the address of diagnostic data
	 */
	tzdiag_phy_iobase = readl_relaxed(virt_iobase);

	tzdbg_query_encrypted_log();

	/*
	 * Map the diagnostic information area if encryption is disabled
	 */
	if (!tzdbg.is_encrypted_log_enabled) {
		tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
				tzdiag_phy_iobase, debug_rw_buf_size);
		if (!tzdbg.virt_iobase) {
			dev_err(&pdev->dev,
				"%s: could not ioremap: start=%pa, len=%u\n",
				__func__, &tzdiag_phy_iobase,
				debug_rw_buf_size);
			return -ENXIO;
		}
		/* allocate diag_buf */
		ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
		tzdbg.diag_buf = (struct tzdbg_t *)ptr;
	} else {
		if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
			(tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
			tzdbg.is_full_encrypted_tz_logs_supported = true;
		if (pdev->dev.of_node) {
			tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
				(&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
		}
	}

	/* register unencrypted qsee log buffer */
	ret = tzdbg_register_qsee_log_buf(pdev);
	if (ret)
		goto exit_free_diag_buf;

	/* allocate encrypted qsee and tz log buffer */
	ret = tzdbg_allocate_encrypted_log_buf(pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"%s: failed to allocate encrypted log buffer\n",
			__func__);
		goto exit_free_qsee_log_buf;
	}

	/* allocate display_buf */
	if (UINT_MAX / 4 < qseelog_buf_size) {
		pr_err("display_buf_size integer overflow\n");
		goto exit_free_qsee_log_buf;
	}
	display_buf_size = qseelog_buf_size * 4;
	tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
			&disp_buf_paddr, GFP_KERNEL);
	if (tzdbg.disp_buf == NULL) {
		ret = -ENOMEM;
		goto exit_free_encr_log_buf;
	}

	if (tzdbg_fs_init(pdev))
		goto exit_free_disp_buf;
	return 0;

exit_free_disp_buf:
	dma_free_coherent(&pdev->dev, display_buf_size,
			(void *)tzdbg.disp_buf, disp_buf_paddr);
exit_free_encr_log_buf:
	tzdbg_free_encrypted_log_buf(pdev);
exit_free_qsee_log_buf:
	tzdbg_free_qsee_log_buf(pdev);
exit_free_diag_buf:
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return -ENXIO;
}
static int tz_log_remove(struct platform_device *pdev)
{
	tzdbg_fs_exit(pdev);
	dma_free_coherent(&pdev->dev, display_buf_size,
			(void *)tzdbg.disp_buf, disp_buf_paddr);
	tzdbg_free_encrypted_log_buf(pdev);
	tzdbg_free_qsee_log_buf(pdev);
	if (!tzdbg.is_encrypted_log_enabled)
		kfree(tzdbg.diag_buf);
	return 0;
}

static const struct of_device_id tzlog_match[] = {
	{.compatible = "qcom,tz-log"},
	{}
};

static struct platform_driver tz_log_driver = {
	.probe = tz_log_probe,
	.remove = tz_log_remove,
	.driver = {
		.name = "tz_log",
		.of_match_table = tzlog_match,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(tz_log_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TZ Log driver");