  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * AMD SoC Power Management Controller Driver
  4. *
  5. * Copyright (c) 2020, Advanced Micro Devices, Inc.
  6. * All Rights Reserved.
  7. *
  8. * Author: Shyam Sundar S K <[email protected]>
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <asm/amd_nb.h>
  12. #include <linux/acpi.h>
  13. #include <linux/bitfield.h>
  14. #include <linux/bits.h>
  15. #include <linux/debugfs.h>
  16. #include <linux/delay.h>
  17. #include <linux/io.h>
  18. #include <linux/iopoll.h>
  19. #include <linux/limits.h>
  20. #include <linux/module.h>
  21. #include <linux/pci.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/rtc.h>
  24. #include <linux/serio.h>
  25. #include <linux/suspend.h>
  26. #include <linux/seq_file.h>
  27. #include <linux/uaccess.h>
  28. /* SMU communication registers */
  29. #define AMD_PMC_REGISTER_MESSAGE 0x538
  30. #define AMD_PMC_REGISTER_RESPONSE 0x980
  31. #define AMD_PMC_REGISTER_ARGUMENT 0x9BC
  32. /* PMC Scratch Registers */
  33. #define AMD_PMC_SCRATCH_REG_CZN 0x94
  34. #define AMD_PMC_SCRATCH_REG_YC 0xD14
  35. /* STB Registers */
  36. #define AMD_PMC_STB_PMI_0 0x03E30600
  37. #define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
  38. #define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
  39. #define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
  40. /* STB S2D(Spill to DRAM) has different message port offset */
  41. #define STB_SPILL_TO_DRAM 0xBE
  42. #define AMD_S2D_REGISTER_MESSAGE 0xA20
  43. #define AMD_S2D_REGISTER_RESPONSE 0xA80
  44. #define AMD_S2D_REGISTER_ARGUMENT 0xA88
  45. /* STB Spill to DRAM Parameters */
  46. #define S2D_TELEMETRY_BYTES_MAX 0x100000
  47. #define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
  48. /* Base address of SMU for mapping physical address to virtual address */
  49. #define AMD_PMC_MAPPING_SIZE 0x01000
  50. #define AMD_PMC_BASE_ADDR_OFFSET 0x10000
  51. #define AMD_PMC_BASE_ADDR_LO 0x13B102E8
  52. #define AMD_PMC_BASE_ADDR_HI 0x13B102EC
  53. #define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
  54. #define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
  55. /* SMU Response Codes */
  56. #define AMD_PMC_RESULT_OK 0x01
  57. #define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
  58. #define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
  59. #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
  60. #define AMD_PMC_RESULT_FAILED 0xFF
  61. /* FCH SSC Registers */
  62. #define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
  63. #define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
  64. #define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
  65. #define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
  66. #define FCH_SSC_MAPPING_SIZE 0x800
  67. #define FCH_BASE_PHY_ADDR_LOW 0xFED81100
  68. #define FCH_BASE_PHY_ADDR_HIGH 0x00000000
  69. /* SMU Message Definations */
  70. #define SMU_MSG_GETSMUVERSION 0x02
  71. #define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
  72. #define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
  73. #define SMU_MSG_LOG_START 0x06
  74. #define SMU_MSG_LOG_RESET 0x07
  75. #define SMU_MSG_LOG_DUMP_DATA 0x08
  76. #define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
  77. /* List of supported CPU ids */
  78. #define AMD_CPU_ID_RV 0x15D0
  79. #define AMD_CPU_ID_RN 0x1630
  80. #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
  81. #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
  82. #define AMD_CPU_ID_YC 0x14B5
  83. #define AMD_CPU_ID_CB 0x14D8
  84. #define AMD_CPU_ID_PS 0x14E8
  85. #define PMC_MSG_DELAY_MIN_US 50
  86. #define RESPONSE_REGISTER_LOOP_MAX 20000
  87. #define SOC_SUBSYSTEM_IP_MAX 12
  88. #define DELAY_MIN_US 2000
  89. #define DELAY_MAX_US 3000
  90. #define FIFO_SIZE 4096
/* SMU mailbox message IDs understood by the PMC firmware */
enum amd_pmc_def {
	MSG_TEST = 0x01,	/* loopback/test message */
	MSG_OS_HINT_PCO,	/* OS s0i3 hint for Picasso-class parts */
	MSG_OS_HINT_RN,		/* OS s0i3 hint for Renoir and later parts */
};

/* Argument IDs for the STB Spill-to-DRAM (S2D) mailbox message */
enum s2d_arg {
	S2D_TELEMETRY_SIZE = 0x01,	/* query telemetry region size */
	S2D_PHYS_ADDR_LOW,		/* lower 32 bits of the DRAM buffer */
	S2D_PHYS_ADDR_HIGH,		/* upper 32 bits of the DRAM buffer */
};
/* Maps an IP block name to its bit in the SMU active-IPs bitmask */
struct amd_pmc_bit_map {
	const char *name;
	u32 bit_mask;
};

/*
 * SoC15 IP blocks reported via SMU_MSG_GET_SUP_CONSTRAINTS; the index
 * order matches the per-IP arrays at the end of struct smu_metrics.
 */
static const struct amd_pmc_bit_map soc15_ip_blk[] = {
	{"DISPLAY", BIT(0)},
	{"CPU", BIT(1)},
	{"GFX", BIT(2)},
	{"VDD", BIT(3)},
	{"ACP", BIT(4)},
	{"VCN", BIT(5)},
	{"ISP", BIT(6)},
	{"NBIO", BIT(7)},
	{"DF", BIT(8)},
	{"USB0", BIT(9)},
	{"USB1", BIT(10)},
	{"LAPIC", BIT(11)},
	{}
};
/* Driver state; a single static instance (pmc) backs the platform device */
struct amd_pmc_dev {
	void __iomem *regbase;		/* mapped SMU mailbox registers */
	void __iomem *smu_virt_addr;	/* mapped SMU metrics table in DRAM */
	void __iomem *stb_virt_addr;	/* mapped STB spill-to-DRAM buffer */
	void __iomem *fch_virt_addr;	/* mapped FCH S0i3 timestamp registers */
	bool msg_port;			/* true: route commands to the S2D mailbox */
	u32 base_addr;
	u32 cpu_id;			/* PCI device ID of the root complex */
	u32 active_ips;			/* bitmask of active IPs from SMU */
	/* SMU version information */
	u8 smu_program;
	u8 major;
	u8 minor;
	u8 rev;
	struct device *dev;
	struct pci_dev *rdev;		/* root complex PCI dev (holds a reference) */
	struct mutex lock; /* generic mutex lock */
	struct dentry *dbgfs_dir;	/* debugfs directory "amd_pmc" */
};
/* Module parameter: expose the STB (Smart Trace Buffer) via debugfs */
static bool enable_stb;
module_param(enable_stb, bool, 0644);
MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");

/* Single device instance; the platform driver binds at most one device */
static struct amd_pmc_dev pmc;

/* Forward declarations */
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
#endif
/* Read a 32-bit SMU mailbox register at @reg_offset from the mapped base */
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
/* Write @val to the 32-bit SMU mailbox register at @reg_offset */
static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
/*
 * Layout of the PMFW metrics table in DRAM (read with memcpy_fromio).
 * Per-IP arrays are indexed like soc15_ip_blk[]. Time units are
 * microseconds as printed by smu_fw_info_show().
 */
struct smu_metrics {
	u32 table_version;
	u32 hint_count;			/* number of OS hints sent so far */
	u32 s0i3_last_entry_status;	/* non-zero if last s0i3 entry succeeded */
	u32 timein_s0i2;
	u64 timeentering_s0i3_lastcapture;
	u64 timeentering_s0i3_totaltime;
	u64 timeto_resume_to_os_lastcapture;
	u64 timeto_resume_to_os_totaltime;
	u64 timein_s0i3_lastcapture;
	u64 timein_s0i3_totaltime;
	u64 timein_swdrips_lastcapture;
	u64 timein_swdrips_totaltime;
	u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
	u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
} __packed;
  172. static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
  173. {
  174. struct amd_pmc_dev *dev = filp->f_inode->i_private;
  175. u32 size = FIFO_SIZE * sizeof(u32);
  176. u32 *buf;
  177. int rc;
  178. buf = kzalloc(size, GFP_KERNEL);
  179. if (!buf)
  180. return -ENOMEM;
  181. rc = amd_pmc_read_stb(dev, buf);
  182. if (rc) {
  183. kfree(buf);
  184. return rc;
  185. }
  186. filp->private_data = buf;
  187. return rc;
  188. }
  189. static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
  190. loff_t *pos)
  191. {
  192. if (!filp->private_data)
  193. return -EINVAL;
  194. return simple_read_from_buffer(buf, size, pos, filp->private_data,
  195. FIFO_SIZE * sizeof(u32));
  196. }
/* Free the FIFO snapshot allocated in amd_pmc_stb_debugfs_open() */
static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}
/* debugfs "stb_read" fops for parts using the SMN FIFO-based STB */
static const struct file_operations amd_pmc_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open,
	.read = amd_pmc_stb_debugfs_read,
	.release = amd_pmc_stb_debugfs_release,
};
  208. static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
  209. {
  210. struct amd_pmc_dev *dev = filp->f_inode->i_private;
  211. u32 *buf;
  212. buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
  213. if (!buf)
  214. return -ENOMEM;
  215. memcpy_fromio(buf, dev->stb_virt_addr, S2D_TELEMETRY_BYTES_MAX);
  216. filp->private_data = buf;
  217. return 0;
  218. }
  219. static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
  220. loff_t *pos)
  221. {
  222. if (!filp->private_data)
  223. return -EINVAL;
  224. return simple_read_from_buffer(buf, size, pos, filp->private_data,
  225. S2D_TELEMETRY_BYTES_MAX);
  226. }
/* Free the telemetry snapshot allocated in amd_pmc_stb_debugfs_open_v2() */
static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}
/* debugfs "stb_read" fops for parts using the spill-to-DRAM STB (YC/CB/PS) */
static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open_v2,
	.read = amd_pmc_stb_debugfs_read_v2,
	.release = amd_pmc_stb_debugfs_release_v2,
};
  238. static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
  239. {
  240. if (dev->cpu_id == AMD_CPU_ID_PCO) {
  241. dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n");
  242. return -EINVAL;
  243. }
  244. /* Get Active devices list from SMU */
  245. if (!dev->active_ips)
  246. amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
  247. /* Get dram address */
  248. if (!dev->smu_virt_addr) {
  249. u32 phys_addr_low, phys_addr_hi;
  250. u64 smu_phys_addr;
  251. amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
  252. amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
  253. smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
  254. dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr,
  255. sizeof(struct smu_metrics));
  256. if (!dev->smu_virt_addr)
  257. return -ENOMEM;
  258. }
  259. /* Start the logging */
  260. amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, 0);
  261. amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
  262. return 0;
  263. }
  264. static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
  265. {
  266. if (!pdev->smu_virt_addr) {
  267. int ret = amd_pmc_setup_smu_logging(pdev);
  268. if (ret)
  269. return ret;
  270. }
  271. if (pdev->cpu_id == AMD_CPU_ID_PCO)
  272. return -ENODEV;
  273. memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
  274. return 0;
  275. }
  276. #ifdef CONFIG_SUSPEND
  277. static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
  278. {
  279. struct smu_metrics table;
  280. if (get_metrics_table(pdev, &table))
  281. return;
  282. if (!table.s0i3_last_entry_status)
  283. dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n");
  284. else
  285. dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
  286. table.timein_s0i3_lastcapture);
  287. }
  288. #endif
  289. static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
  290. {
  291. int rc;
  292. u32 val;
  293. if (dev->cpu_id == AMD_CPU_ID_PCO)
  294. return -ENODEV;
  295. rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
  296. if (rc)
  297. return rc;
  298. dev->smu_program = (val >> 24) & GENMASK(7, 0);
  299. dev->major = (val >> 16) & GENMASK(7, 0);
  300. dev->minor = (val >> 8) & GENMASK(7, 0);
  301. dev->rev = (val >> 0) & GENMASK(7, 0);
  302. dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
  303. dev->smu_program, dev->major, dev->minor, dev->rev);
  304. return 0;
  305. }
  306. static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
  307. char *buf)
  308. {
  309. struct amd_pmc_dev *dev = dev_get_drvdata(d);
  310. if (!dev->major) {
  311. int rc = amd_pmc_get_smu_version(dev);
  312. if (rc)
  313. return rc;
  314. }
  315. return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
  316. }
  317. static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
  318. char *buf)
  319. {
  320. struct amd_pmc_dev *dev = dev_get_drvdata(d);
  321. if (!dev->major) {
  322. int rc = amd_pmc_get_smu_version(dev);
  323. if (rc)
  324. return rc;
  325. }
  326. return sysfs_emit(buf, "%u\n", dev->smu_program);
  327. }
  328. static DEVICE_ATTR_RO(smu_fw_version);
  329. static DEVICE_ATTR_RO(smu_program);
  330. static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
  331. {
  332. struct device *dev = kobj_to_dev(kobj);
  333. struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
  334. if (pdev->cpu_id == AMD_CPU_ID_PCO)
  335. return 0;
  336. return 0444;
  337. }
/* sysfs attributes exposed via the driver's dev_groups */
static struct attribute *pmc_attrs[] = {
	&dev_attr_smu_fw_version.attr,
	&dev_attr_smu_program.attr,
	NULL,
};

static struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = pmc_attr_is_visible,	/* hidden on Picasso */
};

static const struct attribute_group *pmc_groups[] = {
	&pmc_attr_group,
	NULL,
};
  351. static int smu_fw_info_show(struct seq_file *s, void *unused)
  352. {
  353. struct amd_pmc_dev *dev = s->private;
  354. struct smu_metrics table;
  355. int idx;
  356. if (get_metrics_table(dev, &table))
  357. return -EINVAL;
  358. seq_puts(s, "\n=== SMU Statistics ===\n");
  359. seq_printf(s, "Table Version: %d\n", table.table_version);
  360. seq_printf(s, "Hint Count: %d\n", table.hint_count);
  361. seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
  362. "Unknown/Fail");
  363. seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
  364. seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
  365. seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
  366. table.timeto_resume_to_os_lastcapture);
  367. seq_puts(s, "\n=== Active time (in us) ===\n");
  368. for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
  369. if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
  370. seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
  371. table.timecondition_notmet_lastcapture[idx]);
  372. }
  373. return 0;
  374. }
  375. DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
  376. static int s0ix_stats_show(struct seq_file *s, void *unused)
  377. {
  378. struct amd_pmc_dev *dev = s->private;
  379. u64 entry_time, exit_time, residency;
  380. /* Use FCH registers to get the S0ix stats */
  381. if (!dev->fch_virt_addr) {
  382. u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
  383. u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
  384. u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
  385. dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
  386. if (!dev->fch_virt_addr)
  387. return -ENOMEM;
  388. }
  389. entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
  390. entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
  391. exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
  392. exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
  393. /* It's in 48MHz. We need to convert it */
  394. residency = exit_time - entry_time;
  395. do_div(residency, 48);
  396. seq_puts(s, "=== S0ix statistics ===\n");
  397. seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
  398. seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
  399. seq_printf(s, "Residency Time: %lld\n", residency);
  400. return 0;
  401. }
  402. DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
/*
 * Read the SMU idlemask scratch register and report it via dev_dbg
 * (when @dev is non-NULL) and/or a seq_file (when @s is non-NULL).
 * The scratch register location depends on the SoC generation, and on
 * CZN the register only exists from a minimum PMFW version onward.
 */
static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
				 struct seq_file *s)
{
	u32 val;
	int rc;

	switch (pdev->cpu_id) {
	case AMD_CPU_ID_CZN:
		/* we haven't yet read SMU version */
		if (!pdev->major) {
			rc = amd_pmc_get_smu_version(pdev);
			if (rc)
				return rc;
		}
		/* NOTE(review): gate looks like "PMFW >= 55.37"; the >= 55
		 * (rather than == 55/56) leg is assumed intentional — confirm
		 * against PMFW release notes.
		 */
		if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
			val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
		else
			return -EINVAL;
		break;
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
	case AMD_CPU_ID_PS:
		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
		break;
	default:
		return -EINVAL;
	}

	if (dev)
		dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);

	if (s)
		seq_printf(s, "SMU idlemask : 0x%x\n", val);

	return 0;
}
/* debugfs front-end: print the SMU idlemask into the seq_file */
static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
{
	return amd_pmc_idlemask_read(s->private, NULL, s);
}
DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
/* Remove the whole "amd_pmc" debugfs directory and its files */
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
  444. static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
  445. {
  446. dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
  447. debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
  448. &smu_fw_info_fops);
  449. debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
  450. &s0ix_stats_fops);
  451. debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
  452. &amd_pmc_idlemask_fops);
  453. /* Enable STB only when the module_param is set */
  454. if (enable_stb) {
  455. if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB ||
  456. dev->cpu_id == AMD_CPU_ID_PS)
  457. debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
  458. &amd_pmc_stb_debugfs_fops_v2);
  459. else
  460. debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
  461. &amd_pmc_stb_debugfs_fops);
  462. }
  463. }
  464. static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
  465. {
  466. u32 value, message, argument, response;
  467. if (dev->msg_port) {
  468. message = AMD_S2D_REGISTER_MESSAGE;
  469. argument = AMD_S2D_REGISTER_ARGUMENT;
  470. response = AMD_S2D_REGISTER_RESPONSE;
  471. } else {
  472. message = AMD_PMC_REGISTER_MESSAGE;
  473. argument = AMD_PMC_REGISTER_ARGUMENT;
  474. response = AMD_PMC_REGISTER_RESPONSE;
  475. }
  476. value = amd_pmc_reg_read(dev, response);
  477. dev_dbg(dev->dev, "AMD_PMC_REGISTER_RESPONSE:%x\n", value);
  478. value = amd_pmc_reg_read(dev, argument);
  479. dev_dbg(dev->dev, "AMD_PMC_REGISTER_ARGUMENT:%x\n", value);
  480. value = amd_pmc_reg_read(dev, message);
  481. dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
  482. }
/*
 * Send one command to the SMU mailbox and (optionally) read back a result.
 *
 * @arg:  32-bit argument written to the argument register
 * @data: when @ret is true, receives the 32-bit result
 * @msg:  message ID
 * @ret:  true if the command returns data in the argument register
 *
 * The mailbox protocol is strictly ordered under dev->lock:
 * wait-for-idle, clear response, write argument, write message,
 * poll response, decode status. dev->msg_port selects between the
 * regular PMC mailbox and the S2D (spill-to-DRAM) mailbox.
 *
 * Returns 0 on success or a negative errno.
 */
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
	int rc;
	u32 val, message, argument, response;

	mutex_lock(&dev->lock);

	if (dev->msg_port) {
		message = AMD_S2D_REGISTER_MESSAGE;
		argument = AMD_S2D_REGISTER_ARGUMENT;
		response = AMD_S2D_REGISTER_RESPONSE;
	} else {
		message = AMD_PMC_REGISTER_MESSAGE;
		argument = AMD_PMC_REGISTER_ARGUMENT;
		response = AMD_PMC_REGISTER_RESPONSE;
	}

	/* Wait until the SMU is idle (non-zero response from previous cmd) */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmc_reg_write(dev, response, 0);

	/* Write argument into argument register */
	amd_pmc_reg_write(dev, argument, arg);

	/* Write message ID to message ID register */
	amd_pmc_reg_write(dev, message, msg);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	switch (val) {
	case AMD_PMC_RESULT_OK:
		if (ret) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmc_reg_read(dev, argument);
		}
		break;
	case AMD_PMC_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMC_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	amd_pmc_dump_registers(dev);
	return rc;
}
  547. #ifdef CONFIG_SUSPEND
  548. static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
  549. {
  550. switch (dev->cpu_id) {
  551. case AMD_CPU_ID_PCO:
  552. return MSG_OS_HINT_PCO;
  553. case AMD_CPU_ID_RN:
  554. case AMD_CPU_ID_YC:
  555. case AMD_CPU_ID_CB:
  556. case AMD_CPU_ID_PS:
  557. return MSG_OS_HINT_RN;
  558. }
  559. return -EINVAL;
  560. }
/*
 * CZN workaround: on affected PMFW versions, leaving IRQ1 (keyboard)
 * armed as a wakeup source trips a platform firmware bug, so disable
 * it on the serio0 device before suspend.
 *
 * NOTE(review): the gate reads as "skip for PMFW newer than 64.65" —
 * assumed to match the firmware fix version; confirm against PMFW notes.
 */
static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
{
	struct device *d;
	int rc;

	/* SMU version not read yet: fetch it so the gate below is valid */
	if (!pdev->major) {
		rc = amd_pmc_get_smu_version(pdev);
		if (rc)
			return rc;
	}

	if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
		return 0;

	d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
	if (!d)
		return 0;
	if (device_may_wakeup(d)) {
		dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
		disable_irq_wake(1);
		device_set_wakeup_enable(d, false);
	}
	/* Drop the reference taken by bus_find_device_by_name() */
	put_device(d);

	return 0;
}
  583. static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
  584. {
  585. struct rtc_device *rtc_device;
  586. time64_t then, now, duration;
  587. struct rtc_wkalrm alarm;
  588. struct rtc_time tm;
  589. int rc;
  590. /* we haven't yet read SMU version */
  591. if (!pdev->major) {
  592. rc = amd_pmc_get_smu_version(pdev);
  593. if (rc)
  594. return rc;
  595. }
  596. if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
  597. return 0;
  598. rtc_device = rtc_class_open("rtc0");
  599. if (!rtc_device)
  600. return 0;
  601. rc = rtc_read_alarm(rtc_device, &alarm);
  602. if (rc)
  603. return rc;
  604. if (!alarm.enabled) {
  605. dev_dbg(pdev->dev, "alarm not enabled\n");
  606. return 0;
  607. }
  608. rc = rtc_read_time(rtc_device, &tm);
  609. if (rc)
  610. return rc;
  611. then = rtc_tm_to_time64(&alarm.time);
  612. now = rtc_tm_to_time64(&tm);
  613. duration = then-now;
  614. /* in the past */
  615. if (then < now)
  616. return 0;
  617. /* will be stored in upper 16 bits of s0i3 hint argument,
  618. * so timer wakeup from s0i3 is limited to ~18 hours or less
  619. */
  620. if (duration <= 4 || duration > U16_MAX)
  621. return -EINVAL;
  622. *arg |= (duration << 16);
  623. rc = rtc_alarm_irq_enable(rtc_device, 0);
  624. dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
  625. return rc;
  626. }
  627. static void amd_pmc_s2idle_prepare(void)
  628. {
  629. struct amd_pmc_dev *pdev = &pmc;
  630. int rc;
  631. u8 msg;
  632. u32 arg = 1;
  633. /* Reset and Start SMU logging - to monitor the s0i3 stats */
  634. amd_pmc_setup_smu_logging(pdev);
  635. /* Activate CZN specific RTC functionality */
  636. if (pdev->cpu_id == AMD_CPU_ID_CZN) {
  637. rc = amd_pmc_verify_czn_rtc(pdev, &arg);
  638. if (rc) {
  639. dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
  640. return;
  641. }
  642. }
  643. msg = amd_pmc_get_os_hint(pdev);
  644. rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
  645. if (rc) {
  646. dev_err(pdev->dev, "suspend failed: %d\n", rc);
  647. return;
  648. }
  649. rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
  650. if (rc)
  651. dev_err(pdev->dev, "error writing to STB: %d\n", rc);
  652. }
  653. static void amd_pmc_s2idle_check(void)
  654. {
  655. struct amd_pmc_dev *pdev = &pmc;
  656. struct smu_metrics table;
  657. int rc;
  658. /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
  659. if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
  660. table.s0i3_last_entry_status)
  661. usleep_range(10000, 20000);
  662. /* Dump the IdleMask before we add to the STB */
  663. amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
  664. rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
  665. if (rc)
  666. dev_err(pdev->dev, "error writing to STB: %d\n", rc);
  667. }
  668. static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
  669. {
  670. if (pdev->cpu_id == AMD_CPU_ID_PCO)
  671. return -ENODEV;
  672. return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
  673. }
  674. static void amd_pmc_s2idle_restore(void)
  675. {
  676. struct amd_pmc_dev *pdev = &pmc;
  677. int rc;
  678. u8 msg;
  679. msg = amd_pmc_get_os_hint(pdev);
  680. rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
  681. if (rc)
  682. dev_err(pdev->dev, "resume failed: %d\n", rc);
  683. /* Let SMU know that we are looking for stats */
  684. amd_pmc_dump_data(pdev);
  685. rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
  686. if (rc)
  687. dev_err(pdev->dev, "error writing to STB: %d\n", rc);
  688. /* Notify on failed entry */
  689. amd_pmc_validate_deepest(pdev);
  690. }
/* Callbacks invoked by the ACPI LPS0 (s2idle) core around suspend */
static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
	.prepare = amd_pmc_s2idle_prepare,
	.check = amd_pmc_s2idle_check,
	.restore = amd_pmc_s2idle_restore,
};
  696. static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
  697. {
  698. struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
  699. if (pdev->cpu_id == AMD_CPU_ID_CZN) {
  700. int rc = amd_pmc_czn_wa_irq1(pdev);
  701. if (rc) {
  702. dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
  703. return rc;
  704. }
  705. }
  706. return 0;
  707. }
  708. static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
  709. #endif
/* Root-complex PCI IDs this driver knows how to talk to (newest first) */
static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
	{ }
};
  720. static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
  721. {
  722. u32 phys_addr_low, phys_addr_hi;
  723. u64 stb_phys_addr;
  724. u32 size = 0;
  725. /* Spill to DRAM feature uses separate SMU message port */
  726. dev->msg_port = 1;
  727. amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, STB_SPILL_TO_DRAM, 1);
  728. if (size != S2D_TELEMETRY_BYTES_MAX)
  729. return -EIO;
  730. /* Get STB DRAM address */
  731. amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, STB_SPILL_TO_DRAM, 1);
  732. amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, STB_SPILL_TO_DRAM, 1);
  733. stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
  734. /* Clear msg_port for other SMU operation */
  735. dev->msg_port = 0;
  736. dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, S2D_TELEMETRY_DRAMBYTES_MAX);
  737. if (!dev->stb_virt_addr)
  738. return -ENOMEM;
  739. return 0;
  740. }
  741. #ifdef CONFIG_SUSPEND
  742. static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
  743. {
  744. int err;
  745. err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
  746. if (err) {
  747. dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
  748. return pcibios_err_to_errno(err);
  749. }
  750. return 0;
  751. }
  752. #endif
  753. static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
  754. {
  755. int i, err;
  756. for (i = 0; i < FIFO_SIZE; i++) {
  757. err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
  758. if (err) {
  759. dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
  760. return pcibios_err_to_errno(err);
  761. }
  762. }
  763. return 0;
  764. }
  765. static int amd_pmc_probe(struct platform_device *pdev)
  766. {
  767. struct amd_pmc_dev *dev = &pmc;
  768. struct pci_dev *rdev;
  769. u32 base_addr_lo, base_addr_hi;
  770. u64 base_addr;
  771. int err;
  772. u32 val;
  773. dev->dev = &pdev->dev;
  774. rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
  775. if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
  776. err = -ENODEV;
  777. goto err_pci_dev_put;
  778. }
  779. dev->cpu_id = rdev->device;
  780. dev->rdev = rdev;
  781. err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
  782. if (err) {
  783. dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
  784. err = pcibios_err_to_errno(err);
  785. goto err_pci_dev_put;
  786. }
  787. base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
  788. err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
  789. if (err) {
  790. dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
  791. err = pcibios_err_to_errno(err);
  792. goto err_pci_dev_put;
  793. }
  794. base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
  795. base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
  796. dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
  797. AMD_PMC_MAPPING_SIZE);
  798. if (!dev->regbase) {
  799. err = -ENOMEM;
  800. goto err_pci_dev_put;
  801. }
  802. mutex_init(&dev->lock);
  803. if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
  804. err = amd_pmc_s2d_init(dev);
  805. if (err)
  806. goto err_pci_dev_put;
  807. }
  808. platform_set_drvdata(pdev, dev);
  809. #ifdef CONFIG_SUSPEND
  810. err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
  811. if (err)
  812. dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
  813. #endif
  814. amd_pmc_dbgfs_register(dev);
  815. return 0;
  816. err_pci_dev_put:
  817. pci_dev_put(rdev);
  818. return err;
  819. }
  820. static int amd_pmc_remove(struct platform_device *pdev)
  821. {
  822. struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
  823. #ifdef CONFIG_SUSPEND
  824. acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
  825. #endif
  826. amd_pmc_dbgfs_unregister(dev);
  827. pci_dev_put(dev->rdev);
  828. mutex_destroy(&dev->lock);
  829. return 0;
  830. }
/* ACPI HIDs under which platform firmware exposes the PMC device */
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
	{"AMDI0005", 0},
	{"AMDI0006", 0},
	{"AMDI0007", 0},
	{"AMDI0008", 0},
	{"AMDI0009", 0},
	{"AMD0004", 0},
	{"AMD0005", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
/* Platform driver glue: ACPI match, sysfs groups, optional PM ops */
static struct platform_driver amd_pmc_driver = {
	.driver = {
		.name = "amd_pmc",
		.acpi_match_table = amd_pmc_acpi_ids,
		.dev_groups = pmc_groups,	/* smu_fw_version / smu_program */
#ifdef CONFIG_SUSPEND
		.pm = &amd_pmc_pm,
#endif
	},
	.probe = amd_pmc_probe,
	.remove = amd_pmc_remove,
};
module_platform_driver(amd_pmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AMD PMC Driver");