qti_epm_hw.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "qti_epm: %s: " fmt, __func__
  6. #include <linux/device.h>
  7. #include <linux/errno.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/nvmem-consumer.h>
  12. #include <linux/of.h>
  13. #include <linux/of_irq.h>
  14. #include <linux/of_platform.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/sched/clock.h>
  17. #include <linux/workqueue.h>
  18. #include "qti_epm.h"
  19. #include <linux/delay.h>
  20. #define EPM_CONFIG_SDAM_BASE_OFF 0x45
  21. #define EPM_DATA_SDAM_BASE_OFF 0x45
  22. #define EPM_CH_ENABLE_MASK BIT(7)
  23. #define EPM_SID_MASK 0xf
  24. #define EPM_GANG_NUM_MASK 0x70
  25. #define EPM_DATA_BYTE_SIZE 2
  26. #define EPM_NSEC_PER_SEC 1000000000L
  27. #define EPM_DATA_TO_POWER_UW 1500L /* 1 LSB = 1.5 mW */
  28. #define EPM_TZ_BYTE_SIZE 1
  29. #define EPM_TZ_MIN_VAL_IN_C 50
  30. #define EPM_GET_POWER_UW_FROM_ADC(adc) ((adc) * EPM_DATA_TO_POWER_UW)
  31. #define EPM_GET_TEMP_MC_FROM_ADC(adc) (((adc) - EPM_TZ_MIN_VAL_IN_C) * 1000)
  32. #define EPM_AVG_SDAM_RETRY_DELAY msecs_to_jiffies(200)
  33. static int epm_sdam_nvmem_read(struct epm_priv *epm, struct epm_sdam *sdam,
  34. uint16_t offset, size_t bytes, uint8_t *data)
  35. {
  36. int rc = 0;
  37. mutex_lock(&sdam->lock);
  38. rc = nvmem_device_read(sdam->nvmem, offset, bytes, data);
  39. mutex_unlock(&sdam->lock);
  40. EPM_DBG(epm, "sdam[%d] off:0x%x,size:%d rc=%d data[0]:0x%x data[1]:0x%x",
  41. sdam->id, offset, bytes, rc, data[0], data[1]);
  42. if (rc < 0)
  43. dev_err(epm->dev,
  44. "Failed to read sdam[%d] off:0x%x,size:%d rc=%d\n",
  45. sdam->id, offset, bytes, rc);
  46. return rc;
  47. }
  48. static struct epm_sdam *get_data_sdam_from_pid(struct epm_priv *epm, uint8_t pid)
  49. {
  50. if (!epm->data_1s_base_pid ||
  51. (pid - epm->data_1s_base_pid) >= (MAX_EPM_SDAM - DATA_1_SDAM)) {
  52. dev_err(epm->dev, "Invalid sdam pids, base=0x%x curr=0x%x\n",
  53. epm->data_1s_base_pid, pid);
  54. return ERR_PTR(-EINVAL);
  55. }
  56. if (pid < epm->data_1s_base_pid)
  57. pid = epm->data_1s_base_pid;
  58. return &epm->sdam[DATA_1_SDAM + (pid - epm->data_1s_base_pid)];
  59. }
  60. static int epm_validate_data_sdam_sequence_matching(struct epm_priv *epm,
  61. struct epm_sdam *sdam)
  62. {
  63. int rc = 0;
  64. uint8_t data_counter[2] = {0};
  65. rc = epm_sdam_nvmem_read(epm, sdam,
  66. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_SEQ_START,
  67. 2, data_counter);
  68. if (rc < 0)
  69. return rc;
  70. if (!data_counter[0] ||
  71. data_counter[0] !=
  72. data_counter[1]) {
  73. EPM_DBG(epm,
  74. "sdam[%d] No matching counter START:%d END:%d, rc=%d",
  75. sdam->id, data_counter[0], data_counter[1], rc);
  76. return -EBUSY;
  77. }
  78. return 0;
  79. }
  80. static struct epm_sdam *get_prev_data_sdam(struct epm_priv *epm,
  81. struct epm_sdam *cur_sdam)
  82. {
  83. enum epm_sdam_id id;
  84. struct epm_sdam *prev;
  85. if (cur_sdam->id - 1 < DATA_1_SDAM)
  86. id = DATA_11_SDAM;
  87. else
  88. id = cur_sdam->id - 1;
  89. prev = &epm->sdam[id];
  90. if (prev && (prev != cur_sdam))
  91. return prev;
  92. return NULL;
  93. }
  94. static enum epm_mode qti_epm_get_mode(struct epm_priv *epm)
  95. {
  96. if (!epm || !epm->initialized)
  97. return -ENODEV;
  98. return epm->mode;
  99. }
  100. static bool epm_is_need_hw_read(u64 last_timestamp)
  101. {
  102. if (sched_clock() - last_timestamp < EPM_NSEC_PER_SEC)
  103. return false;
  104. return true;
  105. }
  106. static void epm_channel_avg_data_update(struct epm_device *epm_dev,
  107. uint8_t lsb, uint8_t msb, u64 ts)
  108. {
  109. epm_dev->last_avg_data = (msb << 8) | lsb;
  110. epm_dev->last_avg_data_uw = EPM_GET_POWER_UW_FROM_ADC((msb << 8) | lsb);
  111. epm_dev->avg_time_stamp = ts;
  112. EPM_DBG(epm_dev->priv, "epm[%s]:avg power:%duw msb:0x%x lsb:0x%x",
  113. epm_dev->name, epm_dev->last_avg_data_uw, msb, lsb);
  114. }
/*
 * qti_epm_read_acat_10s_avg_data_common() - Read 10s-average power data
 * from the dedicated averaging SDAM.
 * @epm:     driver private data
 * @epm_dev: a single channel to refresh, or NULL to refresh every
 *           enabled channel from one bulk read
 * @offset:  absolute SDAM byte offset to start reading from
 * @size:    number of bytes to read
 *
 * Callers hold epm->avg_read_lock. If the SDAM sequence counters do not
 * match (writer mid-update), returns -EBUSY and schedules a delayed
 * retry via epm->avg_data_work.
 */
static int qti_epm_read_acat_10s_avg_data_common(struct epm_priv *epm,
		struct epm_device *epm_dev,
		uint16_t offset, size_t size)
{
	uint8_t data_sdam_avg[DATA_SDAM_POWER_MSB_CH48 + 1] = {0};
	int rc = 0;
	struct epm_device *epm_dev_tmp;

	/* Don't read a snapshot that is still being rewritten */
	rc = epm_validate_data_sdam_sequence_matching(epm,
			&epm->sdam[DATA_AVG_SDAM]);
	if (rc < 0) {
		if (rc == -EBUSY) {
			dev_dbg(epm->dev,
				"Retry avg data update after sometime\n");
			schedule_delayed_work(&epm->avg_data_work,
					EPM_AVG_SDAM_RETRY_DELAY);
		}
		return rc;
	}
	rc = epm_sdam_nvmem_read(epm, &epm->sdam[DATA_AVG_SDAM], offset,
			size, data_sdam_avg);
	if (rc < 0)
		return rc;
	if (!epm_dev && size > EPM_DATA_BYTE_SIZE) {
		/* Bulk path: one shared timestamp for all channels */
		epm->all_avg_read_ts = sched_clock();
		list_for_each_entry(epm_dev_tmp, &epm->epm_dev_head, epm_node) {
			if (!epm_dev_tmp->enabled)
				continue;
			/*
			 * NOTE(review): data_offset is relative to the power
			 * data area while @offset also carries
			 * EPM_DATA_SDAM_BASE_OFF -- confirm this bound is
			 * intended to mix the two bases.
			 */
			if (epm_dev_tmp->data_offset >= (offset + size))
				continue;
			epm_channel_avg_data_update(epm_dev_tmp,
				data_sdam_avg[epm_dev_tmp->data_offset],
				data_sdam_avg[epm_dev_tmp->data_offset + 1],
				epm->all_avg_read_ts);
		}
	} else if (epm_dev && size == EPM_DATA_BYTE_SIZE) {
		/* Single-channel path: the two bytes read are LSB then MSB */
		epm_channel_avg_data_update(epm_dev, data_sdam_avg[0],
				data_sdam_avg[1], sched_clock());
	}
	return 0;
}
  155. static int qti_epm_update_acat_10s_avg_full_data(struct epm_priv *epm)
  156. {
  157. int rc = 0;
  158. mutex_lock(&epm->avg_read_lock);
  159. if (epm_is_need_hw_read(epm->all_avg_read_ts))
  160. rc = qti_epm_read_acat_10s_avg_data_common(epm, NULL,
  161. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1,
  162. epm->last_ch_offset + 2);
  163. mutex_unlock(&epm->avg_read_lock);
  164. if (rc < 0)
  165. return rc;
  166. return 0;
  167. }
  168. static int qti_epm_read_acat_10s_avg_channel(struct epm_device *epm_dev,
  169. u64 *power_uw)
  170. {
  171. struct epm_priv *epm = epm_dev->priv;
  172. int rc = 0;
  173. if (epm_dev->data_offset >= DATA_SDAM_POWER_MSB_CH48)
  174. return -EINVAL;
  175. mutex_lock(&epm->avg_read_lock);
  176. if (epm_is_need_hw_read(epm_dev->avg_time_stamp))
  177. rc = qti_epm_read_acat_10s_avg_data_common(epm, epm_dev,
  178. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1
  179. + epm_dev->data_offset, EPM_DATA_BYTE_SIZE);
  180. mutex_unlock(&epm->avg_read_lock);
  181. if (rc >= 0 || rc == -EBUSY) {
  182. rc = 0;
  183. *power_uw = epm_dev->last_avg_data_uw;
  184. }
  185. return rc;
  186. }
  187. static int epm_get_latest_sdam_pid(struct epm_priv *epm, uint8_t *pid)
  188. {
  189. int rc = 0;
  190. rc = epm_sdam_nvmem_read(epm, &epm->sdam[CONFIG_SDAM],
  191. EPM_CONFIG_SDAM_BASE_OFF + CONFIG_SDAM_LAST_FULL_SDAM,
  192. 1, pid);
  193. if (rc < 0)
  194. return rc;
  195. return rc;
  196. }
/*
 * get_next_valid_data_1s_sdam() - Find a data SDAM holding a consistent
 * (fully written) snapshot.
 * @epm:  driver private data
 * @sdam: NULL to start from the most recently filled SDAM (as reported
 *        by the config SDAM); otherwise continue the search from the
 *        SDAM preceding @sdam in the ring.
 *
 * Walks backwards (with wrap-around) through the data SDAM ring until
 * one passes the start/end sequence-counter check. Returns a valid sdam
 * pointer, or ERR_PTR(-ENODEV/-EINVAL/-EBUSY) on failure.
 */
static struct epm_sdam *get_next_valid_data_1s_sdam(struct epm_priv *epm,
		struct epm_sdam *sdam)
{
	uint8_t data_sdam_pid;
	int rc = 0, idx = 0;

	if (!sdam) {
		/* get latest data sdam pid */
		rc = epm_get_latest_sdam_pid(epm, &data_sdam_pid);
		if (rc < 0)
			return ERR_PTR(-ENODEV);
		/* Better save last sdam */
		epm->last_sdam_pid = data_sdam_pid;
		/* Get data sdam from sdam pid */
		sdam = get_data_sdam_from_pid(epm, data_sdam_pid);
		if (IS_ERR(sdam))
			return sdam;
	} else {
		sdam = get_prev_data_sdam(epm, sdam);
		if (!sdam)
			return ERR_PTR(-ENODEV);
	}
	rc = epm_validate_data_sdam_sequence_matching(epm, sdam);
	/*
	 * Try up to MAX_EPM_SDAM - 2 older SDAMs; a successful validate
	 * breaks out before idx is bumped, so idx < limit distinguishes
	 * "found one" from "ring exhausted" below.
	 */
	while (idx < (MAX_EPM_SDAM - 2) && rc != 0) {
		sdam = get_prev_data_sdam(epm, sdam);
		if (!sdam)
			return ERR_PTR(-ENODEV);
		rc = epm_validate_data_sdam_sequence_matching(epm, sdam);
		if (!rc)
			break;
		idx++;
	}
	if (idx >= (MAX_EPM_SDAM - 2)) {
		dev_err(epm->dev, "No matching data sdam\n");
		return ERR_PTR(-EBUSY);
	}
	return sdam;
}
  234. static void epm_channel_data_update(struct epm_device *epm_dev,
  235. uint8_t lsb, uint8_t msb, int idx, u64 ts)
  236. {
  237. epm_dev->last_data[idx] = (msb << 8) | lsb;
  238. epm_dev->last_data_uw[idx] = EPM_GET_POWER_UW_FROM_ADC((msb << 8) | lsb);
  239. epm_dev->time_stamp = ts;
  240. EPM_DBG(epm_dev->priv, "epm[%s]:1s power[%d]:%duw msb:0x%x lsb:0x%x",
  241. epm_dev->name, idx, epm_dev->last_data_uw[idx],
  242. msb, lsb);
  243. }
/*
 * qti_epm_read_acat_data_common() - Read 1s power data from the data
 * SDAM ring.
 * @epm:      driver private data
 * @epm_dev:  single channel to refresh, or NULL for a bulk refresh of
 *            all enabled channels
 * @offset:   absolute SDAM byte offset to read from
 * @size:     number of bytes to read per SDAM
 * @epm_full: when true, keep stepping to older SDAMs to fill per-channel
 *            history slots (bounded by EPM_MAX_DATA_MAX and epm->max_data)
 *
 * Callers hold epm->sec_read_lock.
 */
static int qti_epm_read_acat_data_common(struct epm_priv *epm,
		struct epm_device *epm_dev, uint16_t offset,
		size_t size, bool epm_full)
{
	uint8_t data[MAX_SDAM_DATA] = {0};
	struct epm_sdam *sdam = NULL;
	int rc = 0, data_idx = 0;
	struct epm_device *epm_dev_tmp;

	do {
		/* First pass: newest valid SDAM; later passes step back */
		sdam = get_next_valid_data_1s_sdam(epm, sdam);
		if (IS_ERR(sdam))
			return PTR_ERR(sdam);
		rc = epm_sdam_nvmem_read(epm, sdam, offset, size, data);
		if (rc < 0)
			return rc;
		if (!epm_dev && size > EPM_DATA_BYTE_SIZE) {
			/* Bulk path: one shared timestamp for all channels */
			epm->all_1s_read_ts = sched_clock();
			list_for_each_entry(
				epm_dev_tmp, &epm->epm_dev_head, epm_node) {
				if (!epm_dev_tmp->enabled)
					continue;
				/*
				 * NOTE(review): data_offset is relative to
				 * the power-data area while @offset includes
				 * EPM_DATA_SDAM_BASE_OFF -- confirm this
				 * bound mixes bases intentionally.
				 */
				if (epm_dev_tmp->data_offset >= (offset + size))
					continue;
				epm_channel_data_update(epm_dev_tmp,
					data[epm_dev_tmp->data_offset],
					data[epm_dev_tmp->data_offset + 1],
					data_idx, epm->all_1s_read_ts);
			}
		} else if (epm_dev && size == EPM_DATA_BYTE_SIZE) {
			/* Single channel: only history slot 0 is updated */
			epm_channel_data_update(epm_dev, data[0], data[1],
					0, sched_clock());
		}
		data_idx++;
	} while (epm_full && data_idx < EPM_MAX_DATA_MAX &&
			data_idx < epm->max_data);
	return rc;
}
  281. int qti_epm_update_acat_full_data(struct epm_priv *epm)
  282. {
  283. int rc = 0;
  284. mutex_lock(&epm->sec_read_lock);
  285. if (epm_is_need_hw_read(epm->all_1s_read_ts))
  286. rc = qti_epm_read_acat_data_common(epm, NULL,
  287. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1,
  288. epm->last_ch_offset + 2, true);
  289. mutex_unlock(&epm->sec_read_lock);
  290. if (rc < 0)
  291. return rc;
  292. return rc;
  293. }
  294. static int qti_epm_read_acat_1s_channel(struct epm_device *epm_dev, u64 *power_uw)
  295. {
  296. struct epm_priv *epm = epm_dev->priv;
  297. int rc = 0;
  298. if (epm_dev->data_offset > epm->last_ch_offset)
  299. return -EINVAL;
  300. mutex_lock(&epm->sec_read_lock);
  301. if (epm_is_need_hw_read(epm_dev->time_stamp))
  302. rc = qti_epm_read_acat_data_common(epm, epm_dev,
  303. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1
  304. + epm_dev->data_offset, EPM_DATA_BYTE_SIZE,
  305. false);
  306. mutex_unlock(&epm->sec_read_lock);
  307. if (rc >= 0 || rc == -EBUSY) {
  308. rc = 0;
  309. *power_uw = epm_dev->last_data_uw[0];
  310. }
  311. return rc;
  312. }
  313. static int qti_epm_get_power(struct epm_device *epm_dev,
  314. enum epm_data_type type, u64 *power_uw)
  315. {
  316. int rc = 0;
  317. mutex_lock(&epm_dev->lock);
  318. switch (type) {
  319. case EPM_1S_DATA:
  320. rc = qti_epm_read_acat_1s_channel(epm_dev, power_uw);
  321. break;
  322. case EPM_10S_AVG_DATA:
  323. rc = qti_epm_read_acat_10s_avg_channel(epm_dev, power_uw);
  324. break;
  325. default:
  326. dev_err(epm_dev->priv->dev,
  327. "No valid epm data type, type:%d\n", type);
  328. rc = -EINVAL;
  329. break;
  330. }
  331. mutex_unlock(&epm_dev->lock);
  332. return rc;
  333. }
  334. static void epm_temp_data_update(struct epm_tz_device *epm_tz,
  335. uint8_t data, u64 ts)
  336. {
  337. epm_tz->last_temp = EPM_GET_TEMP_MC_FROM_ADC(data);
  338. epm_tz->time_stamp = ts;
  339. EPM_DBG(epm_tz->priv, "epm tz[%d]:temp_adc:0x%x temp:%d mC",
  340. epm_tz->offset + 1, data, epm_tz->last_temp);
  341. }
/*
 * qti_epm_read_tz_common() - Read die-temperature bytes from the newest
 * valid data SDAM.
 * @epm:    driver private data
 * @epm_tz: single sensor to refresh, or NULL to refresh all dt_tz_cnt
 *          sensors from one bulk read
 * @offset: absolute SDAM byte offset
 * @size:   number of bytes to read (one byte per sensor)
 */
static int qti_epm_read_tz_common(struct epm_priv *epm,
		struct epm_tz_device *epm_tz, uint16_t offset, size_t size)
{
	uint8_t data[EPM_TZ_CH_MAX] = {0};
	struct epm_sdam *sdam = NULL;
	int rc = 0, data_idx = 0;

	sdam = get_next_valid_data_1s_sdam(epm, sdam);
	if (IS_ERR(sdam))
		return PTR_ERR(sdam);
	rc = epm_sdam_nvmem_read(epm, sdam, offset, size, data);
	if (rc < 0)
		return rc;
	if (!epm_tz && size > EPM_TZ_BYTE_SIZE) {
		/* Bulk path: @epm_tz parameter is reused as the loop cursor */
		epm->all_tz_read_ts = sched_clock();
		for (data_idx = 0; data_idx < epm->dt_tz_cnt; data_idx++) {
			epm_tz = &epm->epm_tz[data_idx];
			/*
			 * NOTE(review): @offset includes the SDAM base while
			 * epm_tz->offset is sensor-relative -- confirm this
			 * bound is intended.
			 */
			if (epm_tz->offset >= (offset + size))
				continue;
			epm_temp_data_update(epm_tz,
				data[epm_tz->offset], epm->all_tz_read_ts);
		}
	} else if (epm_tz && size == EPM_TZ_BYTE_SIZE) {
		epm_temp_data_update(epm_tz, data[0], sched_clock());
	}
	return rc;
}
  368. static int qti_epm_read_1s_temp(struct epm_tz_device *epm_tz, int *temp)
  369. {
  370. struct epm_priv *epm = epm_tz->priv;
  371. int rc = 0;
  372. if (epm_is_need_hw_read(epm_tz->time_stamp))
  373. rc = qti_epm_read_tz_common(epm, epm_tz,
  374. EPM_DATA_SDAM_BASE_OFF + DATA_SDAM_DIE_TEMP_SID1
  375. + epm_tz->offset, 1);
  376. if (rc >= 0 || rc == -EBUSY) {
  377. rc = 0;
  378. *temp = epm_tz->last_temp;
  379. }
  380. return rc;
  381. }
  382. static int qti_epm_get_temp(struct epm_tz_device *epm_tz, int *temp)
  383. {
  384. int rc = 0;
  385. mutex_lock(&epm_tz->lock);
  386. rc = qti_epm_read_1s_temp(epm_tz, temp);
  387. mutex_unlock(&epm_tz->lock);
  388. return rc;
  389. }
  390. static int qti_epm_read_data_update(struct epm_priv *epm)
  391. {
  392. switch (qti_epm_get_mode(epm)) {
  393. case EPM_ACAT_MODE:
  394. qti_epm_update_acat_10s_avg_full_data(epm);
  395. break;
  396. default:
  397. break;
  398. }
  399. return 0;
  400. }
  401. static void qti_epm_update_avg_data(struct work_struct *work)
  402. {
  403. struct epm_priv *epm = container_of(work, struct epm_priv,
  404. avg_data_work.work);
  405. qti_epm_update_acat_10s_avg_full_data(epm);
  406. }
  407. static irqreturn_t epm_sdam_irq_handler(int irq, void *data)
  408. {
  409. struct epm_priv *epm = data;
  410. qti_epm_read_data_update(epm);
  411. return IRQ_HANDLED;
  412. }
  413. static int get_dt_index_from_ppid(struct epm_device *epm_dev)
  414. {
  415. uint16_t ppid = 0, i = 0;
  416. struct epm_priv *epm = epm_dev->priv;
  417. if (!epm_dev->enabled || !epm->dt_reg_cnt)
  418. return -EINVAL;
  419. ppid = epm_dev->sid << 8 | epm_dev->pid;
  420. for (i = 0; i < epm->dt_reg_cnt; i++) {
  421. if (ppid == epm->reg_ppid_map[i])
  422. return i;
  423. }
  424. return -ENODEV;
  425. }
  426. static int qti_epm_config_sdam_read(struct epm_priv *epm)
  427. {
  428. uint8_t *config_sdam = NULL;
  429. struct epm_device *epm_dev = NULL;
  430. int rc = 0;
  431. uint8_t conf_idx, data_idx;
  432. if (!epm->sdam[CONFIG_SDAM].nvmem) {
  433. dev_err(epm->dev, "Invalid sdam nvmem\n");
  434. return -EINVAL;
  435. }
  436. config_sdam = devm_kcalloc(epm->dev, MAX_CONFIG_SDAM_DATA,
  437. sizeof(*config_sdam), GFP_KERNEL);
  438. if (!config_sdam)
  439. return -ENOMEM;
  440. rc = epm_sdam_nvmem_read(epm, &epm->sdam[CONFIG_SDAM],
  441. EPM_CONFIG_SDAM_BASE_OFF,
  442. MAX_CONFIG_SDAM_DATA, config_sdam);
  443. if (rc < 0)
  444. return rc;
  445. epm->g_enabled = config_sdam[CONFIG_SDAM_EPM_MODE] & BIT(7);
  446. if (!epm->g_enabled) {
  447. dev_err(epm->dev, "pmic epm is in disabled state, reg:0x%x\n",
  448. config_sdam[CONFIG_SDAM_EPM_MODE]);
  449. return -ENODEV;
  450. }
  451. epm->mode = config_sdam[CONFIG_SDAM_EPM_MODE] & BIT(0);
  452. epm->max_data = config_sdam[CONFIG_SDAM_MAX_DATA];
  453. epm->last_sdam_pid = config_sdam[CONFIG_SDAM_LAST_FULL_SDAM];
  454. epm->config_sdam_data = config_sdam;
  455. /* logic to read number of channels and die_temps */
  456. for (conf_idx = CONFIG_SDAM_CONFIG_1, data_idx = 0;
  457. conf_idx <= CONFIG_SDAM_CONFIG_48;
  458. conf_idx += 2, data_idx += EPM_DATA_BYTE_SIZE) {
  459. const char *reg_name;
  460. if (!(config_sdam[conf_idx] & EPM_CH_ENABLE_MASK))
  461. continue;
  462. epm->num_reg++;
  463. epm_dev = devm_kzalloc(epm->dev, sizeof(*epm_dev), GFP_KERNEL);
  464. if (!epm_dev)
  465. return -ENOMEM;
  466. epm_dev->enabled = config_sdam[conf_idx] & EPM_CH_ENABLE_MASK ?
  467. true : false;
  468. epm_dev->sid = config_sdam[conf_idx] & EPM_SID_MASK;
  469. epm_dev->gang_num = config_sdam[conf_idx] & EPM_GANG_NUM_MASK;
  470. epm_dev->pid = config_sdam[conf_idx + 1];
  471. epm_dev->priv = epm;
  472. epm_dev->data_offset = data_idx;
  473. mutex_init(&epm_dev->lock);
  474. if (data_idx > epm->last_ch_offset)
  475. epm->last_ch_offset = data_idx;
  476. rc = get_dt_index_from_ppid(epm_dev);
  477. if (rc < 0 || rc >= epm->dt_reg_cnt) {
  478. dev_err(epm->dev, "No matching channel ppid, rc:%d\n",
  479. rc);
  480. return rc;
  481. }
  482. of_property_read_string_index(epm->dev->of_node,
  483. "qcom,reg-ppid-names", rc, &reg_name);
  484. dev_dbg(epm->dev, "%s: epm channel:%s off:0x%x\n", __func__,
  485. reg_name, data_idx);
  486. strscpy(epm_dev->name, reg_name, sizeof(epm_dev->name));
  487. list_add(&epm_dev->epm_node, &epm->epm_dev_head);
  488. }
  489. return 0;
  490. }
  491. static int initialize_epm_tz(struct epm_priv *epm)
  492. {
  493. struct epm_tz_device *epm_tz = NULL;
  494. int tz_idx = 0;
  495. if (!epm->dt_tz_cnt || epm->dt_tz_cnt > EPM_TZ_CH_MAX)
  496. return 0;
  497. epm_tz = devm_kzalloc(epm->dev,
  498. sizeof(*epm_tz) * epm->dt_tz_cnt, GFP_KERNEL);
  499. if (!epm_tz)
  500. return -ENOMEM;
  501. for (tz_idx = 0; tz_idx < epm->dt_tz_cnt; tz_idx++) {
  502. epm_tz[tz_idx].priv = epm;
  503. epm_tz[tz_idx].offset = tz_idx;
  504. mutex_init(&epm_tz[tz_idx].lock);
  505. }
  506. epm->epm_tz = epm_tz;
  507. return 0;
  508. }
  509. static int epm_get_sdam_nvmem(struct device *dev, struct epm_sdam *sdam,
  510. char *sdam_name)
  511. {
  512. int rc = 0;
  513. sdam->nvmem = devm_nvmem_device_get(dev, sdam_name);
  514. if (IS_ERR(sdam->nvmem)) {
  515. rc = PTR_ERR(sdam->nvmem);
  516. if (rc != -EPROBE_DEFER)
  517. dev_err(dev, "Failed to get nvmem device, rc=%d\n",
  518. rc);
  519. sdam->nvmem = NULL;
  520. return rc;
  521. }
  522. mutex_init(&sdam->lock);
  523. return rc;
  524. }
/*
 * epm_parse_sdam_data() - Count the DT "nvmem-names" entries and acquire
 * the config and 10s-average SDAM nvmem devices.
 *
 * NOTE(review): only sdam[0] (CONFIG_SDAM) and sdam[1] (DATA_AVG_SDAM)
 * receive an nvmem handle and mutex_init() here; the 1s data SDAM slots
 * stay zeroed. Confirm the 1s data paths cannot be reached until those
 * slots are populated (DT comments say 1s data is not yet enabled).
 */
static int epm_parse_sdam_data(struct epm_priv *epm)
{
	int rc = 0;
	char buf[20];

	rc = of_property_count_strings(epm->dev->of_node, "nvmem-names");
	if (rc < 0) {
		dev_err(epm->dev, "Could not find nvmem device\n");
		return rc;
	}
	if (rc > MAX_EPM_SDAM) {
		dev_err(epm->dev, "Invalid num of SDAMs:%d\n", rc);
		return -EINVAL;
	}
	epm->num_sdams = rc;
	epm->sdam = devm_kcalloc(epm->dev, epm->num_sdams,
				sizeof(*epm->sdam), GFP_KERNEL);
	if (!epm->sdam)
		return -ENOMEM;
	/* Check for config sdam */
	epm->sdam[0].id = CONFIG_SDAM;
	scnprintf(buf, sizeof(buf), "epm-config-sdam");
	rc = epm_get_sdam_nvmem(epm->dev, &epm->sdam[0], buf);
	if (rc < 0)
		return rc;
	/* Check 10s avg sdam */
	epm->sdam[1].id = DATA_AVG_SDAM;
	scnprintf(buf, sizeof(buf), "epm-10s-avg-sdam");
	rc = epm_get_sdam_nvmem(epm->dev, &epm->sdam[1], buf);
	if (rc < 0)
		return rc;
	return 0;
}
  557. static int epm_parse_dt(struct epm_priv *epm)
  558. {
  559. struct platform_device *pdev;
  560. int rc = 0;
  561. uint32_t val = 0;
  562. struct device_node *np = epm->dev->of_node;
  563. /* 1s data is not enabled yet, hence below DT is optional for now */
  564. rc = of_property_read_u32(np, "qcom,data-sdam-base-id", &val);
  565. if (rc < 0)
  566. dev_dbg(epm->dev, "Failed to get sdam base, rc = %d\n", rc);
  567. epm->data_1s_base_pid = val;
  568. rc = of_property_count_strings(np, "qcom,reg-ppid-names");
  569. if (rc < 1 || rc >= EPM_POWER_CH_MAX) {
  570. dev_err(epm->dev,
  571. "Invalid ppid name mapping count, rc=%d\n", rc);
  572. return rc;
  573. }
  574. epm->dt_reg_cnt = rc;
  575. rc = of_property_count_elems_of_size(np, "qcom,reg-ppid-ids",
  576. sizeof(u16));
  577. if (rc < 1 || rc >= EPM_POWER_CH_MAX || rc != epm->dt_reg_cnt) {
  578. dev_err(epm->dev,
  579. "Invalid ppid mapping count, rc = %d strings:%d\n",
  580. rc, epm->dt_reg_cnt);
  581. return rc;
  582. }
  583. rc = of_property_read_u16_array(np, "qcom,reg-ppid-ids",
  584. epm->reg_ppid_map, epm->dt_reg_cnt);
  585. if (rc < 0) {
  586. dev_err(epm->dev,
  587. "Failed to read ppid mapping array, rc = %d\n", rc);
  588. return rc;
  589. }
  590. rc = of_property_read_u8(np, "#qcom,epm-tz-sensor", &epm->dt_tz_cnt);
  591. if (rc < 0)
  592. dev_dbg(epm->dev,
  593. "Failed to read epm tz sensor count, rc = %d\n", rc);
  594. rc = epm_parse_sdam_data(epm);
  595. if (rc < 0)
  596. return rc;
  597. pdev = of_find_device_by_node(np);
  598. if (!pdev) {
  599. dev_err(epm->dev, "Invalid pdev\n");
  600. return -ENODEV;
  601. }
  602. rc = platform_get_irq(pdev, 0);
  603. if (rc <= 0)
  604. dev_dbg(epm->dev, "Failed to get epm irq, rc=%d\n", rc);
  605. epm->irq = rc;
  606. return 0;
  607. }
  608. static int qti_epm_hw_init(struct epm_priv *epm)
  609. {
  610. int rc;
  611. if (epm->initialized)
  612. return 0;
  613. mutex_init(&epm->sec_read_lock);
  614. mutex_init(&epm->avg_read_lock);
  615. INIT_LIST_HEAD(&epm->epm_dev_head);
  616. INIT_DELAYED_WORK(&epm->avg_data_work, qti_epm_update_avg_data);
  617. rc = epm_parse_dt(epm);
  618. if (rc < 0) {
  619. dev_err(epm->dev, "Failed to parse epm rc=%d\n", rc);
  620. return rc;
  621. }
  622. rc = qti_epm_config_sdam_read(epm);
  623. if (rc < 0) {
  624. dev_err(epm->dev, "Failed to parse config sdam rc=%d\n", rc);
  625. return rc;
  626. }
  627. if (epm->irq > 0) {
  628. rc = devm_request_threaded_irq(epm->dev, epm->irq,
  629. NULL, epm_sdam_irq_handler,
  630. IRQF_ONESHOT, "qti_epm_irq", epm);
  631. if (rc < 0) {
  632. dev_err(epm->dev,
  633. "Failed to request IRQ for epm, rc=%d\n", rc);
  634. return rc;
  635. }
  636. }
  637. rc = initialize_epm_tz(epm);
  638. epm->initialized = true;
  639. /* Update first reading for all channels */
  640. qti_epm_read_data_update(epm);
  641. return 0;
  642. }
  643. static void qti_epm_hw_release(struct epm_priv *epm)
  644. {
  645. }
/* Hardware-backed implementation of the EPM operations consumed by the
 * driver core (declared in qti_epm.h). */
struct epm_ops epm_hw_ops = {
	.init = qti_epm_hw_init,
	.get_mode = qti_epm_get_mode,
	.get_power = qti_epm_get_power, // only for ACAT mode
	.get_temp = qti_epm_get_temp,
	.release = qti_epm_hw_release,
};