/* qg-util.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/alarmtimer.h>
  7. #include <linux/cdev.h>
  8. #include <linux/device.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/power_supply.h>
  11. #include <linux/regmap.h>
  12. #include <linux/rtc.h>
  13. #include <linux/iio/consumer.h>
  14. #include <uapi/linux/qg.h>
  15. #include "qg-sdam.h"
  16. #include "qg-core.h"
  17. #include "qg-reg.h"
  18. #include "qg-defs.h"
  19. #include "qg-iio.h"
  20. #include "qg-util.h"
  21. static inline bool is_sticky_register(u32 addr)
  22. {
  23. if ((addr & 0xFF) == QG_STATUS2_REG)
  24. return true;
  25. return false;
  26. }
/*
 * qg_read - bulk-read @len bytes from PMIC register @addr into @val.
 *
 * Sticky status registers are cleared with a dummy write immediately
 * after the read. Optionally traces the transaction when the
 * QG_DEBUG_BUS_READ bit is set in the debug mask.
 *
 * Returns 0 on success or a negative regmap error code.
 */
int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len)
{
	int rc, i;
	u32 dummy = 0;

	rc = regmap_bulk_read(chip->regmap, addr, val, len);
	if (rc < 0) {
		pr_err("Failed regmap_read for address %04x rc=%d\n", addr, rc);
		return rc;
	}

	if (is_sticky_register(addr)) {
		/* write to the sticky register to clear it */
		rc = regmap_write(chip->regmap, addr, dummy);
		if (rc < 0) {
			pr_err("Failed regmap_write for %04x rc=%d\n",
					addr, rc);
			return rc;
		}
	}

	/* Bus tracing, gated by the runtime debug mask. */
	if (*chip->debug_mask & QG_DEBUG_BUS_READ) {
		pr_info("length %d addr=%04x\n", len, addr);
		for (i = 0; i < len; i++)
			pr_info("val[%d]: %02x\n", i, val[i]);
	}

	return 0;
}
/*
 * qg_write - write @len bytes from @val to PMIC register @addr.
 *
 * Serialized against other bus writers via chip->bus_lock. Uses a
 * single regmap_write() for one byte, regmap_bulk_write() otherwise.
 * Optionally traces the transaction when QG_DEBUG_BUS_WRITE is set.
 *
 * Returns 0 on success or a negative regmap error code.
 */
int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len)
{
	int rc, i;

	mutex_lock(&chip->bus_lock);

	if (len > 1)
		rc = regmap_bulk_write(chip->regmap, addr, val, len);
	else
		rc = regmap_write(chip->regmap, addr, *val);
	if (rc < 0) {
		pr_err("Failed regmap_write for address %04x rc=%d\n",
				addr, rc);
		goto out;
	}

	if (*chip->debug_mask & QG_DEBUG_BUS_WRITE) {
		pr_info("length %d addr=%04x\n", len, addr);
		for (i = 0; i < len; i++)
			pr_info("val[%d]: %02x\n", i, val[i]);
	}
out:
	mutex_unlock(&chip->bus_lock);
	return rc;
}
/*
 * qg_masked_write - read-modify-write the bits in @mask at @addr to @val.
 *
 * Serialized via chip->bus_lock; traces the update when
 * QG_DEBUG_BUS_WRITE is set. Returns 0 on success or a negative
 * regmap error code.
 */
int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val)
{
	int rc;

	mutex_lock(&chip->bus_lock);

	rc = regmap_update_bits(chip->regmap, addr, mask, val);
	if (rc < 0) {
		pr_err("Failed regmap_update_bits for address %04x rc=%d\n",
				addr, rc);
		goto out;
	}

	if (*chip->debug_mask & QG_DEBUG_BUS_WRITE)
		pr_info("addr=%04x mask: %02x val: %02x\n", addr, mask, val);
out:
	mutex_unlock(&chip->bus_lock);
	return rc;
}
  90. int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data)
  91. {
  92. int rc;
  93. u8 reg[2] = {0};
  94. rc = qg_read(chip, chip->qg_base + addr, &reg[0], 2);
  95. if (rc < 0) {
  96. pr_err("Failed to read QG addr %d rc=%d\n", addr, rc);
  97. return rc;
  98. }
  99. *data = reg[0] | (reg[1] << 8);
  100. return rc;
  101. }
  102. s64 qg_iraw_to_ua(struct qpnp_qg *chip, int iraw)
  103. {
  104. if (chip->qg_subtype == QG_ADC_IBAT_5A)
  105. return div_s64(152588LL * (s64)iraw, 1000);
  106. else
  107. return div_s64(305176LL * (s64)iraw, 1000);
  108. }
/*
 * get_fifo_length - read the configured (or real-time) FIFO depth.
 * @chip:		QG device
 * @fifo_length:	result
 * @rt:			true reads the live count from STATUS3; false reads
 *			the programmed length from S2_NORMAL_MEAS_CTL2
 *
 * The programmed length field is 0-based in hardware, hence the +1 on
 * the non-RT path. Returns 0 on success, negative error on bus failure.
 */
int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt)
{
	int rc;
	u8 reg = 0;
	u32 addr;

	addr = rt ? QG_STATUS3_REG : QG_S2_NORMAL_MEAS_CTL2_REG;
	rc = qg_read(chip, chip->qg_base + addr, &reg, 1);
	if (rc < 0) {
		pr_err("Failed to read FIFO length rc=%d\n", rc);
		return rc;
	}

	if (rt) {
		*fifo_length = reg & COUNT_FIFO_RT_MASK;
	} else {
		*fifo_length = (reg & FIFO_LENGTH_MASK) >> FIFO_LENGTH_SHIFT;
		*fifo_length += 1;	/* field is 0-based */
	}

	return rc;
}
/*
 * get_sample_count - number of ADC samples accumulated per FIFO word.
 *
 * The NUM_OF_ACCUM field encodes a power of two: count = 2^(field + 1).
 * Returns 0 on success, negative error on bus failure.
 */
int get_sample_count(struct qpnp_qg *chip, u32 *sample_count)
{
	int rc;
	u8 reg = 0;

	rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL2_REG,
			&reg, 1);
	if (rc < 0) {
		pr_err("Failed to read FIFO sample count rc=%d\n", rc);
		return rc;
	}

	*sample_count = 1 << ((reg & NUM_OF_ACCUM_MASK) + 1);

	return rc;
}
/* Nominal vs. measured sleep-clock rate, used to correct the sample
 * interval on parts with the clock-adjust workaround.
 */
#define QG_CLK_RATE		32000
#define QG_ACTUAL_CLK_RATE	32764

/*
 * get_sample_interval - S2 sampling interval in milliseconds.
 *
 * The register holds the interval in 10 ms units. When the
 * QG_CLK_ADJUST_WA flag is set the value is rescaled from the nominal
 * 32 kHz clock to the actual measured clock rate.
 *
 * Returns 0 on success, negative error on bus failure.
 */
int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval)
{
	int rc;
	u8 reg = 0;

	rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL3_REG,
			&reg, 1);
	if (rc < 0) {
		pr_err("Failed to read FIFO sample interval rc=%d\n", rc);
		return rc;
	}

	*sample_interval = reg * 10;	/* register unit: 10 ms */

	if (chip->wa_flags & QG_CLK_ADJUST_WA) {
		*sample_interval = DIV_ROUND_CLOSEST(
			*sample_interval * QG_CLK_RATE, QG_ACTUAL_CLK_RATE);
	}

	return rc;
}
/*
 * get_rtc_time - read the current wall time from the system RTC.
 * @rtc_time:	result, seconds since the epoch
 *
 * Opens the CONFIG_RTC_HCTOSYS_DEVICE RTC, reads and validates the time,
 * and always closes the device before returning. Returns 0 on success,
 * -EINVAL if the RTC cannot be opened, or the rtc layer's error code.
 */
int get_rtc_time(unsigned long *rtc_time)
{
	struct rtc_time tm;
	struct rtc_device *rtc;
	int rc;

	rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
	if (rtc == NULL) {
		pr_err("Failed to open rtc device (%s)\n",
				CONFIG_RTC_HCTOSYS_DEVICE);
		return -EINVAL;
	}

	rc = rtc_read_time(rtc, &tm);
	if (rc) {
		pr_err("Failed to read rtc time (%s) : %d\n",
				CONFIG_RTC_HCTOSYS_DEVICE, rc);
		goto close_time;
	}

	rc = rtc_valid_tm(&tm);
	if (rc) {
		pr_err("Invalid RTC time (%s): %d\n",
				CONFIG_RTC_HCTOSYS_DEVICE, rc);
		goto close_time;
	}

	*rtc_time = rtc_tm_to_time64(&tm);

close_time:
	rtc_class_close(rtc);
	return rc;
}
  188. int get_fifo_done_time(struct qpnp_qg *chip, bool rt, int *time_ms)
  189. {
  190. int rc, length = 0;
  191. u32 sample_count = 0, sample_interval = 0, acc_count = 0;
  192. rc = get_fifo_length(chip, &length, rt ? true : false);
  193. if (rc < 0)
  194. return rc;
  195. rc = get_sample_count(chip, &sample_count);
  196. if (rc < 0)
  197. return rc;
  198. rc = get_sample_interval(chip, &sample_interval);
  199. if (rc < 0)
  200. return rc;
  201. *time_ms = length * sample_count * sample_interval;
  202. if (rt) {
  203. rc = qg_read(chip, chip->qg_base + QG_ACCUM_CNT_RT_REG,
  204. (u8 *)&acc_count, 1);
  205. if (rc < 0)
  206. return rc;
  207. *time_ms += ((sample_count - acc_count) * sample_interval);
  208. }
  209. return 0;
  210. }
  211. static bool is_usb_available(struct qpnp_qg *chip)
  212. {
  213. if (chip->usb_psy)
  214. return true;
  215. chip->usb_psy = power_supply_get_by_name("usb");
  216. if (!chip->usb_psy)
  217. return false;
  218. return true;
  219. }
  220. static bool is_dc_available(struct qpnp_qg *chip)
  221. {
  222. if (chip->dc_psy)
  223. return true;
  224. chip->dc_psy = power_supply_get_by_name("dc");
  225. if (!chip->dc_psy)
  226. return false;
  227. return true;
  228. }
/*
 * is_usb_present - true when the USB supply reports PRESENT.
 *
 * Falls back to false when the usb psy is unavailable or the property
 * read fails (pval stays zero-initialized).
 */
bool is_usb_present(struct qpnp_qg *chip)
{
	union power_supply_propval pval = {0, };

	if (is_usb_available(chip))
		power_supply_get_property(chip->usb_psy,
				POWER_SUPPLY_PROP_PRESENT, &pval);

	return pval.intval ? true : false;
}
/*
 * is_dc_present - true when the DC supply reports PRESENT.
 *
 * Falls back to false when the dc psy is unavailable or the property
 * read fails (pval stays zero-initialized).
 */
bool is_dc_present(struct qpnp_qg *chip)
{
	union power_supply_propval pval = {0, };

	if (is_dc_available(chip))
		power_supply_get_property(chip->dc_psy,
				POWER_SUPPLY_PROP_PRESENT, &pval);

	return pval.intval ? true : false;
}
  245. bool is_input_present(struct qpnp_qg *chip)
  246. {
  247. return is_usb_present(chip) || is_dc_present(chip);
  248. }
  249. bool is_parallel_available(struct qpnp_qg *chip)
  250. {
  251. if (is_chan_valid(chip, PARALLEL_CHARGING_ENABLED))
  252. return true;
  253. return false;
  254. }
  255. bool is_cp_available(struct qpnp_qg *chip)
  256. {
  257. if (chip->cp_psy)
  258. return true;
  259. chip->cp_psy = power_supply_get_by_name("charge_pump_master");
  260. if (!chip->cp_psy)
  261. return false;
  262. return true;
  263. }
/*
 * is_parallel_enabled - true when parallel or charge-pump charging is on.
 *
 * Prefers the parallel-charger channel; falls back to the charge-pump
 * channel. A failed read leaves val at 0, which reports "disabled".
 */
bool is_parallel_enabled(struct qpnp_qg *chip)
{
	int val = 0;

	if (is_parallel_available(chip))
		qg_read_iio_chan(chip, PARALLEL_CHARGING_ENABLED, &val);
	else if (is_cp_available(chip))
		qg_read_iio_chan(chip, CP_CHARGING_ENABLED, &val);

	return val ? true : false;
}
  273. int qg_write_monotonic_soc(struct qpnp_qg *chip, int msoc)
  274. {
  275. u8 reg = 0;
  276. int rc;
  277. reg = (msoc * 255) / 100;
  278. rc = qg_write(chip, chip->qg_base + QG_SOC_MONOTONIC_REG,
  279. &reg, 1);
  280. if (rc < 0)
  281. pr_err("Failed to update QG_SOC_MONOTINIC reg rc=%d\n", rc);
  282. return rc;
  283. }
/*
 * qg_get_battery_temp - battery temperature via the batt-therm ADC.
 * @temp:	result; units match iio_read_channel_processed() for this
 *		channel (250 is reported as the missing-battery default)
 *
 * When no battery is present a fixed fallback of 250 is returned so
 * callers keep working. Returns 0 on success or a negative ADC error.
 */
int qg_get_battery_temp(struct qpnp_qg *chip, int *temp)
{
	int rc = 0;

	if (chip->battery_missing) {
		*temp = 250;	/* fallback when no battery is attached */
		return 0;
	}

	rc = iio_read_channel_processed(chip->batt_therm_chan, temp);
	if (rc < 0) {
		pr_err("Failed reading BAT_TEMP over ADC rc=%d\n", rc);
		return rc;
	}
	pr_debug("batt_temp = %d\n", *temp);

	return 0;
}
/*
 * qg_get_battery_current - instantaneous battery current in micro-amps.
 *
 * Reads the last burst-average current sample. The hardware requires
 * the burst-average data to be held (frozen) during the read and
 * released afterwards; the release is issued even on the error path.
 *
 * Reports 0 uA when the battery is missing, and the software value
 * chip->qg_v_ibat in voltage-only (QG_V_MODE) operation.
 *
 * Returns 0 on success, negative error on bus failure.
 */
int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua)
{
	int rc = 0, last_ibat = 0;

	if (chip->battery_missing) {
		*ibat_ua = 0;
		return 0;
	}

	if (chip->qg_mode == QG_V_MODE) {
		*ibat_ua = chip->qg_v_ibat;
		return 0;
	}

	/* hold data */
	rc = qg_masked_write(chip, chip->qg_base + QG_DATA_CTL2_REG,
			BURST_AVG_HOLD_FOR_READ_BIT,
			BURST_AVG_HOLD_FOR_READ_BIT);
	if (rc < 0) {
		pr_err("Failed to hold burst-avg data rc=%d\n", rc);
		goto release;
	}

	/*
	 * NOTE(review): 2-byte read into a zero-initialized int - assumes
	 * little-endian layout; confirm for this platform.
	 */
	rc = qg_read(chip, chip->qg_base + QG_LAST_BURST_AVG_I_DATA0_REG,
			(u8 *)&last_ibat, 2);
	if (rc < 0) {
		pr_err("Failed to read LAST_BURST_AVG_I reg, rc=%d\n", rc);
		goto release;
	}

	last_ibat = sign_extend32(last_ibat, 15);	/* 16-bit signed raw */
	*ibat_ua = qg_iraw_to_ua(chip, last_ibat);

release:
	/* release */
	qg_masked_write(chip, chip->qg_base + QG_DATA_CTL2_REG,
			BURST_AVG_HOLD_FOR_READ_BIT, 0);
	return rc;
}
/*
 * qg_get_battery_voltage - last sampled battery voltage in micro-volts.
 *
 * Reports a fixed 3.7 V when the battery is missing. Otherwise reads
 * the 16-bit raw ADC value and converts it with V_RAW_TO_UV().
 *
 * Returns 0 on success, negative error on bus failure.
 */
int qg_get_battery_voltage(struct qpnp_qg *chip, int *vbat_uv)
{
	int rc = 0;
	u64 last_vbat = 0;

	if (chip->battery_missing) {
		*vbat_uv = 3700000;	/* nominal fallback, uV */
		return 0;
	}

	/*
	 * NOTE(review): 2-byte read into a zero-initialized u64 - assumes
	 * little-endian layout; confirm for this platform.
	 */
	rc = qg_read(chip, chip->qg_base + QG_LAST_ADC_V_DATA0_REG,
			(u8 *)&last_vbat, 2);
	if (rc < 0) {
		pr_err("Failed to read LAST_ADV_V reg, rc=%d\n", rc);
		return rc;
	}

	*vbat_uv = V_RAW_TO_UV(last_vbat);

	return rc;
}
/*
 * qg_get_vbat_avg - S2 normal-mode average battery voltage in micro-volts.
 *
 * Reads the 16-bit averaged raw value and converts it with
 * V_RAW_TO_UV(). Returns 0 on success, negative error on bus failure.
 */
int qg_get_vbat_avg(struct qpnp_qg *chip, int *vbat_uv)
{
	int rc = 0;
	u64 last_vbat = 0;

	/*
	 * NOTE(review): 2-byte read into a zero-initialized u64 - assumes
	 * little-endian layout; confirm for this platform.
	 */
	rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_AVG_V_DATA0_REG,
			(u8 *)&last_vbat, 2);
	if (rc < 0) {
		pr_err("Failed to read S2_NORMAL_AVG_V reg, rc=%d\n", rc);
		return rc;
	}

	*vbat_uv = V_RAW_TO_UV(last_vbat);

	return 0;
}
/*
 * qg_get_ibat_avg - S2 normal-mode average battery current in micro-amps.
 *
 * Reads the 16-bit averaged raw value. If the register still holds the
 * FIFO reset pattern (first FIFO not yet complete), falls back to the
 * instantaneous current from qg_get_battery_current().
 *
 * Returns 0 on success, negative error on bus failure.
 */
int qg_get_ibat_avg(struct qpnp_qg *chip, int *ibat_ua)
{
	int rc = 0;
	int last_ibat = 0;

	rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_AVG_I_DATA0_REG,
			(u8 *)&last_ibat, 2);
	if (rc < 0) {
		pr_err("Failed to read S2_NORMAL_AVG_I reg, rc=%d\n", rc);
		return rc;
	}

	if (last_ibat == FIFO_I_RESET_VAL) {
		/* First FIFO is not complete, read instantaneous IBAT */
		rc = qg_get_battery_current(chip, ibat_ua);
		if (rc < 0)
			pr_err("Failed to read inst. IBAT rc=%d\n", rc);
		return rc;
	}

	last_ibat = sign_extend32(last_ibat, 15);	/* 16-bit signed raw */
	*ibat_ua = qg_iraw_to_ua(chip, last_ibat);

	return 0;
}
/*
 * is_chan_valid - lazily acquire and validate an external IIO channel.
 *
 * A cached ERR_PTR means a previous acquisition failed permanently and
 * is reported as invalid. A NULL entry triggers devm_iio_channel_get();
 * on -EPROBE_DEFER the slot is reset to NULL so a later call retries,
 * while any other error leaves the ERR_PTR cached.
 *
 * Returns true when the channel handle is usable.
 */
bool is_chan_valid(struct qpnp_qg *chip,
		enum qg_ext_iio_channels chan)
{
	int rc;

	if (IS_ERR(chip->ext_iio_chans[chan]))
		return false;

	if (!chip->ext_iio_chans[chan]) {
		chip->ext_iio_chans[chan] = devm_iio_channel_get(chip->dev,
				qg_ext_iio_chan_name[chan]);
		if (IS_ERR(chip->ext_iio_chans[chan])) {
			rc = PTR_ERR(chip->ext_iio_chans[chan]);
			/* allow a retry if the provider is not ready yet */
			if (rc == -EPROBE_DEFER)
				chip->ext_iio_chans[chan] = NULL;
			pr_err("Failed to get IIO channel %s, rc=%d\n",
					qg_ext_iio_chan_name[chan], rc);
			return false;
		}
	}

	return true;
}
  403. int qg_read_iio_chan(struct qpnp_qg *chip,
  404. enum qg_ext_iio_channels chan, int *val)
  405. {
  406. int rc;
  407. if (is_chan_valid(chip, chan)) {
  408. rc = iio_read_channel_processed(
  409. chip->ext_iio_chans[chan], val);
  410. return (rc < 0) ? rc : 0;
  411. }
  412. return -EINVAL;
  413. }
  414. int qg_write_iio_chan(struct qpnp_qg *chip,
  415. enum qg_ext_iio_channels chan, int val)
  416. {
  417. if (is_chan_valid(chip, chan))
  418. return iio_write_channel_raw(chip->ext_iio_chans[chan],
  419. val);
  420. return -EINVAL;
  421. }
/*
 * qg_read_int_iio_chan - find @chan_id in an iio_channel array and read it.
 *
 * Walks @iio_chan_list comparing each entry's channel number; on a match
 * returns 0 with the processed value in @val, or the negative IIO error.
 *
 * NOTE(review): the loop condition "while (iio_chan_list++)" tests the
 * pointer itself, which is effectively never NULL after increment, so an
 * unmatched @chan_id walks past the end of the array and -ENOENT looks
 * unreachable. Verify callers guarantee @chan_id is present, or that the
 * list has a terminator convention not visible in this file.
 */
int qg_read_int_iio_chan(struct iio_channel *iio_chan_list, int chan_id,
		int *val)
{
	int rc;

	do {
		if (iio_chan_list->channel->channel == chan_id) {
			rc = iio_read_channel_processed(iio_chan_list,
					val);
			return (rc < 0) ? rc : 0;
		}
	} while (iio_chan_list++);

	return -ENOENT;
}
/*
 * qg_read_range_data_from_node - parse a devicetree property into
 * struct range_data tuples.
 * @node:		devicetree node holding the property
 * @prop_str:		property name; a flat u32 array whose length must
 *			be a multiple of sizeof(struct range_data)/sizeof(u32)
 * @ranges:		output array, at least MAX_STEP_CHG_ENTRIES entries
 * @max_threshold:	clamp applied to low/high thresholds
 * @max_value:		clamp applied to each tuple's value
 *
 * Validates that tuples are in ascending, non-overlapping threshold
 * order; on a validation failure the whole output is zeroed so callers
 * never see partially-parsed data.
 *
 * Returns 0 on success, -EINVAL on bad input/shape/order, or the
 * of_property_* error code.
 */
int qg_read_range_data_from_node(struct device_node *node,
		const char *prop_str, struct range_data *ranges,
		int max_threshold, u32 max_value)
{
	int rc = 0, i, length, per_tuple_length, tuples;

	if (!node || !prop_str || !ranges) {
		pr_err("Invalid parameters passed\n");
		return -EINVAL;
	}

	rc = of_property_count_elems_of_size(node, prop_str, sizeof(u32));
	if (rc < 0) {
		pr_err("Count %s failed, rc=%d\n", prop_str, rc);
		return rc;
	}

	length = rc;
	/* number of u32 cells per range_data tuple */
	per_tuple_length = sizeof(struct range_data) / sizeof(u32);
	if (length % per_tuple_length) {
		pr_err("%s length (%d) should be multiple of %d\n",
				prop_str, length, per_tuple_length);
		return -EINVAL;
	}

	tuples = length / per_tuple_length;
	if (tuples > MAX_STEP_CHG_ENTRIES) {
		pr_err("too many entries(%d), only %d allowed\n",
				tuples, MAX_STEP_CHG_ENTRIES);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(node, prop_str,
			(u32 *)ranges, length);
	if (rc) {
		pr_err("Read %s failed, rc=%d\n", prop_str, rc);
		return rc;
	}

	for (i = 0; i < tuples; i++) {
		/* each tuple must have low <= high */
		if (ranges[i].low_threshold >
				ranges[i].high_threshold) {
			pr_err("%s thresholds should be in ascendant ranges\n",
					prop_str);
			rc = -EINVAL;
			goto clean;
		}

		/* tuples must not overlap the previous range */
		if (i != 0) {
			if (ranges[i - 1].high_threshold >
					ranges[i].low_threshold) {
				pr_err("%s thresholds should be in ascendant ranges\n",
						prop_str);
				rc = -EINVAL;
				goto clean;
			}
		}

		/* clamp out-of-range values rather than rejecting them */
		if (ranges[i].low_threshold > max_threshold)
			ranges[i].low_threshold = max_threshold;
		if (ranges[i].high_threshold > max_threshold)
			ranges[i].high_threshold = max_threshold;
		if (ranges[i].value > max_value)
			ranges[i].value = max_value;
	}

	return rc;
clean:
	/* never hand back partially-validated data */
	memset(ranges, 0, tuples * sizeof(struct range_data));
	return rc;
}