qmi_cooling.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
  7. #include <linux/module.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/thermal.h>
  10. #include <linux/err.h>
  11. #include <linux/slab.h>
  12. #include <linux/of.h>
  13. #include <linux/soc/qcom/qmi.h>
  14. #include <linux/net.h>
  15. #include "thermal_mitigation_device_service_v01.h"
  16. #define QMI_CDEV_DRIVER "qmi-cooling-device"
  17. #define QMI_TMD_RESP_TOUT msecs_to_jiffies(100)
/**
 * struct qmi_cooling_device - one cooling device exposed over QMI TMD
 * @np:                device-tree node of this cooling device
 * @cdev_name:         name registered with the thermal framework
 * @qmi_name:          mitigation device id used on the QMI wire
 * @connection_active: true while the remote TMD server is reachable
 * @qmi_node:          link in the owning instance's tmd_cdev_list
 * @cdev:              registered thermal cooling device (NULL until the
 *                     server reports the device and registration succeeds)
 * @mtgn_state:        last requested mitigation level (cached so it can be
 *                     replayed when the server (re)arrives)
 * @max_level:         maximum mitigation level reported by the server
 * @tmd:               owning QMI TMD service instance
 */
struct qmi_cooling_device {
	struct device_node *np;
	char cdev_name[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01];
	char qmi_name[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01];
	bool connection_active;
	struct list_head qmi_node;
	struct thermal_cooling_device *cdev;
	unsigned int mtgn_state;
	unsigned int max_level;
	struct qmi_tmd_instance *tmd;
};
/**
 * struct qmi_tmd_instance - one QMI TMD service connection
 * @dev:             parent platform device, used for devm allocations
 * @handle:          QMI client handle for this service instance
 * @mutex:           serializes QMI transactions on @handle
 * @inst_id:         QMI service instance id (from "qcom,instance-id" DT prop)
 * @tmd_cdev_list:   cooling devices served by this instance
 * @svc_arrive_work: deferred device-list verification on server arrival
 */
struct qmi_tmd_instance {
	struct device *dev;
	struct qmi_handle handle;
	struct mutex mutex;
	uint32_t inst_id;
	struct list_head tmd_cdev_list;
	struct work_struct svc_arrive_work;
};
/* All TMD instances parsed from DT; set once in of_get_qmi_tmd_platform_data() */
static struct qmi_tmd_instance *tmd_instances;
static int tmd_inst_cnt;
/*
 * Allow-list of mitigation device names supported by the remote TMD
 * service. A DT-declared cooling device whose "qcom,qmi-dev-name" is not
 * found here is rejected at parse time.
 */
static char device_clients[][QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01] = {
	{"pa"},
	{"pa_fr1"},
	{"cx_vdd_limit"},
	{"modem"},
	{"modem_current"},
	{"modem_skin"},
	{"modem_bw"},
	{"modem_bw_backoff"},
	{"vbatt_low"},
	{"charge_state"},
	{"mmw0"},
	{"mmw1"},
	{"mmw2"},
	{"mmw3"},
	{"mmw_skin0"},
	{"mmw_skin1"},
	{"mmw_skin2"},
	{"mmw_skin3"},
	{"wlan"},
	{"wlan_bw"},
	{"mmw_skin0_dsc"},
	{"mmw_skin1_dsc"},
	{"mmw_skin2_dsc"},
	{"mmw_skin3_dsc"},
	{"modem_skin_lte_dsc"},
	{"modem_skin_nr_dsc"},
	{"pa_dsc"},
	{"pa_fr1_dsc"},
	{"cdsp_sw"},
	{"cdsp_sw_hvx"},
	{"cdsp_sw_hmx"},
	{"cdsp_hw"},
	{"cpuv_restriction_cold"},
	{"cpr_cold"},
	{"modem_lte_dsc"},
	{"modem_nr_dsc"},
	{"modem_nr_scg_dsc"},
	{"sdr0_lte_dsc"},
	{"sdr1_lte_dsc"},
	{"sdr0_nr_dsc"},
	{"sdr1_nr_dsc"},
	{"sdr0_nr_scg_dsc"},
	{"sdr1_nr_scg_dsc"},
	{"pa_lte_sdr0_dsc"},
	{"pa_lte_sdr1_dsc"},
	{"pa_nr_sdr0_dsc"},
	{"pa_nr_sdr1_dsc"},
	{"pa_nr_sdr0_scg_dsc"},
	{"pa_nr_sdr1_scg_dsc"},
	{"mmw0_dsc"},
	{"mmw1_dsc"},
	{"mmw2_dsc"},
	{"mmw3_dsc"},
	{"mmw_ific_dsc"},
	{"modem_lte_sub1_dsc"},
	{"modem_nr_sub1_dsc"},
	{"modem_nr_scg_sub1_dsc"},
	{"sdr0_lte_sub1_dsc"},
	{"sdr1_lte_sub1_dsc"},
	{"sdr0_nr_sub1_dsc"},
	{"sdr1_nr_sub1_dsc"},
	{"pa_lte_sdr0_sub1_dsc"},
	{"pa_lte_sdr1_sub1_dsc"},
	{"pa_nr_sdr0_sub1_dsc"},
	{"pa_nr_sdr1_sub1_dsc"},
	{"pa_nr_sdr0_scg_sub1_dsc"},
	{"pa_nr_sdr1_scg_sub1_dsc"},
	{"mmw0_sub1_dsc"},
	{"mmw1_sub1_dsc"},
	{"mmw2_sub1_dsc"},
	{"mmw3_sub1_dsc"},
	{"mmw_ific_sub1_dsc"},
	{"bcl"},
};
  114. static int qmi_get_max_state(struct thermal_cooling_device *cdev,
  115. unsigned long *state)
  116. {
  117. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  118. if (!qmi_cdev)
  119. return -EINVAL;
  120. *state = qmi_cdev->max_level;
  121. return 0;
  122. }
  123. static int qmi_get_cur_state(struct thermal_cooling_device *cdev,
  124. unsigned long *state)
  125. {
  126. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  127. if (!qmi_cdev)
  128. return -EINVAL;
  129. *state = qmi_cdev->mtgn_state;
  130. return 0;
  131. }
/*
 * qmi_tmd_send_state_request() - request a mitigation level from the server
 * @qmi_cdev: cooling device whose mitigation level is requested
 * @state:    mitigation level to request
 *
 * Runs a QMI_TMD_SET_MITIGATION_LEVEL_REQ transaction under the instance
 * mutex and waits up to QMI_TMD_RESP_TOUT for the response.
 *
 * Return: 0 on success, a negative errno on transport failure, or the
 * (positive) QMI result code when the server rejects the request.
 */
static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
				      uint8_t state)
{
	int ret = 0;
	struct tmd_set_mitigation_level_req_msg_v01 req;
	struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp;
	struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
	struct qmi_txn txn;

	memset(&req, 0, sizeof(req));
	memset(&tmd_resp, 0, sizeof(tmd_resp));

	strscpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name,
		QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
	req.mitigation_level = state;

	mutex_lock(&tmd->mutex);

	ret = qmi_txn_init(&tmd->handle, &txn,
			   tmd_set_mitigation_level_resp_msg_v01_ei, &tmd_resp);
	if (ret < 0) {
		pr_err("qmi set state:%d txn init failed for %s ret:%d\n",
		       state, qmi_cdev->cdev_name, ret);
		goto qmi_send_exit;
	}

	ret = qmi_send_request(&tmd->handle, NULL, &txn,
			       QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01,
			       TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN,
			       tmd_set_mitigation_level_req_msg_v01_ei, &req);
	if (ret < 0) {
		pr_err("qmi set state:%d txn send failed for %s ret:%d\n",
		       state, qmi_cdev->cdev_name, ret);
		/* The txn was never sent; cancel so it is not left pending */
		qmi_txn_cancel(&txn);
		goto qmi_send_exit;
	}

	ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT);
	if (ret < 0) {
		pr_err("qmi set state:%d txn wait failed for %s ret:%d\n",
		       state, qmi_cdev->cdev_name, ret);
		goto qmi_send_exit;
	}

	if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		/* Propagate the QMI result code (positive) to the caller */
		ret = tmd_resp.resp.result;
		pr_err("qmi set state:%d NOT success for %s ret:%d\n",
		       state, qmi_cdev->cdev_name, ret);
		goto qmi_send_exit;
	}

	ret = 0;
	pr_info("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);

qmi_send_exit:
	mutex_unlock(&tmd->mutex);
	return ret;
}
  181. static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
  182. unsigned long state)
  183. {
  184. struct qmi_cooling_device *qmi_cdev = cdev->devdata;
  185. int ret = 0;
  186. if (!qmi_cdev)
  187. return -EINVAL;
  188. if (state > qmi_cdev->max_level)
  189. return -EINVAL;
  190. if (qmi_cdev->mtgn_state == state)
  191. return 0;
  192. /* save it and return if server exit */
  193. if (!qmi_cdev->connection_active) {
  194. qmi_cdev->mtgn_state = state;
  195. pr_debug("Pending request:%ld for %s\n", state,
  196. qmi_cdev->cdev_name);
  197. return 0;
  198. }
  199. /* It is best effort to save state even if QMI fail */
  200. ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state);
  201. qmi_cdev->mtgn_state = state;
  202. return ret;
  203. }
/* Callbacks wiring QMI cooling devices into the thermal framework */
static struct thermal_cooling_device_ops qmi_device_ops = {
	.get_max_state = qmi_get_max_state,
	.get_cur_state = qmi_get_cur_state,
	.set_cur_state = qmi_set_cur_state,
};
  209. static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev)
  210. {
  211. qmi_cdev->cdev = thermal_of_cooling_device_register(
  212. qmi_cdev->np,
  213. qmi_cdev->cdev_name,
  214. qmi_cdev,
  215. &qmi_device_ops);
  216. if (IS_ERR(qmi_cdev->cdev)) {
  217. pr_err("Cooling register failed for %s, ret:%ld\n",
  218. qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev));
  219. return PTR_ERR(qmi_cdev->cdev);
  220. }
  221. pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name);
  222. return 0;
  223. }
  224. static int verify_devices_and_register(struct qmi_tmd_instance *tmd)
  225. {
  226. struct tmd_get_mitigation_device_list_req_msg_v01 req;
  227. struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp;
  228. int ret = 0, i;
  229. struct qmi_txn txn;
  230. memset(&req, 0, sizeof(req));
  231. /* size of tmd_resp is very high, use heap memory rather than stack */
  232. tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL);
  233. if (!tmd_resp)
  234. return -ENOMEM;
  235. mutex_lock(&tmd->mutex);
  236. ret = qmi_txn_init(&tmd->handle, &txn,
  237. tmd_get_mitigation_device_list_resp_msg_v01_ei, tmd_resp);
  238. if (ret < 0) {
  239. pr_err("Transaction Init error for inst_id:0x%x ret:%d\n",
  240. tmd->inst_id, ret);
  241. goto reg_exit;
  242. }
  243. ret = qmi_send_request(&tmd->handle, NULL, &txn,
  244. QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01,
  245. TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN,
  246. tmd_get_mitigation_device_list_req_msg_v01_ei,
  247. &req);
  248. if (ret < 0) {
  249. qmi_txn_cancel(&txn);
  250. goto reg_exit;
  251. }
  252. ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TOUT);
  253. if (ret < 0) {
  254. pr_err("Transaction wait error for inst_id:0x%x ret:%d\n",
  255. tmd->inst_id, ret);
  256. goto reg_exit;
  257. }
  258. if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  259. ret = tmd_resp->resp.result;
  260. pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n",
  261. tmd->inst_id, ret);
  262. goto reg_exit;
  263. }
  264. mutex_unlock(&tmd->mutex);
  265. for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) {
  266. struct qmi_cooling_device *qmi_cdev = NULL;
  267. list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
  268. qmi_node) {
  269. struct tmd_mitigation_dev_list_type_v01 *device =
  270. &tmd_resp->mitigation_device_list[i];
  271. if ((strncasecmp(qmi_cdev->qmi_name,
  272. device->mitigation_dev_id.mitigation_dev_id,
  273. QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
  274. continue;
  275. qmi_cdev->connection_active = true;
  276. qmi_cdev->max_level = device->max_mitigation_level;
  277. /*
  278. * It is better to set current state
  279. * initially or during restart
  280. */
  281. qmi_tmd_send_state_request(qmi_cdev,
  282. qmi_cdev->mtgn_state);
  283. if (!qmi_cdev->cdev)
  284. ret = qmi_register_cooling_device(qmi_cdev);
  285. break;
  286. }
  287. }
  288. for (i = 0; tmd_resp->mitigation_device_list_ext01_valid &&
  289. i < tmd_resp->mitigation_device_list_ext01_len; i++) {
  290. struct qmi_cooling_device *qmi_cdev = NULL;
  291. list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
  292. qmi_node) {
  293. struct tmd_mitigation_dev_list_type_v01 *device =
  294. &tmd_resp->mitigation_device_list_ext01[i];
  295. if ((strncasecmp(qmi_cdev->qmi_name,
  296. device->mitigation_dev_id.mitigation_dev_id,
  297. QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
  298. continue;
  299. qmi_cdev->connection_active = true;
  300. qmi_cdev->max_level = device->max_mitigation_level;
  301. /*
  302. * It is better to set current state
  303. * initially or during restart
  304. */
  305. qmi_tmd_send_state_request(qmi_cdev,
  306. qmi_cdev->mtgn_state);
  307. if (!qmi_cdev->cdev)
  308. ret = qmi_register_cooling_device(qmi_cdev);
  309. break;
  310. }
  311. }
  312. kfree(tmd_resp);
  313. return ret;
  314. reg_exit:
  315. mutex_unlock(&tmd->mutex);
  316. kfree(tmd_resp);
  317. return ret;
  318. }
/*
 * Deferred handler queued from thermal_qmi_new_server(): verifies the
 * server's device list and registers cooling devices from process context.
 */
static void qmi_tmd_svc_arrive(struct work_struct *work)
{
	struct qmi_tmd_instance *tmd = container_of(work,
						    struct qmi_tmd_instance,
						    svc_arrive_work);

	verify_devices_and_register(tmd);
}
  326. static void thermal_qmi_net_reset(struct qmi_handle *qmi)
  327. {
  328. struct qmi_tmd_instance *tmd = container_of(qmi,
  329. struct qmi_tmd_instance,
  330. handle);
  331. struct qmi_cooling_device *qmi_cdev = NULL;
  332. list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
  333. qmi_node) {
  334. if (qmi_cdev->connection_active)
  335. qmi_tmd_send_state_request(qmi_cdev,
  336. qmi_cdev->mtgn_state);
  337. }
  338. }
  339. static void thermal_qmi_del_server(struct qmi_handle *qmi,
  340. struct qmi_service *service)
  341. {
  342. struct qmi_tmd_instance *tmd = container_of(qmi,
  343. struct qmi_tmd_instance,
  344. handle);
  345. struct qmi_cooling_device *qmi_cdev = NULL;
  346. list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node)
  347. qmi_cdev->connection_active = false;
  348. }
/*
 * qmi_ops.new_server: a TMD service instance arrived on QRTR. Connect the
 * handle's socket to it and defer device-list verification to a workqueue
 * (it performs blocking QMI transactions).
 *
 * NOTE(review): the kernel_connect() return value is ignored - a failed
 * connect would only surface later as QMI send errors; confirm this is
 * intentional.
 */
static int thermal_qmi_new_server(struct qmi_handle *qmi,
				  struct qmi_service *service)
{
	struct qmi_tmd_instance *tmd = container_of(qmi,
						    struct qmi_tmd_instance,
						    handle);
	struct sockaddr_qrtr sq = {AF_QIPCRTR, service->node, service->port};

	mutex_lock(&tmd->mutex);
	kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
	mutex_unlock(&tmd->mutex);
	queue_work(system_highpri_wq, &tmd->svc_arrive_work);

	return 0;
}
/* QRTR service lifecycle hooks for the TMD service */
static struct qmi_ops thermal_qmi_event_ops = {
	.new_server = thermal_qmi_new_server,
	.del_server = thermal_qmi_del_server,
	.net_reset = thermal_qmi_net_reset,
};
  367. static void qmi_tmd_cleanup(void)
  368. {
  369. int idx = 0;
  370. struct qmi_tmd_instance *tmd = tmd_instances;
  371. struct qmi_cooling_device *qmi_cdev, *c_next;
  372. for (; idx < tmd_inst_cnt; idx++) {
  373. mutex_lock(&tmd[idx].mutex);
  374. list_for_each_entry_safe(qmi_cdev, c_next,
  375. &tmd[idx].tmd_cdev_list, qmi_node) {
  376. qmi_cdev->connection_active = false;
  377. if (qmi_cdev->cdev)
  378. thermal_cooling_device_unregister(
  379. qmi_cdev->cdev);
  380. list_del(&qmi_cdev->qmi_node);
  381. }
  382. qmi_handle_release(&tmd[idx].handle);
  383. mutex_unlock(&tmd[idx].mutex);
  384. }
  385. }
  386. static int of_get_qmi_tmd_platform_data(struct device *dev)
  387. {
  388. int ret = 0, idx = 0, i = 0, subsys_cnt = 0;
  389. struct device_node *np = dev->of_node;
  390. struct device_node *subsys_np = NULL, *cdev_np = NULL;
  391. struct qmi_tmd_instance *tmd;
  392. struct qmi_cooling_device *qmi_cdev;
  393. subsys_cnt = of_get_available_child_count(np);
  394. if (!subsys_cnt) {
  395. dev_err(dev, "No child node to process\n");
  396. return -EFAULT;
  397. }
  398. tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL);
  399. if (!tmd)
  400. return -ENOMEM;
  401. for_each_available_child_of_node(np, subsys_np) {
  402. if (idx >= subsys_cnt) {
  403. of_node_put(subsys_np);
  404. break;
  405. }
  406. ret = of_property_read_u32(subsys_np, "qcom,instance-id",
  407. &tmd[idx].inst_id);
  408. if (ret) {
  409. dev_err(dev, "error reading qcom,insance-id. ret:%d\n",
  410. ret);
  411. goto data_subsys_error;
  412. }
  413. tmd[idx].dev = dev;
  414. mutex_init(&tmd[idx].mutex);
  415. INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list);
  416. INIT_WORK(&tmd[idx].svc_arrive_work, qmi_tmd_svc_arrive);
  417. for_each_available_child_of_node(subsys_np, cdev_np) {
  418. const char *qmi_name;
  419. qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev),
  420. GFP_KERNEL);
  421. if (!qmi_cdev) {
  422. ret = -ENOMEM;
  423. goto data_error;
  424. }
  425. strscpy(qmi_cdev->cdev_name, cdev_np->name,
  426. QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
  427. if (!of_property_read_string(cdev_np,
  428. "qcom,qmi-dev-name",
  429. &qmi_name)) {
  430. strscpy(qmi_cdev->qmi_name, qmi_name,
  431. QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
  432. } else {
  433. dev_err(dev, "Fail to parse dev name for %s\n",
  434. cdev_np->name);
  435. break;
  436. }
  437. /* Check for supported qmi dev*/
  438. for (i = 0; i < ARRAY_SIZE(device_clients); i++) {
  439. if (strcmp(device_clients[i],
  440. qmi_cdev->qmi_name) == 0)
  441. break;
  442. }
  443. if (i >= ARRAY_SIZE(device_clients)) {
  444. dev_err(dev, "Not supported dev name for %s\n",
  445. cdev_np->name);
  446. break;
  447. }
  448. qmi_cdev->tmd = &tmd[idx];
  449. qmi_cdev->np = cdev_np;
  450. qmi_cdev->mtgn_state = 0;
  451. list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list);
  452. }
  453. idx++;
  454. }
  455. of_node_put(np);
  456. tmd_instances = tmd;
  457. tmd_inst_cnt = subsys_cnt;
  458. return 0;
  459. data_error:
  460. of_node_put(cdev_np);
  461. data_subsys_error:
  462. of_node_put(subsys_np);
  463. of_node_put(np);
  464. return ret;
  465. }
  466. static int qmi_device_probe(struct platform_device *pdev)
  467. {
  468. struct device *dev = &pdev->dev;
  469. int ret = 0, idx = 0;
  470. ret = of_get_qmi_tmd_platform_data(dev);
  471. if (ret)
  472. goto probe_err;
  473. if (!tmd_instances || !tmd_inst_cnt) {
  474. dev_err(dev, "Empty tmd instances\n");
  475. return -EINVAL;
  476. }
  477. for (; idx < tmd_inst_cnt; idx++) {
  478. struct qmi_tmd_instance *tmd = &tmd_instances[idx];
  479. if (list_empty(&tmd->tmd_cdev_list))
  480. continue;
  481. ret = qmi_handle_init(&tmd->handle,
  482. TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN,
  483. &thermal_qmi_event_ops, NULL);
  484. if (ret < 0) {
  485. dev_err(dev, "QMI[0x%x] handle init failed. err:%d\n",
  486. tmd->inst_id, ret);
  487. tmd_inst_cnt = idx;
  488. goto probe_err;
  489. }
  490. ret = qmi_add_lookup(&tmd->handle, TMD_SERVICE_ID_V01,
  491. TMD_SERVICE_VERS_V01,
  492. tmd->inst_id);
  493. if (ret < 0) {
  494. dev_err(dev, "QMI register failed for 0x%x, ret:%d\n",
  495. tmd->inst_id, ret);
  496. goto probe_err;
  497. }
  498. }
  499. return 0;
  500. probe_err:
  501. qmi_tmd_cleanup();
  502. return ret;
  503. }
/* Driver remove: tear down all instances and cooling devices */
static int qmi_device_remove(struct platform_device *pdev)
{
	qmi_tmd_cleanup();

	return 0;
}
  509. static const struct of_device_id qmi_device_match[] = {
  510. {.compatible = "qcom,qmi-cooling-devices"},
  511. {}
  512. };
/* Platform driver bound via the "qcom,qmi-cooling-devices" compatible */
static struct platform_driver qmi_device_driver = {
	.probe = qmi_device_probe,
	.remove = qmi_device_remove,
	.driver = {
		.name = QMI_CDEV_DRIVER,
		.of_match_table = qmi_device_match,
	},
};
  521. static int __init qmi_device_init(void)
  522. {
  523. return platform_driver_register(&qmi_device_driver);
  524. }
  525. module_init(qmi_device_init);
  526. static void __exit qmi_device_exit(void)
  527. {
  528. platform_driver_unregister(&qmi_device_driver);
  529. }
  530. module_exit(qmi_device_exit);
  531. MODULE_LICENSE("GPL");
  532. MODULE_DESCRIPTION("QTI QMI cooling device driver");