mmrm_vm_fe_api.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/gunyah/gh_msgq.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/sysfs.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include "mmrm_vm_fe.h"
#include "mmrm_vm_interface.h"
#include "mmrm_vm_msgq.h"
#include "mmrm_vm_debug.h"

#define get_client_handle_2_id(client) (client->client_uid)

extern struct mmrm_vm_driver_data *drv_vm_fe;

#define MAX_TIMEOUT_MS 300

#define CHECK_SKIP_MMRM_CLK_RSRC(drv_data) \
{ \
	if (!drv_data->is_clk_scaling_supported) { \
		d_mpr_h("%s: mmrm clk rsrc not supported\n", __func__); \
		goto skip_mmrm; \
	} \
}

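/*
 * mmrm_fe_append_work_list() - queue a request and wait for the reply.
 * @msg_q: message-queue entry holding the request and response packets
 * @msg_sz: size of the request message in bytes
 *
 * Adds @msg_q to the pending-response list, stamps the request with the next
 * sequence number and sends it to the MMRM backend, then waits for the
 * corresponding response (signalled via @msg_q->complete) or times out after
 * MAX_TIMEOUT_MS.
 *
 * Return: 0 on success, -1 on timeout.
 */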
int mmrm_fe_append_work_list(struct mmrm_vm_msg_q *msg_q, int msg_sz)
{
	struct mmrm_vm_request_msg_pkt *msg_pkt = msg_q->m_req;
	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
	unsigned long waited_time_ms;

	init_completion(&msg_q->complete);

	mutex_lock(&fe_data->resp_works_lock);
	list_add_tail(&msg_q->link, &fe_data->resp_works);
	mutex_unlock(&fe_data->resp_works_lock);

	mutex_lock(&fe_data->msg_send_lock);
	msg_pkt->msg.hd.seq_no = fe_data->seq_no++;
	mutex_unlock(&fe_data->msg_send_lock);

	d_mpr_w("%s: seq no:%d\n", __func__, msg_pkt->msg.hd.seq_no);

	mmrm_vm_fe_request_send(drv_vm_fe, msg_pkt, msg_sz);

	/*
	 * wait_for_completion_timeout() returns the remaining jiffies,
	 * or 0 if the wait timed out.
	 */
	waited_time_ms = wait_for_completion_timeout(&msg_q->complete,
		msecs_to_jiffies(MAX_TIMEOUT_MS));
	if (waited_time_ms == 0) {
		d_mpr_e("%s: request send timeout\n", __func__);
		return -1;
	}

	return 0;
}

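/*
 * get_msg_work() - allocate a message-queue entry.
 *
 * Allocates a struct mmrm_vm_fe_pkt and wires its embedded request and
 * response packets into the returned msg_q. Free with release_msg_work().
 *
 * Return: pointer to the new msg_q, or NULL on allocation failure.
 */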
struct mmrm_vm_msg_q *get_msg_work(void)
{
	struct mmrm_vm_msg_q *msg_q;
	struct mmrm_vm_fe_pkt *data;

	data = kzalloc(sizeof(struct mmrm_vm_fe_pkt), GFP_KERNEL);
	if (data == NULL)
		goto err_mem_fail;

	msg_q = &data->msgq;
	msg_q->m_req = &data->req_pkt;
	msg_q->m_resp = &data->resp_pkt;

	return msg_q;

err_mem_fail:
	d_mpr_e("%s: failed to alloc msg buffer\n", __func__);
	return NULL;
}

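/*
 * release_msg_work() - free a message-queue entry obtained from get_msg_work().
 * @msg_q: entry to free; a NULL pointer is logged and ignored
 */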
void release_msg_work(struct mmrm_vm_msg_q *msg_q)
{
	struct mmrm_vm_fe_pkt *data;

	if (msg_q == NULL) {
		d_mpr_e("%s: release null msg ptr\n", __func__);
		return;
	}

	data = container_of(msg_q, struct mmrm_vm_fe_pkt, msgq);
	kfree(data);
}

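/*
 * mmrm_client_register() - register a client with the MMRM backend.
 * @desc: client description (type, priority, clock client descriptor)
 *
 * Verifies that the requested clock source is known to the front end, then
 * sends an MMRM_VM_REQUEST_REGISTER message and looks up the local client
 * object matching the client id returned in the response.
 *
 * Return: client handle on success, NULL on failure.
 */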
struct mmrm_client *mmrm_client_register(struct mmrm_client_desc *desc)
{
	struct mmrm_vm_msg_q *msg_q;
	struct mmrm_vm_api_request_msg *api_msg;
	struct mmrm_vm_register_request *reg_data;
	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
	int rc = 0;
	struct mmrm_client *client = NULL;

	if (mmrm_vm_fe_clk_src_get(desc) == NULL) {
		d_mpr_e("%s: FE doesn't support clk domain=%d client id=%d\n", __func__,
			desc->client_info.desc.client_domain, desc->client_info.desc.client_id);
		goto err_clk_src;
	}

	msg_q = get_msg_work();
	if (msg_q == NULL) {
		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
		goto err_no_mem;
	}

	api_msg = &msg_q->m_req->msg;
	reg_data = &api_msg->data.reg;

	api_msg->hd.cmd_id = MMRM_VM_REQUEST_REGISTER;
	reg_data->client_type = desc->client_type;
	reg_data->priority = desc->priority;
	memcpy(&reg_data->desc, &desc->client_info.desc, sizeof(reg_data->desc));

	rc = mmrm_fe_append_work_list(msg_q, msg_size);
	if (rc == 0)
		client = mmrm_vm_fe_get_client(msg_q->m_resp->msg.data.reg.client_id);

	release_msg_work(msg_q);

err_no_mem:
err_clk_src:
	return client;
}
EXPORT_SYMBOL(mmrm_client_register);

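/*
 * mmrm_client_deregister() - deregister a client from the MMRM backend.
 * @client: handle returned by mmrm_client_register()
 *
 * Return: the backend's return code, or -1 on local failure.
 */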
int mmrm_client_deregister(struct mmrm_client *client)
{
	int rc = -1;
	struct mmrm_vm_api_request_msg *api_msg;
	struct mmrm_vm_deregister_request *reg_data;
	struct mmrm_vm_msg_q *msg_q;
	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);

	msg_q = get_msg_work();
	if (msg_q == NULL) {
		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
		goto err_no_mem;
	}

	api_msg = &msg_q->m_req->msg;
	reg_data = &api_msg->data.dereg;

	api_msg->hd.cmd_id = MMRM_VM_REQUEST_DEREGISTER;
	reg_data->client_id = get_client_handle_2_id(client);

	rc = mmrm_fe_append_work_list(msg_q, msg_size);
	if (rc == 0)
		rc = msg_q->m_resp->msg.data.dereg.ret_code;

	release_msg_work(msg_q);

err_no_mem:
	return rc;
}
EXPORT_SYMBOL(mmrm_client_deregister);

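/*
 * mmrm_client_set_value() - request a new clock value for a client.
 * @client: handle returned by mmrm_client_register()
 * @client_data: flags and number of HW blocks for this request
 * @val: requested value
 *
 * Return: the value reported back in the backend's response on success,
 * -1 on failure.
 */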
int mmrm_client_set_value(struct mmrm_client *client,
	struct mmrm_client_data *client_data, unsigned long val)
{
	int rc = -1;
	struct mmrm_vm_api_request_msg *api_msg;
	struct mmrm_vm_setvalue_request *reg_data;
	struct mmrm_vm_msg_q *msg_q;
	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);

	msg_q = get_msg_work();
	if (msg_q == NULL) {
		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
		goto err_no_mem;
	}

	api_msg = &msg_q->m_req->msg;
	reg_data = &api_msg->data.setval;

	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE;
	reg_data->client_id = get_client_handle_2_id(client);
	reg_data->data.flags = client_data->flags;
	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
	reg_data->val = val;

	rc = mmrm_fe_append_work_list(msg_q, msg_size);
	if (rc == 0) {
		rc = msg_q->m_resp->msg.data.setval.val;
		d_mpr_h("%s: done rc=%d\n", __func__, rc);
	}

	/* the msg buffer is owned by this side; free it on every path */
	release_msg_work(msg_q);

err_no_mem:
	return rc;
}
EXPORT_SYMBOL(mmrm_client_set_value);

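/*
 * mmrm_client_set_value_in_range() - request a clock value range for a client.
 * @client: handle returned by mmrm_client_register()
 * @client_data: flags and number of HW blocks for this request
 * @val: requested current/min/max values
 *
 * Return: 0 on success, -1 on failure.
 */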
int mmrm_client_set_value_in_range(struct mmrm_client *client,
	struct mmrm_client_data *client_data,
	struct mmrm_client_res_value *val)
{
	int rc = -1;
	struct mmrm_vm_api_request_msg *api_msg;
	struct mmrm_vm_setvalue_inrange_request *reg_data;
	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
	struct mmrm_vm_msg_q *msg_q;

	msg_q = get_msg_work();
	if (msg_q == NULL) {
		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
		goto err_no_mem;
	}

	api_msg = &msg_q->m_req->msg;
	reg_data = &api_msg->data.setval_range;

	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE_INRANGE;
	reg_data->client_id = get_client_handle_2_id(client);
	reg_data->data.flags = client_data->flags;
	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
	reg_data->val.cur = val->cur;
	reg_data->val.max = val->max;
	reg_data->val.min = val->min;

	rc = mmrm_fe_append_work_list(msg_q, msg_size);

	/* the msg buffer is owned by this side; free it on every path */
	release_msg_work(msg_q);

err_no_mem:
	return rc;
}
EXPORT_SYMBOL(mmrm_client_set_value_in_range);

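/*
 * mmrm_client_get_value() - read the current value of a client from the backend.
 * @client: handle returned by mmrm_client_register()
 * @val: filled with the current/min/max values from the response
 *
 * Return: 0 on success, -1 on failure.
 */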
int mmrm_client_get_value(struct mmrm_client *client,
	struct mmrm_client_res_value *val)
{
	int rc = -1;
	struct mmrm_vm_api_request_msg *api_msg;
	struct mmrm_vm_getvalue_request *reg_data;
	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
	struct mmrm_vm_msg_q *msg_q;

	msg_q = get_msg_work();
	if (msg_q == NULL) {
		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
		goto err_no_mem;
	}

	api_msg = &msg_q->m_req->msg;
	reg_data = &api_msg->data.getval;

	api_msg->hd.cmd_id = MMRM_VM_REQUEST_GETVALUE;
	reg_data->client_id = get_client_handle_2_id(client);

	rc = mmrm_fe_append_work_list(msg_q, msg_size);
	if (rc == 0) {
		val->cur = msg_q->m_resp->msg.data.getval.val.cur;
		val->max = msg_q->m_resp->msg.data.getval.val.max;
		val->min = msg_q->m_resp->msg.data.getval.val.min;
	}

	/* the msg buffer is owned by this side; free it on every path */
	release_msg_work(msg_q);

err_no_mem:
	return rc;
}
EXPORT_SYMBOL(mmrm_client_get_value);

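/*
 * mmrm_client_check_scaling_supported() - check whether MMRM clock scaling is
 * supported for the given client type.
 * @client_type: client type to check
 * @client_domain: client domain (not evaluated here)
 *
 * Return: true if MMRM clock scaling is supported, false otherwise or if the
 * front-end driver has not finished probing.
 */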
bool mmrm_client_check_scaling_supported(enum mmrm_client_type client_type, u32 client_domain)
{
	struct mmrm_vm_fe_priv *fe_data;

	if (drv_vm_fe == (void *)-EPROBE_DEFER) {
		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
		goto err_exit;
	}

	fe_data = drv_vm_fe->vm_pvt_data;

	if (client_type == MMRM_CLIENT_CLOCK) {
		CHECK_SKIP_MMRM_CLK_RSRC(fe_data);
	}

	return true;

err_exit:
	d_mpr_e("%s: error exit\n", __func__);
skip_mmrm:
	return false;
}
EXPORT_SYMBOL(mmrm_client_check_scaling_supported);