  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include "hab.h"
  7. #define HAB_OPEN_REQ_EXPIRE_TIME_S (3600*10)
  8. void hab_open_request_init(struct hab_open_request *request,
  9. int type,
  10. struct physical_channel *pchan,
  11. int vchan_id,
  12. int sub_id,
  13. int open_id)
  14. {
  15. request->type = type;
  16. request->pchan = pchan;
  17. request->xdata.vchan_id = vchan_id;
  18. request->xdata.sub_id = sub_id;
  19. request->xdata.open_id = open_id;
  20. request->xdata.ver_proto = HAB_VER_PROT;
  21. }
  22. int hab_open_request_send(struct hab_open_request *request)
  23. {
  24. struct hab_header header = HAB_HEADER_INITIALIZER;
  25. int ret = 0;
  26. HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
  27. HAB_HEADER_SET_TYPE(header, request->type);
  28. ret = physical_channel_send(request->pchan, &header, &request->xdata,
  29. HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
  30. if (ret != 0)
  31. pr_err("pchan %s failed to send open req msg %d\n",
  32. request->pchan->name, ret);
  33. return ret;
  34. }
  35. /*
  36. * called when remote sends in open-request.
  37. * The sanity of the arg sizebytes is ensured by its caller hab_msg_recv.
  38. * The sizebytes should be equal to sizeof(struct hab_open_send_data)
  39. */
  40. int hab_open_request_add(struct physical_channel *pchan,
  41. size_t sizebytes, int request_type)
  42. {
  43. struct hab_open_node *node;
  44. struct hab_device *dev = pchan->habdev;
  45. struct hab_open_request *request;
  46. struct timespec64 ts = {0};
  47. int irqs_disabled = irqs_disabled();
  48. node = kzalloc(sizeof(*node), GFP_ATOMIC);
  49. if (!node)
  50. return -ENOMEM;
  51. request = &node->request;
  52. if (physical_channel_read(pchan, &request->xdata, sizebytes)
  53. != sizebytes)
  54. return -EIO;
  55. request->type = request_type;
  56. request->pchan = pchan;
  57. ktime_get_ts64(&ts);
  58. node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
  59. ts.tv_nsec/NSEC_PER_SEC;
  60. hab_pchan_get(pchan);
  61. hab_spin_lock(&dev->openlock, irqs_disabled);
  62. list_add_tail(&node->node, &dev->openq_list);
  63. dev->openq_cnt++;
  64. hab_spin_unlock(&dev->openlock, irqs_disabled);
  65. return 0;
  66. }
  67. /* local only */
  68. static int hab_open_request_find(struct uhab_context *ctx,
  69. struct hab_device *dev,
  70. struct hab_open_request *listen,
  71. struct hab_open_request **recv_request)
  72. {
  73. struct hab_open_node *node, *tmp;
  74. struct hab_open_request *request;
  75. struct timespec64 ts = {0};
  76. int ret = 0;
  77. if (ctx->closing ||
  78. (listen->pchan && listen->pchan->closed)) {
  79. *recv_request = NULL;
  80. return 1;
  81. }
  82. spin_lock_bh(&dev->openlock);
  83. if (list_empty(&dev->openq_list))
  84. goto done;
  85. ktime_get_ts64(&ts);
  86. list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
  87. request = (struct hab_open_request *)node;
  88. if ((request->type == listen->type ||
  89. request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) &&
  90. (request->xdata.sub_id == listen->xdata.sub_id) &&
  91. (!listen->xdata.open_id ||
  92. request->xdata.open_id == listen->xdata.open_id) &&
  93. (!listen->pchan ||
  94. request->pchan == listen->pchan)) {
  95. list_del(&node->node);
  96. dev->openq_cnt--;
  97. *recv_request = request;
  98. ret = 1;
  99. break;
  100. }
  101. if (node->age < (int64_t)ts.tv_sec + ts.tv_nsec/NSEC_PER_SEC) {
  102. pr_warn("open request type %d sub %d open %d\n",
  103. request->type, request->xdata.sub_id,
  104. request->xdata.sub_id);
  105. list_del(&node->node);
  106. hab_open_request_free(request);
  107. }
  108. }
  109. done:
  110. spin_unlock_bh(&dev->openlock);
  111. return ret;
  112. }
  113. void hab_open_request_free(struct hab_open_request *request)
  114. {
  115. if (request) {
  116. hab_pchan_put(request->pchan);
  117. kfree(request);
  118. } else
  119. pr_err("empty request found\n");
  120. }
/*
 * Wait for a matching open request on dev's openq.
 *
 * @ctx:          calling context; a closing ctx terminates the wait
 * @dev:          hab device whose openq_list is watched
 * @listen:       match template (type/sub_id, optional open_id/pchan)
 * @recv_request: out: claimed request on success, NULL otherwise
 * @ms_timeout:   >0 selects the timed (BE) path; 0 selects the
 *                indefinite (FE) path
 * @flags:        HABMM_SOCKET_OPEN_FLAGS_* from the caller
 *
 * Returns 0 when a request was claimed; -EAGAIN on timeout; -EINTR when
 * interrupted by a signal; -ENODEV when the context closed during the
 * wait; -EINVAL on bad arguments.
 */
int hab_open_listen(struct uhab_context *ctx,
		struct hab_device *dev,
		struct hab_open_request *listen,
		struct hab_open_request **recv_request,
		int ms_timeout,
		unsigned int flags)
{
	int ret = 0;
	unsigned int uninterruptible = 0;

	if (!ctx || !listen || !recv_request) {
		pr_err("listen failed ctx %pK listen %pK request %pK\n",
			ctx, listen, recv_request);
		return -EINVAL;
	}

	/* This flag is for HAB clients in kernel space only, to avoid calling any
	 * unexpected uninterruptible habmm_socket_open() since it is not killable.
	 */
	if (ctx->kernel)
		uninterruptible = (flags & HABMM_SOCKET_OPEN_FLAGS_UNINTERRUPTIBLE);

	*recv_request = NULL;
	if (ms_timeout > 0) { /* be case */
		ms_timeout = msecs_to_jiffies(ms_timeout);
		ret = wait_event_interruptible_timeout(dev->openq,
			hab_open_request_find(ctx, dev, listen, recv_request),
			ms_timeout);
		if (!ret) {
			pr_debug("%s timeout in open listen\n", dev->name);
			ret = -EAGAIN; /* condition not met */
		} else if (-ERESTARTSYS == ret) {
			pr_warn("something failed in open listen ret %d\n",
				ret);
			ret = -EINTR; /* condition not met */
		} else if (ret > 0)
			ret = 0; /* condition met */
	} else { /* fe case */
		if (uninterruptible) { /* fe uinterruptible case */
			wait_event(dev->openq,
				hab_open_request_find(ctx, dev, listen, recv_request));
			/* wait_event returns void; a NULL *recv_request with
			 * ctx->closing set means the wait ended by teardown
			 */
			if (ctx->closing) {
				pr_warn("local closing during open ret %d\n", ret);
				ret = -ENODEV;
			}
		} else { /* fe interruptible case */
			ret = wait_event_interruptible(dev->openq,
				hab_open_request_find(ctx, dev, listen, recv_request));
			/* closing takes precedence over signal delivery */
			if (ctx->closing) {
				pr_warn("local closing during open ret %d\n", ret);
				ret = -ENODEV;
			} else if (-ERESTARTSYS == ret) {
				pr_warn("local interrupted ret %d\n", ret);
				ret = -EINTR;
			}
		}
	}

	return ret;
}
  177. /*
  178. * called when receiving remote's cancel init from FE or init-ack from BE.
  179. * The sanity of the arg sizebytes is ensured by its caller hab_msg_recv.
  180. * The sizebytes should be equal to sizeof(struct hab_open_send_data)
  181. */
/*
 * Handle a remote cancel: if the cancelled open request is still sitting
 * unserviced in openq_list, delete it quietly. Otherwise the local open
 * is presumed in flight, so queue an INIT_CANCEL node and wake the
 * openq so the waiter in hab_open_listen can bail out.
 *
 * Returns 0 on success, -EIO on a short channel read, -ENOMEM if the
 * cancel node cannot be allocated.
 */
int hab_open_receive_cancel(struct physical_channel *pchan,
		size_t sizebytes)
{
	struct hab_device *dev = pchan->habdev;
	struct hab_open_send_data data = {0};
	struct hab_open_request *request;
	struct hab_open_node *node, *tmp;
	int bfound = 0;
	struct timespec64 ts = {0};
	int irqs_disabled = irqs_disabled();

	if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
		return -EIO;

	hab_spin_lock(&dev->openlock, irqs_disabled);
	list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
		request = &node->request;
		/* check if open request has been serviced or not */
		if ((request->type == HAB_PAYLOAD_TYPE_INIT ||
			request->type == HAB_PAYLOAD_TYPE_INIT_ACK) &&
			(request->xdata.sub_id == data.sub_id) &&
			(request->xdata.open_id == data.open_id) &&
			(request->xdata.vchan_id == data.vchan_id)) {
			list_del(&node->node);
			/* plain kfree here: the pchan ref is NOT dropped —
			 * NOTE(review): asymmetric with hab_open_request_free;
			 * confirm whether a hab_pchan_put is owed
			 */
			kfree(node);
			dev->openq_cnt--;
			pr_info("open cancelled on pchan %s vcid %x subid %d openid %d\n",
				pchan->name, data.vchan_id,
				data.sub_id, data.open_id);
			/* found un-serviced open request, delete it */
			bfound = 1;
			break;
		}
	}
	hab_spin_unlock(&dev->openlock, irqs_disabled);

	if (!bfound) {
		pr_info("init waiting is in-flight. vcid %x sub %d open %d\n",
			data.vchan_id, data.sub_id, data.open_id);

		/* add cancel to the openq to let the waiting open bail out */
		node = kzalloc(sizeof(*node), GFP_ATOMIC);
		if (!node)
			return -ENOMEM;

		/* mirror hab_open_request_init, but typed INIT_CANCEL so
		 * hab_open_request_find matches it against any listener
		 */
		request = &node->request;
		request->type = HAB_PAYLOAD_TYPE_INIT_CANCEL;
		request->pchan = pchan;
		request->xdata.vchan_id = data.vchan_id;
		request->xdata.sub_id = data.sub_id;
		request->xdata.open_id = data.open_id;
		request->xdata.ver_fe = data.ver_fe;
		request->xdata.ver_be = data.ver_be;

		/* same expiry stamp scheme as hab_open_request_add */
		ktime_get_ts64(&ts);
		node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
			ts.tv_nsec/NSEC_PER_SEC;

		/* put when this node is handled in open path */
		hab_pchan_get(pchan);

		hab_spin_lock(&dev->openlock, irqs_disabled);
		list_add_tail(&node->node, &dev->openq_list);
		dev->openq_cnt++;
		hab_spin_unlock(&dev->openlock, irqs_disabled);

		wake_up(&dev->openq);
	}

	return 0;
}
  243. /* calls locally to send cancel pending open to remote */
  244. int hab_open_cancel_notify(struct hab_open_request *request)
  245. {
  246. struct hab_header header = HAB_HEADER_INITIALIZER;
  247. int ret = 0;
  248. HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
  249. HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
  250. ret = physical_channel_send(request->pchan, &header, &request->xdata,
  251. HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
  252. if (ret != 0)
  253. pr_err("pchan %s failed to send open cancel msg %d\n",
  254. request->pchan->name, ret);
  255. return ret;
  256. }
  257. /*
  258. * There will be scheduling in habmm_socket_open, which cannot be called
  259. * in the atomic context. Therefore, there is no need to consider such
  260. * atomic caller context which already disables h/w irq when using
  261. * hab_write_lock/hab_write_unlock here.
  262. */
  263. int hab_open_pending_enter(struct uhab_context *ctx,
  264. struct physical_channel *pchan,
  265. struct hab_open_node *pending)
  266. {
  267. hab_write_lock(&ctx->ctx_lock, !ctx->kernel);
  268. list_add_tail(&pending->node, &ctx->pending_open);
  269. ctx->pending_cnt++;
  270. hab_write_unlock(&ctx->ctx_lock, !ctx->kernel);
  271. return 0;
  272. }
  273. int hab_open_pending_exit(struct uhab_context *ctx,
  274. struct physical_channel *pchan,
  275. struct hab_open_node *pending)
  276. {
  277. struct hab_open_node *node, *tmp;
  278. int ret = -ENOENT;
  279. hab_write_lock(&ctx->ctx_lock, !ctx->kernel);
  280. list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
  281. if ((node->request.type == pending->request.type) &&
  282. (node->request.pchan
  283. == pending->request.pchan) &&
  284. (node->request.xdata.vchan_id
  285. == pending->request.xdata.vchan_id) &&
  286. (node->request.xdata.sub_id
  287. == pending->request.xdata.sub_id) &&
  288. (node->request.xdata.open_id
  289. == pending->request.xdata.open_id)) {
  290. list_del(&node->node);
  291. ctx->pending_cnt--;
  292. ret = 0;
  293. }
  294. }
  295. hab_write_unlock(&ctx->ctx_lock, !ctx->kernel);
  296. return ret;
  297. }