qvm_comm.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include "hab.h"
  7. #include "hab_qvm.h"
  8. #include "hab_trace_os.h"
/* Cross-VM scheduler latency probe timestamps (sclk ticks):
 * [0] = tick recorded at last HAB_PAYLOAD_TYPE_SCHE_MSG send,
 * [1] = tick recorded at last HAB_PAYLOAD_TYPE_SCHE_MSG_ACK send.
 * Written after the TX unlock and copied into later SCHE_RESULT_REQ/RSP
 * payloads by physical_channel_send().
 */
static unsigned long long xvm_sche_tx_tv_buffer[2];
  10. static void pipe_read_trace(struct qvm_channel *dev,
  11. int size, int ret)
  12. {
  13. struct hab_pipe_endpoint *ep = dev->pipe_ep;
  14. struct hab_shared_buf *sh_buf = dev->rx_buf;
  15. struct dbg_items *its = dev->dbg_itms;
  16. struct dbg_item *it = &its->it[its->idx];
  17. it->rd_cnt = sh_buf->rd_count;
  18. it->wr_cnt = sh_buf->wr_count;
  19. it->va = (void *)&sh_buf->data[ep->rx_info.index];
  20. it->index = ep->rx_info.index;
  21. it->sz = size;
  22. it->ret = ret;
  23. its->idx++;
  24. if (its->idx >= DBG_ITEM_SIZE)
  25. its->idx = 0;
  26. }
  27. /* this is only used to read payload, never the head! */
  28. int physical_channel_read(struct physical_channel *pchan,
  29. void *payload,
  30. size_t read_size)
  31. {
  32. struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
  33. if (dev) {
  34. int ret = hab_pipe_read(dev->pipe_ep,
  35. dev->rx_buf, PIPE_SHMEM_SIZE,
  36. payload, read_size, 0);
  37. /* log */
  38. pipe_read_trace(dev, read_size, ret);
  39. return ret;
  40. } else
  41. return 0;
  42. }
/*
 * Push one message (header + optional payload) into the shared-memory
 * TX ring and notify the peer.
 *
 * @pchan:   physical channel to send on
 * @header:  message header; sequence and signature are filled in here
 * @payload: message body (may be patched in place for PROFILE/SCHE types)
 * @flags:   unused in this arch (virtio only)
 *
 * Returns 0 on success, -EINVAL if the message can never fit in the ring
 * (or a PROFILE message has a NULL payload), -EAGAIN when the ring is
 * currently full, -EIO on a short pipe write.
 *
 * The io_lock is held from the free-space check through the commit so
 * header and payload land contiguously in the ring.
 */
int physical_channel_send(struct physical_channel *pchan,
		struct hab_header *header,
		void *payload,
		unsigned int flags)
{
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
	size_t total_size = sizeof(*header) + sizebytes;
	uint32_t buf_size = PIPE_SHMEM_SIZE;
	int irqs_disabled = irqs_disabled();

	/* Only used in virtio arch */
	(void)flags;

	/* larger than the whole ring: would never fit, even when empty */
	if (total_size > buf_size)
		return -EINVAL; /* too much data for ring */

	hab_spin_lock(&dev->io_lock, irqs_disabled);

	trace_hab_pchan_send_start(pchan);

	/* free space = ring size - outstanding (written - read) bytes;
	 * unsigned subtraction keeps this correct across counter wrap
	 */
	if ((buf_size -
		(dev->pipe_ep->tx_info.wr_count -
		dev->tx_buf->rd_count)) < total_size) {
		hab_spin_unlock(&dev->io_lock, irqs_disabled);
		return -EAGAIN; /* not enough free space */
	}

	/* stamp the outgoing sequence (committed to pchan only on success
	 * below) and the signature the receiver validates against
	 */
	header->sequence = pchan->sequence_tx + 1;
	header->signature = HAB_HEAD_SIGNATURE;

	if (hab_pipe_write(dev->pipe_ep, dev->tx_buf, buf_size,
		(unsigned char *)header,
		sizeof(*header)) != sizeof(*header)) {
		hab_spin_unlock(&dev->io_lock, irqs_disabled);
		pr_err("***incompleted pchan send id-type %x size %x session %d seq# %d\n",
			header->id_type, header->payload_size,
			header->session_id,
			header->sequence);
		return -EIO;
	}

	/* PROFILE messages: stamp the TX wall time into the payload just
	 * before it is written, so the receiver can measure crossing time
	 */
	if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
		struct timespec64 ts = {0};
		struct habmm_xing_vm_stat *pstat =
			(struct habmm_xing_vm_stat *)payload;

		if (pstat) {
			ktime_get_ts64(&ts);
			pstat->tx_sec = ts.tv_sec;
			pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
		} else {
			/* header is already in the ring at this point but
			 * never committed, so the peer will not see it
			 */
			hab_spin_unlock(&dev->io_lock, irqs_disabled);
			pr_err("***incompleted pchan send prof id-type %x size %x session %d seq# %d\n",
				header->id_type, header->payload_size,
				header->session_id,
				header->sequence);
			return -EINVAL;
		}
	} else if (HAB_HEADER_GET_TYPE(*header)
		== HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ) {
		/* scheduler-test result: inject the saved SCHE_MSG TX tick
		 * (NOTE(review): assumes payload holds >= 1 u64 slot)
		 */
		((unsigned long long *)payload)[0] = xvm_sche_tx_tv_buffer[0];
	} else if (HAB_HEADER_GET_TYPE(*header)
		== HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP) {
		/* inject the saved SCHE_MSG_ACK TX tick into slot [2]
		 * (NOTE(review): assumes payload holds >= 3 u64 slots)
		 */
		((unsigned long long *)payload)[2] = xvm_sche_tx_tv_buffer[1];
	}

	if (sizebytes) {
		if (hab_pipe_write(dev->pipe_ep, dev->tx_buf, buf_size,
			(unsigned char *)payload,
			sizebytes) != sizebytes) {
			hab_spin_unlock(&dev->io_lock, irqs_disabled);
			pr_err("***incompleted pchan send id-type %x size %x session %d seq# %d\n",
				header->id_type, header->payload_size,
				header->session_id,
				header->sequence);
			return -EIO;
		}
	}

	/* make header + payload visible to the peer in one step */
	hab_pipe_write_commit(dev->pipe_ep, dev->tx_buf);

	/* locally +1 as late as possible but before unlock */
	++pchan->sequence_tx;

	trace_hab_pchan_send_done(pchan);

	hab_spin_unlock(&dev->io_lock, irqs_disabled);

	/* record the send tick for scheduler latency probes; done outside
	 * the lock since only the probe path consumes these values
	 */
	if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG)
		xvm_sche_tx_tv_buffer[0] = msm_timer_get_sclk_ticks();
	else if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG_ACK)
		xvm_sche_tx_tv_buffer[1] = msm_timer_get_sclk_ticks();

	habhyp_notify(dev);

	return 0;
}
/*
 * Bottom-half RX dispatcher: drain every complete message header from
 * the shared-memory RX ring and hand each to hab_msg_recv().
 *
 * @data: the struct physical_channel pointer, cast to unsigned long
 *        (tasklet-style calling convention).
 *
 * Holds rxbuf_lock for the whole drain loop, releasing it only around
 * dump_hab_wq(), which must not run in elevated context.
 */
void physical_channel_rx_dispatch(unsigned long data)
{
	struct hab_header header;
	struct physical_channel *pchan = (struct physical_channel *)data;
	struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
	int irqs_disabled = irqs_disabled();
	uint32_t buf_size = PIPE_SHMEM_SIZE;

	hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
	while (1) {
		uint32_t rd, wr, idx;
		int ret;

		ret = hab_pipe_read(dev->pipe_ep,
			dev->rx_buf, buf_size,
			(unsigned char *)&header,
			sizeof(header), 1); /* clear head after read */

		/* debug */
		pipe_read_trace(dev, sizeof(header), ret);

		/* NOTE(review): the sentinels below compare an int ret against
		 * unsigned constants; 0xFFFFFFFF matches ret == -1 and
		 * 0xFFFFFFFE matches ret == -2 after the usual conversions —
		 * confirm against hab_pipe_read()'s declared return values.
		 */
		if (ret == 0xFFFFFFFF) { /* signature mismatched first time */
			hab_pipe_rxinfo(dev->pipe_ep, dev->rx_buf, &rd, &wr, &idx);
			pr_err("!!!!! HAB signature mismatch expect %X received %X, id_type %X size %X session %X sequence %X\n",
				HAB_HEAD_SIGNATURE, header.signature,
				header.id_type,
				header.payload_size,
				header.session_id,
				header.sequence);
			pr_err("!!!!! rxinfo rd %d wr %d index %X\n",
				rd, wr, idx);

			/* snapshot the whole RX ring for post-mortem */
			memcpy(dev->side_buf,
				(void *)&dev->rx_buf->data[0],
				buf_size);

			hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
			/* cannot run in elevated context */
			dump_hab_wq(pchan);
			hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
		} else if (ret == 0xFFFFFFFE) { /* continuous signature mismatches */
			continue;
		} else if (ret != sizeof(header))
			break; /* no data available */

		/* warn on sequence gaps but keep dispatching regardless */
		if (pchan->sequence_rx + 1 != header.sequence)
			pr_err("%s: expected sequence_rx is %u, received is %u\n",
				pchan->name, pchan->sequence_rx, header.sequence);

		pchan->sequence_rx = header.sequence;

		/* log msg recv timestamp: enter pchan dispatcher */
		trace_hab_pchan_recv_start(pchan);

		/* hab_msg_recv() reads the payload that follows this header */
		hab_msg_recv(pchan, &header);
	}
	hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}