dfc_qmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <linux/module.h>
#include "rmnet_qmap.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"

#define QMAP_DFC_VER		1
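
/*
 * DFC config request: sent once at client init to register with the
 * modem for DFC indications. cmd_id carries the indication being
 * enabled (QMAP_DFC_IND), tx_info requests TX link status reporting,
 * and ep_type/iface_id identify the data endpoint.
 */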
struct qmap_dfc_config {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			cmd_id;
	u8			reserved;
	u8			tx_info:1;
	u8			reserved2:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved3;
} __aligned(1);
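
/*
 * DFC indication from the modem: either a TX link status update
 * (tx_info_valid) or a flow control grant for a single bearer,
 * optionally with RX byte accounting and a TCP bidirectional hint.
 */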
struct qmap_dfc_ind {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	__be16			seq_num;
	u8			reserved2;
	u8			tx_info_valid:1;
	u8			tx_info:1;
	u8			rx_bytes_valid:1;
	u8			reserved3:5;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			bearer_status:3;
	u8			reserved4:4;
	__be32			grant;
	__be32			rx_bytes;
	u32			reserved6;
} __aligned(1);
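
/* DFC query request: asks the modem for the current grant on a bearer */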
struct qmap_dfc_query {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u32			reserved3;
} __aligned(1);
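
/* DFC query response, received as a QMAP ACK to the query request */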
struct qmap_dfc_query_resp {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			rx_bytes_valid:1;
	u8			reserved:6;
	u8			invalid:1;
	u8			reserved2:7;
	__be32			grant;
	__be32			rx_bytes;
	u32			reserved4;
} __aligned(1);
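
/*
 * End marker request (modem -> host) and the matching confirmation
 * (host -> modem); both carry the same bearer and sequence number.
 */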
struct qmap_dfc_end_marker_req {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);
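
/*
 * State for the single DFC QMAP client: scratch buffers used to
 * convert QMAP commands into the QMI indication format, plus an
 * RCU-protected pointer to the active client instance.
 */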
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static bool dfc_config_acked;

static void dfc_qmap_send_config(struct dfc_qmi_data *data);
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 struct rmnet_bearer_map *bearer,
					 u16 seq, u32 tx_id);
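
/*
 * Convert a DFC indication into the QMI indication format and hand it
 * to the common DFC core: TX link status goes through
 * dfc_handle_tx_link_status_ind(), grants through
 * dfc_do_burst_flow_control().
 */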
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
			       struct sk_buff *skb)
{
	struct qmap_dfc_ind *cmd;

	if (skb->len < sizeof(struct qmap_dfc_ind))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_ind *)skb->data;

	if (cmd->tx_info_valid) {
		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
		qmap_tx_ind.tx_status = cmd->tx_info;
		qmap_tx_ind.bearer_info_valid = 1;
		qmap_tx_ind.bearer_info_len = 1;
		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

		/* Ignore grant since it is always 0 */
		goto done;
	}

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);

	if (cmd->rx_bytes_valid) {
		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
	}

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);

done:
	return QMAP_CMD_ACK;
}
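
/*
 * Apply a DFC query response through the same burst flow control
 * path. seq_num is forced to 0xFFFF as a sentinel, and the final
 * argument marks the update as coming from a query.
 */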
static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
				      struct sk_buff *skb)
{
	struct qmap_dfc_query_resp *cmd;

	if (skb->len < sizeof(struct qmap_dfc_query_resp))
		return QMAP_CMD_DONE;

	cmd = (struct qmap_dfc_query_resp *)skb->data;

	if (cmd->invalid)
		return QMAP_CMD_DONE;

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

	if (cmd->rx_bytes_valid) {
		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
	}

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);

	return QMAP_CMD_DONE;
}
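
/*
 * Handle an end marker for one bearer. If the bearer still has a
 * grant and the request matches the last TX sequence number seen,
 * defer the confirmation (ack_req/ack_txid) so it can go out in-band
 * behind the remaining data; otherwise confirm immediately.
 */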
static int dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
				   u8 bearer_id, u16 seq_num, u32 tx_id)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	int rc = QMAP_CMD_ACK;

	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
	if (!dev)
		return rc;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return rc;

	spin_lock_bh(&qos->qos_lock);

	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
	if (!bearer) {
		spin_unlock_bh(&qos->qos_lock);
		return rc;
	}

	if (bearer->last_seq == seq_num && bearer->grant_size) {
		bearer->ack_req = 1;
		bearer->ack_txid = tx_id;
	} else {
		dfc_qmap_send_end_marker_cnf(qos, bearer, seq_num, tx_id);
	}

	spin_unlock_bh(&qos->qos_lock);

	return QMAP_CMD_DONE;
}
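
/* Validate and unpack an end marker request from the modem */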
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
					  struct sk_buff *skb)
{
	struct qmap_dfc_end_marker_req *cmd;

	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_end_marker_req *)skb->data;

	return dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
				       ntohs(cmd->seq_num),
				       ntohl(cmd->hdr.tx_id));
}
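
/*
 * Entry point for QMAP DFC commands. Requests are dispatched by
 * command name; the only ACKs consumed here are the DFC query
 * response and the one-time config ACK. Returns a QMAP_CMD_* code
 * telling the caller whether to ACK the command back to the modem.
 */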
int dfc_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	struct dfc_qmi_data *dfc;
	int rc = QMAP_CMD_DONE;

	cmd = (struct qmap_cmd_hdr *)skb->data;

	if (cmd->cmd_name == QMAP_DFC_QUERY) {
		if (cmd->cmd_type != QMAP_CMD_ACK)
			return rc;
	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
		if (cmd->cmd_type == QMAP_CMD_ACK &&
		    cmd->cmd_name == QMAP_DFC_CONFIG)
			dfc_config_acked = true;
		return rc;
	}

	dfc = rcu_dereference(qmap_dfc_data);
	if (!dfc || READ_ONCE(dfc->restart_state))
		return rc;

	/* Re-send DFC config once if needed */
	if (unlikely(!dfc_config_acked)) {
		dfc_qmap_send_config(dfc);
		dfc_config_acked = true;
	}

	switch (cmd->cmd_name) {
	case QMAP_DFC_IND:
		rc = dfc_qmap_handle_ind(dfc, skb);
		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
		break;

	case QMAP_DFC_QUERY:
		rc = dfc_qmap_handle_query_resp(dfc, skb);
		break;

	case QMAP_DFC_END_MARKER:
		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
		break;

	default:
		if (cmd->cmd_type == QMAP_CMD_REQUEST)
			rc = QMAP_CMD_UNSUPPORTED;
	}

	return rc;
}
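
/*
 * Send the DFC config request that enables DFC indications
 * (cmd_id = QMAP_DFC_IND) with TX link status reporting.
 */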
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
	struct sk_buff *skb;
	struct qmap_dfc_config *dfc_config;
	unsigned int len = sizeof(struct qmap_dfc_config);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
	memset(dfc_config, 0, len);

	dfc_config->hdr.cd_bit = 1;
	dfc_config->hdr.mux_id = 0;
	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_config->hdr.tx_id = htonl(rmnet_qmap_next_txid());

	dfc_config->cmd_ver = QMAP_DFC_VER;
	dfc_config->cmd_id = QMAP_DFC_IND;
	dfc_config->tx_info = 1;
	dfc_config->ep_type = htonl(data->svc.ep_type);
	dfc_config->iface_id = htonl(data->svc.iface_id);

	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
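
/* Query the modem for the current grant on a single bearer */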
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_query *dfc_query;
	unsigned int len = sizeof(struct qmap_dfc_query);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
	memset(dfc_query, 0, len);

	dfc_query->hdr.cd_bit = 1;
	dfc_query->hdr.mux_id = mux_id;
	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_query->hdr.tx_id = htonl(rmnet_qmap_next_txid());

	dfc_query->cmd_ver = QMAP_DFC_VER;
	dfc_query->bearer_id = bearer_id;

	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
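
/* Confirm an end marker back to the modem, echoing its tx_id and seq */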
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 struct rmnet_bearer_map *bearer,
					 u16 seq, u32 tx_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_end_marker_cnf *em_cnf;
	unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
	memset(em_cnf, 0, len);

	em_cnf->hdr.cd_bit = 1;
	em_cnf->hdr.mux_id = qos->mux_id;
	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
	em_cnf->hdr.tx_id = htonl(tx_id);

	em_cnf->cmd_ver = QMAP_DFC_VER;
	em_cnf->bearer_id = bearer->bearer_id;
	em_cnf->seq_num = htons(seq);

	/* This cmd needs to be sent in-band after data on the current
	 * channel. But due to an IPA bug, it cannot be sent over LLC, so
	 * send it over the QMAP control channel if the current channel
	 * is LLC.
	 */
	if (bearer->ch_switch.current_ch == RMNET_CH_DEFAULT)
		rmnet_qmap_send(skb, bearer->ch_switch.current_ch, true);
	else
		rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
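
/*
 * Send a flow ack on behalf of the DFC core: a DISABLE ack releases
 * the deferred end marker confirmation, while a THRESHOLD ack
 * triggers a grant query instead.
 */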
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
	struct rmnet_bearer_map *bearer;

	if (type == DFC_ACK_TYPE_DISABLE) {
		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
		if (bearer)
			dfc_qmap_send_end_marker_cnf(qos, bearer,
						     seq, bearer->ack_txid);
	} else if (type == DFC_ACK_TYPE_THRESHOLD) {
		dfc_qmap_send_query(qos->mux_id, bearer_id);
	}
}
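
/*
 * Register the single DFC QMAP client for an rmnet port and start the
 * config handshake with the modem.
 */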
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
			 struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;

	if (!port || !qmi)
		return -EINVAL;

	/* Prevent double init */
	data = rcu_dereference(qmap_dfc_data);
	if (data)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	qmi->dfc_clients[index] = (void *)data;
	rcu_assign_pointer(qmap_dfc_data, data);

	rmnet_qmap_init(port);

	trace_dfc_client_state_up(data->index, data->svc.instance,
				  data->svc.ep_type, data->svc.iface_id);

	pr_info("DFC QMAP init\n");

	dfc_config_acked = false;
	dfc_qmap_send_config(data);

	return 0;
}
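
/*
 * Tear down the client: mark restart so in-flight handlers bail out,
 * clear the RCU pointer, and free the instance after a grace period.
 */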
void dfc_qmap_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	trace_dfc_client_state_down(data->index, 0);

	rmnet_qmap_exit();

	WRITE_ONCE(data->restart_state, 1);
	RCU_INIT_POINTER(qmap_dfc_data, NULL);
	synchronize_rcu();

	kfree(data);

	pr_info("DFC QMAP exit\n");
}