dfc_qmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <linux/module.h>
#include "rmnet_ctl.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"

#define QMAP_DFC_VER		1

#define QMAP_CMD_DONE		-1

#define QMAP_CMD_REQUEST	0
#define QMAP_CMD_ACK		1
#define QMAP_CMD_UNSUPPORTED	2
#define QMAP_CMD_INVALID	3

#define QMAP_DFC_CONFIG		10
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_END_MARKER	13

struct qmap_hdr {
	u8	cd_pad;
	u8	mux_id;
	__be16	pkt_len;
} __aligned(1);

#define QMAP_HDR_LEN sizeof(struct qmap_hdr)

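/* QMAP control command header. cd_bit is set for command packets;
 * pkt_len counts everything after the 4-byte MAP header.
 */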
struct qmap_cmd_hdr {
	u8	pad_len:6;
	u8	reserved_bit:1;
	u8	cd_bit:1;
	u8	mux_id;
	__be16	pkt_len;

	u8	cmd_name;
	u8	cmd_type:2;
	u8	reserved:6;
	u16	reserved2;
	__be32	tx_id;
} __aligned(1);

struct qmap_dfc_config {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			cmd_id;
	u8			reserved;
	u8			tx_info:1;
	u8			reserved2:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved3;
} __aligned(1);

struct qmap_dfc_ind {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	__be16			seq_num;
	u8			reserved2;
	u8			tx_info_valid:1;
	u8			tx_info:1;
	u8			rx_bytes_valid:1;
	u8			reserved3:5;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			bearer_status:3;
	u8			reserved4:4;
	__be32			grant;
	__be32			rx_bytes;
	u32			reserved6;
} __aligned(1);

struct qmap_dfc_query {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u32			reserved3;
} __aligned(1);

struct qmap_dfc_query_resp {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			rx_bytes_valid:1;
	u8			reserved:6;
	u8			invalid:1;
	u8			reserved2:7;
	__be32			grant;
	__be32			rx_bytes;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_req {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static atomic_t qmap_txid;
static void *rmnet_ctl_handle;
static bool dfc_config_acked;
static struct rmnet_ctl_client_if *rmnet_ctl;

static void dfc_qmap_send_config(struct dfc_qmi_data *data);
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 u8 bearer_id, u16 seq, u32 tx_id);

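/* Transmit a QMAP command over the rmnet_ctl control channel. The skb
 * is consumed either way: rmnet_ctl takes ownership on success, and it
 * is freed here on failure.
 */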
static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
	trace_dfc_qmap(skb->data, skb->len, false);

	if (unlikely(!rmnet_ctl || !rmnet_ctl->send) ||
	    rmnet_ctl->send(rmnet_ctl_handle, skb)) {
		pr_err("Failed to send to rmnet ctl\n");
		kfree_skb(skb);
	}
}

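/* Ack a DFC indication in-band on the real uplink device rather than
 * over the control channel.
 */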
static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
				     struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;

	cmd = (struct qmap_cmd_hdr *)skb->data;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);

	if (likely(rmnet_ctl && rmnet_ctl->log))
		rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0,
			       skb->data, skb->len);
	trace_dfc_qmap(skb->data, skb->len, false);
	dev_queue_xmit(skb);
}

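/* Translate a QMAP flow control indication into the QMI indication
 * format and run it through the common DFC burst flow control path.
 */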
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
			       struct sk_buff *skb)
{
	struct qmap_dfc_ind *cmd;

	if (skb->len < sizeof(struct qmap_dfc_ind))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_ind *)skb->data;

	if (cmd->tx_info_valid) {
		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
		qmap_tx_ind.tx_status = cmd->tx_info;
		qmap_tx_ind.bearer_info_valid = 1;
		qmap_tx_ind.bearer_info_len = 1;
		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

		/* Ignore grant since it is always 0 */
		goto done;
	}

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);

	if (cmd->rx_bytes_valid) {
		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
	}

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);

done:
	return QMAP_CMD_ACK;
}

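/* Handle the ack to a QMAP_DFC_QUERY: a valid response is replayed as
 * a flow status indication with the sequence check disabled (seq_num
 * forced to 0xFFFF).
 */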
static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
				      struct sk_buff *skb)
{
	struct qmap_dfc_query_resp *cmd;

	if (skb->len < sizeof(struct qmap_dfc_query_resp))
		return QMAP_CMD_DONE;

	cmd = (struct qmap_dfc_query_resp *)skb->data;

	if (cmd->invalid)
		return QMAP_CMD_DONE;

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

	if (cmd->rx_bytes_valid) {
		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
	}

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);

	return QMAP_CMD_DONE;
}

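/* Defer the end marker ack while the bearer still has a grant and the
 * sequence number matches the last one seen; otherwise confirm the end
 * marker right away.
 */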
static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
				    u8 bearer_id, u16 seq_num, u32 tx_id)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;

	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
	if (!dev)
		return;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
	if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
		bearer->ack_req = 1;
		bearer->ack_txid = tx_id;
	} else {
		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
	}

	spin_unlock_bh(&qos->qos_lock);
}

static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
					  struct sk_buff *skb)
{
	struct qmap_dfc_end_marker_req *cmd;

	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_end_marker_req *)skb->data;
	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
				ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));

	return QMAP_CMD_DONE;
}

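/* Downlink handler for QMAP commands from rmnet_ctl. Validates the
 * header, dispatches on cmd_name, then either turns the skb around as
 * an ack or frees it.
 */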
static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	struct dfc_qmi_data *dfc;
	int rc = QMAP_CMD_DONE;

	if (!skb)
		return;

	trace_dfc_qmap(skb->data, skb->len, true);

	if (skb->len < sizeof(struct qmap_cmd_hdr))
		goto free_skb;

	cmd = (struct qmap_cmd_hdr *)skb->data;
	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
		goto free_skb;

	if (cmd->cmd_name == QMAP_DFC_QUERY) {
		if (cmd->cmd_type != QMAP_CMD_ACK)
			goto free_skb;
	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
		if (cmd->cmd_type == QMAP_CMD_ACK &&
		    cmd->cmd_name == QMAP_DFC_CONFIG)
			dfc_config_acked = true;
		goto free_skb;
	}

	rcu_read_lock();

	dfc = rcu_dereference(qmap_dfc_data);
	if (!dfc || READ_ONCE(dfc->restart_state)) {
		rcu_read_unlock();
		goto free_skb;
	}

	/* Re-send DFC config once if needed */
	if (unlikely(!dfc_config_acked)) {
		dfc_qmap_send_config(dfc);
		dfc_config_acked = true;
	}

	switch (cmd->cmd_name) {
	case QMAP_DFC_IND:
		rc = dfc_qmap_handle_ind(dfc, skb);
		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
		break;

	case QMAP_DFC_QUERY:
		rc = dfc_qmap_handle_query_resp(dfc, skb);
		break;

	case QMAP_DFC_END_MARKER:
		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
		break;

	default:
		rc = QMAP_CMD_UNSUPPORTED;
	}

	/* Send ack */
	if (rc != QMAP_CMD_DONE) {
		cmd->cmd_type = rc;
		if (cmd->cmd_name == QMAP_DFC_IND)
			dfc_qmap_send_inband_ack(dfc, skb);
		else
			dfc_qmap_send_cmd(skb);

		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

free_skb:
	kfree_skb(skb);
}

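/* Ask the modem to enable DFC indications, with TX link status
 * reporting, for this endpoint.
 */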
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
	struct sk_buff *skb;
	struct qmap_dfc_config *dfc_config;
	unsigned int len = sizeof(struct qmap_dfc_config);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
	memset(dfc_config, 0, len);

	dfc_config->hdr.cd_bit = 1;
	dfc_config->hdr.mux_id = 0;
	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	dfc_config->cmd_ver = QMAP_DFC_VER;
	dfc_config->cmd_id = QMAP_DFC_IND;
	dfc_config->tx_info = 1;
	dfc_config->ep_type = htonl(data->svc.ep_type);
	dfc_config->iface_id = htonl(data->svc.iface_id);

	dfc_qmap_send_cmd(skb);
}

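/* Query the current grant for a bearer; the answer comes back as a
 * QMAP_DFC_QUERY ack handled in dfc_qmap_handle_query_resp().
 */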
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_query *dfc_query;
	unsigned int len = sizeof(struct qmap_dfc_query);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
	memset(dfc_query, 0, len);

	dfc_query->hdr.cd_bit = 1;
	dfc_query->hdr.mux_id = mux_id;
	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	dfc_query->cmd_ver = QMAP_DFC_VER;
	dfc_query->bearer_id = bearer_id;

	dfc_qmap_send_cmd(skb);
}

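/* Build and send an end marker confirmation. Unlike the other
 * commands, this goes in-band on the data path via
 * rmnet_map_tx_qmap_cmd().
 */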
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 u8 bearer_id, u16 seq, u32 tx_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_end_marker_cnf *em_cnf;
	unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
	memset(em_cnf, 0, len);

	em_cnf->hdr.cd_bit = 1;
	em_cnf->hdr.mux_id = qos->mux_id;
	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
	em_cnf->hdr.tx_id = htonl(tx_id);

	em_cnf->cmd_ver = QMAP_DFC_VER;
	em_cnf->bearer_id = bearer_id;
	em_cnf->seq_num = htons(seq);

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = qos->real_dev;

	/* This cmd needs to be sent in-band */
	if (likely(rmnet_ctl && rmnet_ctl->log))
		rmnet_ctl->log(RMNET_CTL_LOG_INFO, "TXI", 0,
			       skb->data, skb->len);
	trace_dfc_qmap(skb->data, skb->len, false);
	rmnet_map_tx_qmap_cmd(skb);
}

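/* Send a flow control ack: a deferred end marker confirmation for
 * DFC_ACK_TYPE_DISABLE, or a grant query for DFC_ACK_TYPE_THRESHOLD.
 */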
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
	struct rmnet_bearer_map *bearer;

	if (type == DFC_ACK_TYPE_DISABLE) {
		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
		if (bearer)
			dfc_qmap_send_end_marker_cnf(qos, bearer_id,
						     seq, bearer->ack_txid);
	} else if (type == DFC_ACK_TYPE_THRESHOLD) {
		dfc_qmap_send_query(qos->mux_id, bearer_id);
	}
}

static struct rmnet_ctl_client_hooks cb = {
	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
};

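/* Register with rmnet_ctl and send the initial DFC configuration.
 * Only one client instance is supported at a time.
 */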
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
			 struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;

	if (!port || !qmi)
		return -EINVAL;

	/* Prevent double init */
	data = rcu_dereference(qmap_dfc_data);
	if (data)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	qmi->dfc_clients[index] = (void *)data;
	rcu_assign_pointer(qmap_dfc_data, data);

	atomic_set(&qmap_txid, 0);

	rmnet_ctl = rmnet_ctl_if();
	if (!rmnet_ctl) {
		pr_err("rmnet_ctl module not loaded\n");
		goto out;
	}

	if (rmnet_ctl->reg)
		rmnet_ctl_handle = rmnet_ctl->reg(&cb);

	if (!rmnet_ctl_handle)
		pr_err("Failed to register with rmnet ctl\n");

	trace_dfc_client_state_up(data->index, data->svc.instance,
				  data->svc.ep_type, data->svc.iface_id);

	pr_info("DFC QMAP init\n");

	dfc_config_acked = false;
	dfc_qmap_send_config(data);

out:
	return 0;
}

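/* Deregister from rmnet_ctl, flag restart state so in-flight handlers
 * bail out, and free the client data after an RCU grace period.
 */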
void dfc_qmap_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	trace_dfc_client_state_down(data->index, 0);

	if (rmnet_ctl && rmnet_ctl->dereg)
		rmnet_ctl->dereg(rmnet_ctl_handle);
	rmnet_ctl_handle = NULL;

	WRITE_ONCE(data->restart_state, 1);
	RCU_INIT_POINTER(qmap_dfc_data, NULL);
	synchronize_rcu();

	kfree(data);
	rmnet_ctl = NULL;

	pr_info("DFC QMAP exit\n");
}