dfc_qmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <linux/module.h>
#include "rmnet_ctl.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"

#define QMAP_DFC_VER            1

#define QMAP_CMD_DONE           -1

#define QMAP_CMD_REQUEST        0
#define QMAP_CMD_ACK            1
#define QMAP_CMD_UNSUPPORTED    2
#define QMAP_CMD_INVALID        3

#define QMAP_DFC_CONFIG         10
#define QMAP_DFC_IND            11
#define QMAP_DFC_QUERY          12
#define QMAP_DFC_END_MARKER     13
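
/* Basic 4-byte MAP header; used here only to size QMAP_HDR_LEN for pkt_len math. */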
struct qmap_hdr {
        u8      cd_pad;
        u8      mux_id;
        __be16  pkt_len;
} __aligned(1);

#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
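
/*
 * Common header carried by every QMAP DFC command. cd_bit marks the frame
 * as a command, cmd_name selects one of the QMAP_DFC_* commands above and
 * cmd_type holds a QMAP_CMD_* request/ack code. Multi-byte fields are
 * big endian on the wire.
 */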
struct qmap_cmd_hdr {
        u8      pad_len:6;
        u8      reserved_bit:1;
        u8      cd_bit:1;
        u8      mux_id;
        __be16  pkt_len;
        u8      cmd_name;
        u8      cmd_type:2;
        u8      reserved:6;
        u16     reserved2;
        __be32  tx_id;
} __aligned(1);

struct qmap_dfc_config {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      cmd_id;
        u8      reserved;
        u8      tx_info:1;
        u8      reserved2:7;
        __be32  ep_type;
        __be32  iface_id;
        u32     reserved3;
} __aligned(1);
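
/*
 * Downlink flow control indication: either a TX link status update
 * (tx_info_valid) or a grant for one bearer, optionally tagged with
 * rx_bytes and the TCP bidirectional hint.
 */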
struct qmap_dfc_ind {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      reserved;
        __be16  seq_num;
        u8      reserved2;
        u8      tx_info_valid:1;
        u8      tx_info:1;
        u8      rx_bytes_valid:1;
        u8      reserved3:5;
        u8      bearer_id;
        u8      tcp_bidir:1;
        u8      bearer_status:3;
        u8      reserved4:4;
        __be32  grant;
        __be32  rx_bytes;
        u32     reserved6;
} __aligned(1);

struct qmap_dfc_query {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      reserved;
        u8      bearer_id;
        u8      reserved2;
        u32     reserved3;
} __aligned(1);

struct qmap_dfc_query_resp {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      bearer_id;
        u8      tcp_bidir:1;
        u8      rx_bytes_valid:1;
        u8      reserved:6;
        u8      invalid:1;
        u8      reserved2:7;
        __be32  grant;
        __be32  rx_bytes;
        u32     reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_req {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      reserved;
        u8      bearer_id;
        u8      reserved2;
        u16     reserved3;
        __be16  seq_num;
        u32     reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
        struct qmap_cmd_hdr hdr;
        u8      cmd_ver;
        u8      reserved;
        u8      bearer_id;
        u8      reserved2;
        u16     reserved3;
        __be16  seq_num;
        u32     reserved4;
} __aligned(1);
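
/*
 * Single-client state: one DFC QMAP instance at a time, plus scratch
 * indication buffers that the handlers below reuse.
 */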
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static atomic_t qmap_txid;
static void *rmnet_ctl_handle;
static struct rmnet_ctl_client_if *rmnet_ctl;

static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         u8 bearer_id, u16 seq, u32 tx_id);
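
/* Send a QMAP command out-of-band through the rmnet_ctl interface. */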
static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
        trace_dfc_qmap(skb->data, skb->len, false);

        if (unlikely(!rmnet_ctl || !rmnet_ctl->send) ||
            rmnet_ctl->send(rmnet_ctl_handle, skb)) {
                pr_err("Failed to send to rmnet ctl\n");
                kfree_skb(skb);
        }
}
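
/*
 * Ack a DFC indication in-band: transmit the (already modified) command
 * skb on the real rmnet device instead of the control channel.
 */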
static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
                                     struct sk_buff *skb)
{
        struct qmap_cmd_hdr *cmd;

        cmd = (struct qmap_cmd_hdr *)skb->data;

        skb->protocol = htons(ETH_P_MAP);
        skb->dev = rmnet_get_real_dev(dfc->rmnet_port);

        if (likely(rmnet_ctl && rmnet_ctl->log))
                rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0,
                               skb->data, skb->len);

        trace_dfc_qmap(skb->data, skb->len, false);
        dev_queue_xmit(skb);
}
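
/*
 * Convert a QMAP DFC indication into the QMI-style indication structures
 * and run the normal burst flow control / TX link status paths.
 */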
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
                               struct sk_buff *skb)
{
        struct qmap_dfc_ind *cmd;

        if (skb->len < sizeof(struct qmap_dfc_ind))
                return QMAP_CMD_INVALID;

        cmd = (struct qmap_dfc_ind *)skb->data;

        if (cmd->tx_info_valid) {
                memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
                qmap_tx_ind.tx_status = cmd->tx_info;
                qmap_tx_ind.bearer_info_valid = 1;
                qmap_tx_ind.bearer_info_len = 1;
                qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
                qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

                dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

                /* Ignore grant since it is always 0 */
                goto done;
        }

        memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
        qmap_flow_ind.flow_status_valid = 1;
        qmap_flow_ind.flow_status_len = 1;
        qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
        qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);

        if (cmd->rx_bytes_valid) {
                qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
                qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
        }

        if (cmd->tcp_bidir) {
                qmap_flow_ind.ancillary_info_valid = 1;
                qmap_flow_ind.ancillary_info_len = 1;
                qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
                qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
                qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
        }

        dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);

done:
        return QMAP_CMD_ACK;
}
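
/*
 * Handle the ack to a previously sent query: treat it like a flow status
 * indication, with seq_num forced to 0xFFFF.
 */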
static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
                                      struct sk_buff *skb)
{
        struct qmap_dfc_query_resp *cmd;

        if (skb->len < sizeof(struct qmap_dfc_query_resp))
                return QMAP_CMD_DONE;

        cmd = (struct qmap_dfc_query_resp *)skb->data;

        if (cmd->invalid)
                return QMAP_CMD_DONE;

        memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
        qmap_flow_ind.flow_status_valid = 1;
        qmap_flow_ind.flow_status_len = 1;
        qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
        qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

        if (cmd->rx_bytes_valid) {
                qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
                qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
        }

        if (cmd->tcp_bidir) {
                qmap_flow_ind.ancillary_info_valid = 1;
                qmap_flow_ind.ancillary_info_len = 1;
                qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
                qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
                qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
        }

        dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);

        return QMAP_CMD_DONE;
}
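
/*
 * Record an end-marker request against the matching bearer. If the bearer
 * still holds a grant for this sequence number, defer the confirmation
 * (ack_req/ack_txid) for later; otherwise confirm immediately.
 */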
static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
                                    u8 bearer_id, u16 seq_num, u32 tx_id)
{
        struct net_device *dev;
        struct qos_info *qos;
        struct rmnet_bearer_map *bearer;

        dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
        if (!dev)
                return;

        qos = (struct qos_info *)rmnet_get_qos_pt(dev);
        if (!qos)
                return;

        spin_lock_bh(&qos->qos_lock);

        bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
        if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
                bearer->ack_req = 1;
                bearer->ack_txid = tx_id;
        } else {
                dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
        }

        spin_unlock_bh(&qos->qos_lock);
}
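
/* Parse an end-marker request and hand it to dfc_qmap_set_end_marker(). */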
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
                                          struct sk_buff *skb)
{
        struct qmap_dfc_end_marker_req *cmd;

        if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
                return QMAP_CMD_INVALID;

        cmd = (struct qmap_dfc_end_marker_req *)skb->data;
        dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
                                ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));

        return QMAP_CMD_DONE;
}
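
/*
 * Downlink entry point registered with rmnet_ctl. Validates the command
 * header, dispatches by cmd_name, then either frees the skb or reuses it
 * to send the ack (in-band for indications, out-of-band otherwise).
 */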
static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
        struct qmap_cmd_hdr *cmd;
        struct dfc_qmi_data *dfc;
        int rc = QMAP_CMD_DONE;

        if (!skb)
                return;

        trace_dfc_qmap(skb->data, skb->len, true);

        if (skb->len < sizeof(struct qmap_cmd_hdr))
                goto free_skb;

        cmd = (struct qmap_cmd_hdr *)skb->data;
        if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
                goto free_skb;

        if (cmd->cmd_name == QMAP_DFC_QUERY) {
                if (cmd->cmd_type != QMAP_CMD_ACK)
                        goto free_skb;
        } else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
                goto free_skb;
        }

        rcu_read_lock();

        dfc = rcu_dereference(qmap_dfc_data);
        if (!dfc || READ_ONCE(dfc->restart_state)) {
                rcu_read_unlock();
                goto free_skb;
        }

        switch (cmd->cmd_name) {
        case QMAP_DFC_IND:
                rc = dfc_qmap_handle_ind(dfc, skb);
                qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
                break;

        case QMAP_DFC_QUERY:
                rc = dfc_qmap_handle_query_resp(dfc, skb);
                break;

        case QMAP_DFC_END_MARKER:
                rc = dfc_qmap_handle_end_marker_req(dfc, skb);
                break;

        default:
                rc = QMAP_CMD_UNSUPPORTED;
        }

        /* Send ack */
        if (rc != QMAP_CMD_DONE) {
                cmd->cmd_type = rc;
                if (cmd->cmd_name == QMAP_DFC_IND)
                        dfc_qmap_send_inband_ack(dfc, skb);
                else
                        dfc_qmap_send_cmd(skb);

                rcu_read_unlock();
                return;
        }

        rcu_read_unlock();

free_skb:
        kfree_skb(skb);
}
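
/*
 * Send the DFC configuration request: enable the QMAP_DFC_IND indication
 * with tx_info reporting for this endpoint (ep_type/iface_id).
 */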
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
        struct sk_buff *skb;
        struct qmap_dfc_config *dfc_config;
        unsigned int len = sizeof(struct qmap_dfc_config);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        skb->protocol = htons(ETH_P_MAP);
        dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
        memset(dfc_config, 0, len);

        dfc_config->hdr.cd_bit = 1;
        dfc_config->hdr.mux_id = 0;
        dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
        dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
        dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

        dfc_config->cmd_ver = QMAP_DFC_VER;
        dfc_config->cmd_id = QMAP_DFC_IND;
        dfc_config->tx_info = 1;
        dfc_config->ep_type = htonl(data->svc.ep_type);
        dfc_config->iface_id = htonl(data->svc.iface_id);

        dfc_qmap_send_cmd(skb);
}
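
/*
 * Query the current grant for one bearer; the answer comes back as a
 * QMAP_DFC_QUERY ack handled by dfc_qmap_handle_query_resp().
 */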
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
        struct sk_buff *skb;
        struct qmap_dfc_query *dfc_query;
        unsigned int len = sizeof(struct qmap_dfc_query);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        skb->protocol = htons(ETH_P_MAP);
        dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
        memset(dfc_query, 0, len);

        dfc_query->hdr.cd_bit = 1;
        dfc_query->hdr.mux_id = mux_id;
        dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
        dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
        dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

        dfc_query->cmd_ver = QMAP_DFC_VER;
        dfc_query->bearer_id = bearer_id;

        dfc_qmap_send_cmd(skb);
}
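
/*
 * Build the end-marker confirmation and, unlike the other commands,
 * transmit it in-band on the real device via rmnet_map_tx_qmap_cmd().
 */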
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         u8 bearer_id, u16 seq, u32 tx_id)
{
        struct sk_buff *skb;
        struct qmap_dfc_end_marker_cnf *em_cnf;
        unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
        memset(em_cnf, 0, len);

        em_cnf->hdr.cd_bit = 1;
        em_cnf->hdr.mux_id = qos->mux_id;
        em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
        em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
        em_cnf->hdr.tx_id = htonl(tx_id);

        em_cnf->cmd_ver = QMAP_DFC_VER;
        em_cnf->bearer_id = bearer_id;
        em_cnf->seq_num = htons(seq);

        skb->protocol = htons(ETH_P_MAP);
        skb->dev = qos->real_dev;

        /* This cmd needs to be sent in-band */
        if (likely(rmnet_ctl && rmnet_ctl->log))
                rmnet_ctl->log(RMNET_CTL_LOG_INFO, "TXI", 0,
                               skb->data, skb->len);

        trace_dfc_qmap(skb->data, skb->len, false);
        rmnet_map_tx_qmap_cmd(skb);
}
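
/*
 * Non-static hook used by the core DFC code: DFC_ACK_TYPE_DISABLE sends
 * the deferred end-marker confirmation; DFC_ACK_TYPE_THRESHOLD issues a
 * grant query instead.
 */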
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
        struct rmnet_bearer_map *bearer;

        if (type == DFC_ACK_TYPE_DISABLE) {
                bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
                if (bearer)
                        dfc_qmap_send_end_marker_cnf(qos, bearer_id,
                                                     seq, bearer->ack_txid);
        } else if (type == DFC_ACK_TYPE_THRESHOLD) {
                dfc_qmap_send_query(qos->mux_id, bearer_id);
        }
}

static struct rmnet_ctl_client_hooks cb = {
        .ctl_dl_client_hook = dfc_qmap_cmd_handler,
};
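
/*
 * Bring up the single QMAP DFC client: register with rmnet_ctl and push
 * the initial DFC configuration. Note that rmnet_ctl registration failures
 * are only logged; the function still returns 0.
 */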
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
                         struct qmi_info *qmi)
{
        struct dfc_qmi_data *data;

        if (!port || !qmi)
                return -EINVAL;

        /* Prevent double init */
        data = rcu_dereference(qmap_dfc_data);
        if (data)
                return -EINVAL;

        data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->rmnet_port = port;
        data->index = index;
        memcpy(&data->svc, psvc, sizeof(data->svc));

        qmi->dfc_clients[index] = (void *)data;
        rcu_assign_pointer(qmap_dfc_data, data);
        atomic_set(&qmap_txid, 0);

        rmnet_ctl = rmnet_ctl_if();
        if (!rmnet_ctl) {
                pr_err("rmnet_ctl module not loaded\n");
                goto out;
        }

        if (rmnet_ctl->reg)
                rmnet_ctl_handle = rmnet_ctl->reg(&cb);

        if (!rmnet_ctl_handle)
                pr_err("Failed to register with rmnet ctl\n");

        trace_dfc_client_state_up(data->index, data->svc.instance,
                                  data->svc.ep_type, data->svc.iface_id);

        pr_info("DFC QMAP init\n");

        dfc_qmap_send_config(data);

out:
        return 0;
}
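
/*
 * Tear down the client: deregister from rmnet_ctl, mark restart_state,
 * clear the RCU pointer and free the state after a grace period.
 */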
void dfc_qmap_client_exit(void *dfc_data)
{
        struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

        if (!data) {
                pr_err("%s() data is null\n", __func__);
                return;
        }

        trace_dfc_client_state_down(data->index, 0);

        if (rmnet_ctl && rmnet_ctl->dereg)
                rmnet_ctl->dereg(rmnet_ctl_handle);
        rmnet_ctl_handle = NULL;

        WRITE_ONCE(data->restart_state, 1);
        RCU_INIT_POINTER(qmap_dfc_data, NULL);
        synchronize_rcu();

        kfree(data);

        rmnet_ctl = NULL;

        pr_info("DFC QMAP exit\n");
}