dfc_qmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <linux/module.h>

#include "rmnet_qmap.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"

#define QMAP_DFC_VER		1
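
/* On-the-wire layouts of the QMAP DFC commands. Multi-byte fields are
 * big endian (__be16/__be32); reserved fields are sent as zero.
 */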
struct qmap_dfc_config {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 cmd_id;
        u8 reserved;
        u8 tx_info:1;
        u8 reserved2:7;
        __be32 ep_type;
        __be32 iface_id;
        u32 reserved3;
} __aligned(1);

struct qmap_dfc_ind {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 reserved;
        __be16 seq_num;
        u8 reserved2;
        u8 tx_info_valid:1;
        u8 tx_info:1;
        u8 rx_bytes_valid:1;
        u8 reserved3:5;
        u8 bearer_id;
        u8 tcp_bidir:1;
        u8 bearer_status:3;
        u8 ll_status:1;
        u8 reserved4:3;
        __be32 grant;
        __be32 rx_bytes;
        u32 reserved6;
} __aligned(1);

struct qmap_dfc_query {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 reserved;
        u8 bearer_id;
        u8 reserved2;
        u32 reserved3;
} __aligned(1);

struct qmap_dfc_query_resp {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 bearer_id;
        u8 tcp_bidir:1;
        u8 rx_bytes_valid:1;
        u8 reserved:6;
        u8 invalid:1;
        u8 reserved2:7;
        __be32 grant;
        __be32 rx_bytes;
        u32 reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_req {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 reserved;
        u8 bearer_id;
        u8 reserved2;
        u16 reserved3;
        __be16 seq_num;
        u32 reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 reserved;
        u8 bearer_id;
        u8 reserved2;
        u16 reserved3;
        __be16 seq_num;
        u32 reserved4;
} __aligned(1);

struct qmap_dfc_powersave_req {
        struct qmap_cmd_hdr hdr;
        u8 cmd_ver;
        u8 allow:1;
        u8 autoshut:1;
        u8 reserved:6;
        u8 reserved2;
        u8 mode:1;
        u8 reserved3:7;
        __be32 ep_type;
        __be32 iface_id;
        u8 num_bearers;
        u8 bearer_id[PS_MAX_BEARERS];
        u8 reserved4[3];
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static bool dfc_config_acked;

static void dfc_qmap_send_config(struct dfc_qmi_data *data);
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         struct rmnet_bearer_map *bearer,
                                         u16 seq, u32 tx_id);
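
/* Translate a QMAP DFC indication into the QMI-style indication
 * structures and feed it to the common DFC core: a TX link status
 * update goes to dfc_handle_tx_link_status_ind(), a flow grant goes
 * to dfc_do_burst_flow_control().
 */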
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
                               struct sk_buff *skb)
{
        struct qmap_dfc_ind *cmd;

        if (skb->len < sizeof(struct qmap_dfc_ind))
                return QMAP_CMD_INVALID;

        cmd = (struct qmap_dfc_ind *)skb->data;

        if (cmd->tx_info_valid) {
                memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
                qmap_tx_ind.tx_status = cmd->tx_info;
                qmap_tx_ind.bearer_info_valid = 1;
                qmap_tx_ind.bearer_info_len = 1;
                qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
                qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

                dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

                /* Ignore grant since it is always 0 */
                goto done;
        }

        memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
        qmap_flow_ind.flow_status_valid = 1;
        qmap_flow_ind.flow_status_len = 1;
        qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
        qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
        qmap_flow_ind.flow_status[0].ll_status = cmd->ll_status;

        if (cmd->rx_bytes_valid) {
                qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
                qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
        }

        if (cmd->tcp_bidir) {
                qmap_flow_ind.ancillary_info_valid = 1;
                qmap_flow_ind.ancillary_info_len = 1;
                qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
                qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
                qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
        }

        dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);

done:
        return QMAP_CMD_ACK;
}
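
/* Handle the ACK to a previously sent QMAP_DFC_QUERY. The reported
 * grant is replayed through the normal burst flow control path with
 * seq_num set to the 0xFFFF sentinel.
 */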
static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
                                      struct sk_buff *skb)
{
        struct qmap_dfc_query_resp *cmd;

        if (skb->len < sizeof(struct qmap_dfc_query_resp))
                return QMAP_CMD_DONE;

        cmd = (struct qmap_dfc_query_resp *)skb->data;

        if (cmd->invalid)
                return QMAP_CMD_DONE;

        memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
        qmap_flow_ind.flow_status_valid = 1;
        qmap_flow_ind.flow_status_len = 1;
        qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
        qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
        qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
        qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

        if (cmd->rx_bytes_valid) {
                qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
                qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
        }

        if (cmd->tcp_bidir) {
                qmap_flow_ind.ancillary_info_valid = 1;
                qmap_flow_ind.ancillary_info_len = 1;
                qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
                qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
                qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
        }

        dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);

        return QMAP_CMD_DONE;
}
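
/* Process an end marker for one bearer. If the bearer still holds a
 * grant for the same sequence number, record ack_req/ack_txid so the
 * confirmation is sent later, after in-flight data; otherwise confirm
 * immediately.
 */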
static int dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
                                   u8 bearer_id, u16 seq_num, u32 tx_id)
{
        struct net_device *dev;
        struct qos_info *qos;
        struct rmnet_bearer_map *bearer;
        int rc = QMAP_CMD_ACK;

        dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
        if (!dev)
                return rc;

        qos = (struct qos_info *)rmnet_get_qos_pt(dev);
        if (!qos)
                return rc;

        spin_lock_bh(&qos->qos_lock);

        bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
        if (!bearer) {
                spin_unlock_bh(&qos->qos_lock);
                return rc;
        }

        if (bearer->last_seq == seq_num && bearer->grant_size) {
                bearer->ack_req = 1;
                bearer->ack_txid = tx_id;
        } else {
                dfc_qmap_send_end_marker_cnf(qos, bearer, seq_num, tx_id);
        }

        spin_unlock_bh(&qos->qos_lock);

        return QMAP_CMD_DONE;
}
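
/* Validate and unpack a QMAP_DFC_END_MARKER request, then hand the
 * fields to dfc_qmap_set_end_marker().
 */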
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
                                          struct sk_buff *skb)
{
        struct qmap_dfc_end_marker_req *cmd;

        if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
                return QMAP_CMD_INVALID;

        cmd = (struct qmap_dfc_end_marker_req *)skb->data;

        return dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
                                       ntohs(cmd->seq_num),
                                       ntohl(cmd->hdr.tx_id));
}
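
/* Entry point for DFC QMAP commands received from the modem. Requests
 * (and the ACK to a QMAP_DFC_QUERY) are dispatched to the handlers
 * above; a QMAP_DFC_CONFIG ACK just records that the config was
 * accepted.
 */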
int dfc_qmap_cmd_handler(struct sk_buff *skb)
{
        struct qmap_cmd_hdr *cmd;
        struct dfc_qmi_data *dfc;
        int rc = QMAP_CMD_DONE;

        cmd = (struct qmap_cmd_hdr *)skb->data;

        if (cmd->cmd_name == QMAP_DFC_QUERY) {
                if (cmd->cmd_type != QMAP_CMD_ACK)
                        return rc;
        } else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
                if (cmd->cmd_type == QMAP_CMD_ACK &&
                    cmd->cmd_name == QMAP_DFC_CONFIG)
                        dfc_config_acked = true;
                return rc;
        }

        dfc = rcu_dereference(qmap_dfc_data);
        if (!dfc || READ_ONCE(dfc->restart_state))
                return rc;

        /* Re-send DFC config once if needed */
        if (unlikely(!dfc_config_acked)) {
                dfc_qmap_send_config(dfc);
                dfc_config_acked = true;
        }

        switch (cmd->cmd_name) {
        case QMAP_DFC_IND:
                rc = dfc_qmap_handle_ind(dfc, skb);
                qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
                break;

        case QMAP_DFC_QUERY:
                rc = dfc_qmap_handle_query_resp(dfc, skb);
                break;

        case QMAP_DFC_END_MARKER:
                rc = dfc_qmap_handle_end_marker_req(dfc, skb);
                break;

        default:
                if (cmd->cmd_type == QMAP_CMD_REQUEST)
                        rc = QMAP_CMD_UNSUPPORTED;
        }

        return rc;
}
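
/* Send the QMAP_DFC_CONFIG request that enables TX link status
 * (tx_info) reporting in QMAP_DFC_IND indications.
 */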
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
        struct sk_buff *skb;
        struct qmap_dfc_config *dfc_config;
        unsigned int len = sizeof(struct qmap_dfc_config);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        skb->protocol = htons(ETH_P_MAP);
        dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
        memset(dfc_config, 0, len);

        dfc_config->hdr.cd_bit = 1;
        dfc_config->hdr.mux_id = 0;
        dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
        dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
        dfc_config->hdr.tx_id = htonl(rmnet_qmap_next_txid());

        dfc_config->cmd_ver = QMAP_DFC_VER;
        dfc_config->cmd_id = QMAP_DFC_IND;
        dfc_config->tx_info = 1;
        dfc_config->ep_type = htonl(data->svc.ep_type);
        dfc_config->iface_id = htonl(data->svc.iface_id);

        rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
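
/* Query the current grant for one bearer; the response arrives as a
 * QMAP_DFC_QUERY ACK and is handled by dfc_qmap_handle_query_resp().
 */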
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
        struct sk_buff *skb;
        struct qmap_dfc_query *dfc_query;
        unsigned int len = sizeof(struct qmap_dfc_query);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        skb->protocol = htons(ETH_P_MAP);
        dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
        memset(dfc_query, 0, len);

        dfc_query->hdr.cd_bit = 1;
        dfc_query->hdr.mux_id = mux_id;
        dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
        dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
        dfc_query->hdr.tx_id = htonl(rmnet_qmap_next_txid());

        dfc_query->cmd_ver = QMAP_DFC_VER;
        dfc_query->bearer_id = bearer_id;

        rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
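
/* Confirm an end marker back to the modem for the given bearer and
 * sequence number.
 */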
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
                                         struct rmnet_bearer_map *bearer,
                                         u16 seq, u32 tx_id)
{
        struct sk_buff *skb;
        struct qmap_dfc_end_marker_cnf *em_cnf;
        unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
        memset(em_cnf, 0, len);

        em_cnf->hdr.cd_bit = 1;
        em_cnf->hdr.mux_id = qos->mux_id;
        em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
        em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
        em_cnf->hdr.tx_id = htonl(tx_id);

        em_cnf->cmd_ver = QMAP_DFC_VER;
        em_cnf->bearer_id = bearer->bearer_id;
        em_cnf->seq_num = htons(seq);

        /* This cmd needs to be sent in-band after data on the current
         * channel. But due to an IPA bug, it cannot be sent over LLC,
         * so send it over the QMAP control channel if the current
         * channel is LLC.
         */
        if (bearer->ch_switch.current_ch == RMNET_CH_DEFAULT)
                rmnet_qmap_send(skb, bearer->ch_switch.current_ch, true);
        else
                rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
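
/* Build and send a QMAP_DFC_POWERSAVE request. When enabling
 * powersave with a bearer list, this also sets allow/autoshut and
 * copies up to PS_MAX_BEARERS bearer IDs into the request.
 */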
static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
        struct sk_buff *skb;
        struct qmap_dfc_powersave_req *dfc_powersave;
        unsigned int len = sizeof(struct qmap_dfc_powersave_req);
        struct dfc_qmi_data *dfc;
        u32 ep_type = 0;
        u32 iface_id = 0;

        rcu_read_lock();
        dfc = rcu_dereference(qmap_dfc_data);
        if (dfc) {
                ep_type = dfc->svc.ep_type;
                iface_id = dfc->svc.iface_id;
        } else {
                rcu_read_unlock();
                return -EINVAL;
        }
        rcu_read_unlock();

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        skb->protocol = htons(ETH_P_MAP);
        dfc_powersave = (struct qmap_dfc_powersave_req *)skb_put(skb, len);
        memset(dfc_powersave, 0, len);

        dfc_powersave->hdr.cd_bit = 1;
        dfc_powersave->hdr.mux_id = 0;
        dfc_powersave->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
        dfc_powersave->hdr.cmd_name = QMAP_DFC_POWERSAVE;
        dfc_powersave->hdr.cmd_type = QMAP_CMD_REQUEST;
        dfc_powersave->hdr.tx_id = htonl(rmnet_qmap_next_txid());

        dfc_powersave->cmd_ver = 3;
        dfc_powersave->mode = enable ? 1 : 0;

        if (enable && num_bearers) {
                if (unlikely(num_bearers > PS_MAX_BEARERS))
                        num_bearers = PS_MAX_BEARERS;
                dfc_powersave->allow = 1;
                dfc_powersave->autoshut = 1;
                dfc_powersave->num_bearers = num_bearers;
                memcpy(dfc_powersave->bearer_id, bearer_id, num_bearers);
        }

        dfc_powersave->ep_type = htonl(ep_type);
        dfc_powersave->iface_id = htonl(iface_id);

        return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
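
/* Exported wrapper around dfc_qmap_send_powersave() that also emits
 * the powersave mode trace event.
 */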
int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
        trace_dfc_set_powersave_mode(enable);
        return dfc_qmap_send_powersave(enable, num_bearers, bearer_id);
}
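
/* Called by the DFC core: DFC_ACK_TYPE_DISABLE confirms a pending end
 * marker, DFC_ACK_TYPE_THRESHOLD triggers a grant query.
 */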
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
        struct rmnet_bearer_map *bearer;

        if (type == DFC_ACK_TYPE_DISABLE) {
                bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
                if (bearer)
                        dfc_qmap_send_end_marker_cnf(qos, bearer,
                                                     seq, bearer->ack_txid);
        } else if (type == DFC_ACK_TYPE_THRESHOLD) {
                dfc_qmap_send_query(qos->mux_id, bearer_id);
        }
}
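
/* Bring up the single DFC QMAP client: allocate per-client state,
 * publish it through the qmap_dfc_data RCU pointer and, unless the
 * powersave extension is in use, send the initial DFC config.
 */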
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
                         struct qmi_info *qmi)
{
        struct dfc_qmi_data *data;

        if (!port || !qmi)
                return -EINVAL;

        /* Prevent double init */
        data = rcu_dereference(qmap_dfc_data);
        if (data)
                return -EINVAL;

        data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->rmnet_port = port;
        data->index = index;
        memcpy(&data->svc, psvc, sizeof(data->svc));

        qmi->dfc_clients[index] = (void *)data;
        rcu_assign_pointer(qmap_dfc_data, data);

        rmnet_qmap_init(port);

        trace_dfc_client_state_up(data->index, data->svc.instance,
                                  data->svc.ep_type, data->svc.iface_id);

        pr_info("DFC QMAP init\n");

        /* Currently, if the powersave extension is enabled, there is no
         * need to send the DFC config, which only enables tx_info.
         */
        if (qmi->ps_ext) {
                dfc_config_acked = true;
        } else {
                dfc_config_acked = false;
                dfc_qmap_send_config(data);
        }

        return 0;
}
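
/* Tear down the DFC QMAP client: mark restart state, unpublish the
 * RCU pointer, wait for readers, then free the state.
 */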
void dfc_qmap_client_exit(void *dfc_data)
{
        struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

        if (!data) {
                pr_err("%s() data is null\n", __func__);
                return;
        }

        trace_dfc_client_state_down(data->index, 0);

        rmnet_qmap_exit();

        WRITE_ONCE(data->restart_state, 1);
        RCU_INIT_POINTER(qmap_dfc_data, NULL);
        synchronize_rcu();

        kfree(data);

        pr_info("DFC QMAP exit\n");
}