/* rmnet_ll_qmap.c */
  1. /*
  2. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/netlink.h>
  14. #include <uapi/linux/rtnetlink.h>
  15. #include <linux/net.h>
  16. #include <linux/workqueue.h>
  17. #include <net/sock.h>
  18. #include "dfc.h"
  19. #include "rmnet_qmi.h"
  20. #include "rmnet_qmap.h"
  21. #include "qmi_rmnet_i.h"
  22. #define QMAP_LL_VER 1
  23. #define QMAP_LL_MAX_BEARER 15
  24. #define QMAP_SWITCH_TO_LL 1
  25. #define QMAP_SWITCH_TO_DEFAULT 2
  26. #define QMAP_SWITCH_QUERY 3
  27. /* Switch status from modem */
  28. #define SWITCH_STATUS_ERROR 0
  29. #define SWITCH_STATUS_SUCCESS 1
  30. #define SWITCH_STATUS_DEFAULT 2
  31. #define SWITCH_STATUS_LL 3
  32. #define SWITCH_STATUS_FAIL_TEMP 4
  33. #define SWITCH_STATUS_FAIL_PERM 5
  34. /* Internal switch status */
  35. #define SWITCH_STATUS_NONE 0xFF
  36. #define SWITCH_STATUS_TIMEOUT 0xFE
  37. #define SWITCH_STATUS_NO_EFFECT 0xFD
  38. #define LL_MASK_NL_ACK 1
  39. #define LL_MASK_AUTO_RETRY 2
  40. #define LL_TIMEOUT (5 * HZ)
  41. #define LL_RETRY_TIME (10 * HZ)
  42. #define LL_MAX_RETRY (3)
  43. struct qmap_ll_bearer {
  44. u8 bearer_id;
  45. u8 status;
  46. u8 reserved[2];
  47. } __aligned(1);
  48. struct qmap_ll_switch {
  49. struct qmap_cmd_hdr hdr;
  50. u8 cmd_ver;
  51. u8 reserved;
  52. u8 request_type;
  53. u8 num_bearers;
  54. struct qmap_ll_bearer bearer[0];
  55. } __aligned(1);
  56. struct qmap_ll_switch_resp {
  57. struct qmap_cmd_hdr hdr;
  58. u8 cmd_ver;
  59. u8 reserved[2];
  60. u8 num_bearers;
  61. struct qmap_ll_bearer bearer[0];
  62. } __aligned(1);
  63. struct qmap_ll_switch_status {
  64. struct qmap_cmd_hdr hdr;
  65. u8 cmd_ver;
  66. u8 reserved[2];
  67. u8 num_bearers;
  68. struct qmap_ll_bearer bearer[0];
  69. } __aligned(1);
/*
 * LL workqueue
 *
 * Netlink ACKs are queued here so they can be sent with GFP_KERNEL
 * allocations from process context; ll_send_nl_ack() itself may run in
 * atomic context (under qos_lock or from the guard timer).
 */
static DEFINE_SPINLOCK(ll_wq_lock);	/* guards ll_wq against teardown races */
static struct workqueue_struct *ll_wq;

/* Snapshot of one bearer's switch result, taken under qos_lock */
struct ll_ack_work {
	struct work_struct work;
	u32 nl_pid;	/* netlink pid to unicast the ACK to */
	u32 nl_seq;	/* netlink sequence number of the original request */
	u8 bearer_id;
	u8 status_code;	/* SWITCH_STATUS_* result */
	u8 current_ch;	/* channel the bearer ended up on */
};
  83. static void ll_ack_fn(struct work_struct *work)
  84. {
  85. struct ll_ack_work *ack_work;
  86. struct sk_buff *skb;
  87. struct nlmsghdr *nlh;
  88. struct nlmsgerr *errmsg;
  89. unsigned int flags = NLM_F_CAPPED;
  90. ack_work = container_of(work, struct ll_ack_work, work);
  91. skb = nlmsg_new(sizeof(*errmsg), GFP_KERNEL);
  92. if (!skb)
  93. goto out;
  94. nlh = __nlmsg_put(skb, ack_work->nl_pid,
  95. ack_work->nl_seq, NLMSG_ERROR,
  96. sizeof(*errmsg), flags);
  97. errmsg = nlmsg_data(nlh);
  98. errmsg->error = 0;
  99. errmsg->msg.nlmsg_len = sizeof(struct nlmsghdr);
  100. errmsg->msg.nlmsg_type = ack_work->bearer_id;
  101. errmsg->msg.nlmsg_flags = ack_work->status_code;
  102. errmsg->msg.nlmsg_seq = ack_work->current_ch;
  103. errmsg->msg.nlmsg_pid = ack_work->nl_pid;
  104. nlmsg_end(skb, nlh);
  105. rtnl_unicast(skb, &init_net, ack_work->nl_pid);
  106. out:
  107. kfree(ack_work);
  108. }
  109. static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
  110. {
  111. struct ll_ack_work *ack_work;
  112. if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
  113. return;
  114. ack_work = kzalloc(sizeof(*ack_work), GFP_ATOMIC);
  115. if (!ack_work)
  116. return;
  117. ack_work->nl_pid = bearer->ch_switch.nl_pid;
  118. ack_work->nl_seq = bearer->ch_switch.nl_seq;
  119. ack_work->bearer_id = bearer->bearer_id;
  120. ack_work->status_code = bearer->ch_switch.status_code;
  121. ack_work->current_ch = bearer->ch_switch.current_ch;
  122. INIT_WORK(&ack_work->work, ll_ack_fn);
  123. spin_lock_bh(&ll_wq_lock);
  124. if (ll_wq)
  125. queue_work(ll_wq, &ack_work->work);
  126. else
  127. kfree(ack_work);
  128. spin_unlock_bh(&ll_wq_lock);
  129. }
/*
 * Allocate the ordered workqueue used to deliver netlink ACKs.
 * Ordered so ACKs reach userspace in submission order.
 */
void rmnet_ll_wq_init(void)
{
	WARN_ON(ll_wq);	/* double init would leak the old workqueue */
	ll_wq = alloc_ordered_workqueue("rmnet_ll_wq", 0);
}
  135. void rmnet_ll_wq_exit(void)
  136. {
  137. struct workqueue_struct *tmp = NULL;
  138. spin_lock_bh(&ll_wq_lock);
  139. if (ll_wq) {
  140. tmp = ll_wq;
  141. ll_wq = NULL;
  142. }
  143. spin_unlock_bh(&ll_wq_lock);
  144. if (tmp)
  145. destroy_workqueue(tmp);
  146. }
  147. /*
  148. * LLC switch
  149. */
  150. static void ll_qmap_maybe_set_ch(struct qos_info *qos,
  151. struct rmnet_bearer_map *bearer, u8 status)
  152. {
  153. u8 ch;
  154. if (status == SWITCH_STATUS_DEFAULT)
  155. ch = RMNET_CH_DEFAULT;
  156. else if (status == SWITCH_STATUS_LL)
  157. ch = RMNET_CH_LL;
  158. else
  159. return;
  160. bearer->ch_switch.current_ch = ch;
  161. if (bearer->mq_idx < MAX_MQ_NUM)
  162. qos->mq[bearer->mq_idx].is_ll_ch = ch;
  163. }
  164. static void ll_switch_complete(struct rmnet_bearer_map *bearer, u8 status)
  165. {
  166. bearer->ch_switch.status_code = status;
  167. if (status == SWITCH_STATUS_FAIL_TEMP &&
  168. bearer->ch_switch.retry_left) {
  169. /* Temp failure retry */
  170. bearer->ch_switch.state = CH_SWITCH_FAILED_RETRY;
  171. mod_timer(&bearer->ch_switch.guard_timer,
  172. jiffies + LL_RETRY_TIME);
  173. bearer->ch_switch.retry_left--;
  174. } else {
  175. /* Success or permanent failure */
  176. bearer->ch_switch.timer_quit = true;
  177. del_timer(&bearer->ch_switch.guard_timer);
  178. bearer->ch_switch.state = CH_SWITCH_NONE;
  179. bearer->ch_switch.retry_left = 0;
  180. ll_send_nl_ack(bearer);
  181. bearer->ch_switch.flags = 0;
  182. }
  183. }
/*
 * Handle the modem's ACK to a previously sent QMAP LL switch request.
 *
 * SUCCESS moves a bearer to CH_SWITCH_ACKED (the final result arrives
 * later via the switch status indication); any other status completes
 * the switch immediately. Always returns QMAP_CMD_DONE — this message
 * is itself an ACK and is never re-acknowledged.
 */
static int ll_qmap_handle_switch_resp(struct sk_buff *skb)
{
	struct qmap_ll_switch_resp *cmd;
	struct rmnet_bearer_map *bearer;
	struct qos_info *qos;
	struct net_device *dev;
	int i;

	/* Validate the fixed header, then the variable bearer array */
	if (skb->len < sizeof(struct qmap_ll_switch_resp))
		return QMAP_CMD_DONE;

	cmd = (struct qmap_ll_switch_resp *)skb->data;
	if (!cmd->num_bearers)
		return QMAP_CMD_DONE;

	if (skb->len < sizeof(*cmd) +
	    cmd->num_bearers * sizeof(struct qmap_ll_bearer))
		return QMAP_CMD_DONE;

	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
	if (!dev)
		return QMAP_CMD_DONE;

	qos = rmnet_get_qos_pt(dev);
	if (!qos)
		return QMAP_CMD_DONE;

	trace_dfc_ll_switch("ACK", 0, cmd->num_bearers, cmd->bearer);

	spin_lock_bh(&qos->qos_lock);

	for (i = 0; i < cmd->num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos,
						  cmd->bearer[i].bearer_id);
		if (!bearer)
			continue;

		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);

		/* Only act on the ACK matching our outstanding tx_id */
		if (bearer->ch_switch.state == CH_SWITCH_STARTED &&
		    bearer->ch_switch.switch_txid == cmd->hdr.tx_id) {
			/* This is an ACK to the switch request */
			if (cmd->bearer[i].status == SWITCH_STATUS_SUCCESS)
				bearer->ch_switch.state = CH_SWITCH_ACKED;
			else
				ll_switch_complete(bearer,
						   cmd->bearer[i].status);
		}
	}

	spin_unlock_bh(&qos->qos_lock);

	return QMAP_CMD_DONE;
}
/*
 * Handle an unsolicited LL switch status indication from the modem,
 * reporting the final channel for each listed bearer.
 *
 * Returns QMAP_CMD_ACK so the caller acknowledges the request, or
 * QMAP_CMD_INVALID when the message is malformed.
 */
static int ll_qmap_handle_switch_status(struct sk_buff *skb)
{
	struct qmap_ll_switch_status *cmd;
	struct rmnet_bearer_map *bearer;
	struct qos_info *qos;
	struct net_device *dev;
	int i;

	/* Validate the fixed header, then the variable bearer array */
	if (skb->len < sizeof(struct qmap_ll_switch_status))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_ll_switch_status *)skb->data;
	if (!cmd->num_bearers)
		return QMAP_CMD_ACK;

	if (skb->len < sizeof(*cmd) +
	    cmd->num_bearers * sizeof(struct qmap_ll_bearer))
		return QMAP_CMD_INVALID;

	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
	if (!dev)
		return QMAP_CMD_ACK;

	qos = rmnet_get_qos_pt(dev);
	if (!qos)
		return QMAP_CMD_ACK;

	trace_dfc_ll_switch("STS", 0, cmd->num_bearers, cmd->bearer);

	spin_lock_bh(&qos->qos_lock);

	for (i = 0; i < cmd->num_bearers; i++) {
		bearer = qmi_rmnet_get_bearer_map(qos,
						  cmd->bearer[i].bearer_id);
		if (!bearer)
			continue;

		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);

		/* Status after our ACK completes the pending switch */
		if (bearer->ch_switch.state == CH_SWITCH_ACKED)
			ll_switch_complete(bearer, cmd->bearer[i].status);
	}

	spin_unlock_bh(&qos->qos_lock);

	return QMAP_CMD_ACK;
}
  261. int ll_qmap_cmd_handler(struct sk_buff *skb)
  262. {
  263. struct qmap_cmd_hdr *cmd;
  264. int rc = QMAP_CMD_DONE;
  265. cmd = (struct qmap_cmd_hdr *)skb->data;
  266. if (cmd->cmd_name == QMAP_LL_SWITCH) {
  267. if (cmd->cmd_type != QMAP_CMD_ACK)
  268. return rc;
  269. } else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
  270. return rc;
  271. }
  272. switch (cmd->cmd_name) {
  273. case QMAP_LL_SWITCH:
  274. rc = ll_qmap_handle_switch_resp(skb);
  275. break;
  276. case QMAP_LL_SWITCH_STATUS:
  277. rc = ll_qmap_handle_switch_status(skb);
  278. break;
  279. default:
  280. if (cmd->cmd_type == QMAP_CMD_REQUEST)
  281. rc = QMAP_CMD_UNSUPPORTED;
  282. }
  283. return rc;
  284. }
  285. static int ll_qmap_send_switch(u8 mux_id, u8 channel, u8 num_bearers,
  286. u8 *bearer_list, __be32 *txid)
  287. {
  288. struct sk_buff *skb;
  289. struct qmap_ll_switch *ll_switch;
  290. unsigned int len;
  291. int i;
  292. if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER || !bearer_list)
  293. return -EINVAL;
  294. len = sizeof(struct qmap_ll_switch) +
  295. num_bearers * sizeof(struct qmap_ll_bearer);
  296. skb = alloc_skb(len, GFP_ATOMIC);
  297. if (!skb)
  298. return -ENOMEM;
  299. skb->protocol = htons(ETH_P_MAP);
  300. ll_switch = skb_put(skb, len);
  301. memset(ll_switch, 0, len);
  302. ll_switch->hdr.cd_bit = 1;
  303. ll_switch->hdr.mux_id = mux_id;
  304. ll_switch->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
  305. ll_switch->hdr.cmd_name = QMAP_LL_SWITCH;
  306. ll_switch->hdr.cmd_type = QMAP_CMD_REQUEST;
  307. ll_switch->hdr.tx_id = htonl(rmnet_qmap_next_txid());
  308. ll_switch->cmd_ver = QMAP_LL_VER;
  309. if (channel == RMNET_CH_CTL)
  310. ll_switch->request_type = QMAP_SWITCH_QUERY;
  311. else if (channel == RMNET_CH_LL)
  312. ll_switch->request_type = QMAP_SWITCH_TO_LL;
  313. else
  314. ll_switch->request_type = QMAP_SWITCH_TO_DEFAULT;
  315. ll_switch->num_bearers = num_bearers;
  316. for (i = 0; i < num_bearers; i++)
  317. ll_switch->bearer[i].bearer_id = bearer_list[i];
  318. if (txid)
  319. *txid = ll_switch->hdr.tx_id;
  320. trace_dfc_ll_switch("REQ", ll_switch->request_type,
  321. ll_switch->num_bearers, ll_switch->bearer);
  322. return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
  323. }
  324. /*
  325. * Start channel switch. The switch request is sent only if all bearers
  326. * are eligible to switch. Return 0 if switch request is sent.
  327. */
  328. int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen)
  329. {
  330. u8 switch_to_ch;
  331. u8 num_bearers;
  332. u8 *bearer_list;
  333. u32 flags;
  334. struct qos_info *qos;
  335. struct rmnet_bearer_map *bearer;
  336. __be32 txid;
  337. int i;
  338. int j;
  339. int rc = -EINVAL;
  340. if (!dev || !tcm)
  341. return -EINVAL;
  342. /*
  343. * tcm__pad1: switch type (ch #, 0xFF query)
  344. * tcm__pad2: num bearers
  345. * tcm_info: flags
  346. * tcm_ifindex: netlink fd
  347. * tcm_handle: pid
  348. * tcm_parent: seq
  349. */
  350. switch_to_ch = tcm->tcm__pad1;
  351. num_bearers = tcm->tcm__pad2;
  352. flags = tcm->tcm_info;
  353. if (switch_to_ch != RMNET_CH_CTL && switch_to_ch >= RMNET_CH_MAX)
  354. return -EOPNOTSUPP;
  355. if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER)
  356. return -EINVAL;
  357. if (attrlen - sizeof(*tcm) < num_bearers)
  358. return -EINVAL;
  359. bearer_list = (u8 *)tcm + sizeof(*tcm);
  360. for (i = 0; i < num_bearers; i++)
  361. for (j = 0; j < num_bearers; j++)
  362. if (j != i && bearer_list[i] == bearer_list[j])
  363. return -EINVAL;
  364. qos = rmnet_get_qos_pt(dev);
  365. if (!qos)
  366. return -EINVAL;
  367. spin_lock_bh(&qos->qos_lock);
  368. /* Validate the bearer list */
  369. for (i = 0; i < num_bearers; i++) {
  370. bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
  371. if (!bearer) {
  372. rc = -EFAULT;
  373. goto out;
  374. }
  375. if (bearer->ch_switch.state != CH_SWITCH_NONE) {
  376. rc = -EBUSY;
  377. goto out;
  378. }
  379. }
  380. /* Send QMAP switch command */
  381. rc = ll_qmap_send_switch(qos->mux_id, switch_to_ch,
  382. num_bearers, bearer_list, &txid);
  383. if (rc)
  384. goto out;
  385. /* Update state */
  386. for (i = 0; i < num_bearers; i++) {
  387. bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
  388. if (!bearer)
  389. continue;
  390. bearer->ch_switch.switch_to_ch = switch_to_ch;
  391. bearer->ch_switch.switch_txid = txid;
  392. bearer->ch_switch.state = CH_SWITCH_STARTED;
  393. bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
  394. bearer->ch_switch.retry_left =
  395. (flags & LL_MASK_AUTO_RETRY) ? LL_MAX_RETRY : 0;
  396. bearer->ch_switch.flags = flags;
  397. bearer->ch_switch.timer_quit = false;
  398. mod_timer(&bearer->ch_switch.guard_timer,
  399. jiffies + LL_TIMEOUT);
  400. bearer->ch_switch.nl_pid = tcm->tcm_handle;
  401. bearer->ch_switch.nl_seq = tcm->tcm_parent;
  402. }
  403. out:
  404. spin_unlock_bh(&qos->qos_lock);
  405. return rc;
  406. }
/*
 * Guard timer handler for a pending channel switch.
 *
 * Fires when no ACK/status arrived in time, or when a temp-failure
 * retry is due (CH_SWITCH_FAILED_RETRY). On retry, the switch request
 * is re-sent for this single bearer unless it already sits on the
 * requested channel; otherwise the switch fails with a timeout (or
 * NO_EFFECT) status and the requester is ACKed over netlink.
 */
void rmnet_ll_guard_fn(struct timer_list *t)
{
	struct rmnet_ch_switch *ch_switch;
	struct rmnet_bearer_map *bearer;
	int switch_status = SWITCH_STATUS_TIMEOUT;
	__be32 txid;
	int rc;

	ch_switch = container_of(t, struct rmnet_ch_switch, guard_timer);
	bearer = container_of(ch_switch, struct rmnet_bearer_map, ch_switch);

	spin_lock_bh(&bearer->qos->qos_lock);

	/* timer_quit set by ll_switch_complete(): switch already finished */
	if (bearer->ch_switch.timer_quit ||
	    bearer->ch_switch.state == CH_SWITCH_NONE)
		goto out;

	if (bearer->ch_switch.state == CH_SWITCH_FAILED_RETRY) {
		/* Already on the target channel: nothing left to retry */
		if (bearer->ch_switch.current_ch ==
		    bearer->ch_switch.switch_to_ch) {
			switch_status = SWITCH_STATUS_NO_EFFECT;
			goto send_err;
		}

		rc = ll_qmap_send_switch(bearer->qos->mux_id,
					 bearer->ch_switch.switch_to_ch,
					 1,
					 &bearer->bearer_id,
					 &txid);
		if (!rc) {
			/* Retry in flight; wait for its ACK */
			bearer->ch_switch.switch_txid = txid;
			bearer->ch_switch.state = CH_SWITCH_STARTED;
			bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
			goto out;
		}
	}

send_err:
	bearer->ch_switch.state = CH_SWITCH_NONE;
	bearer->ch_switch.status_code = switch_status;
	bearer->ch_switch.retry_left = 0;
	ll_send_nl_ack(bearer);
	bearer->ch_switch.flags = 0;

out:
	spin_unlock_bh(&bearer->qos->qos_lock);
}