qmi_rmnet.c

/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/soc/qcom/qmi.h>

#include "qmi_rmnet_i.h"
#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
#include "dfc.h"
#include <linux/rtnetlink.h>
#include <uapi/linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/alarmtimer.h>

#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
#define NLMSG_CLIENT_SETUP 4
#define NLMSG_CLIENT_DELETE 5
#define NLMSG_SCALE_FACTOR 6
#define NLMSG_WQ_FREQUENCY 7

#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define FLAG_QMAP_MASK 0x0020

#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
#define DFC_SUPPORTED_MODE(m) \
	((m) == DFC_MODE_SA)
#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)

int dfc_mode;
int dfc_qmap;

unsigned int rmnet_wq_frequency __read_mostly = 1000;

#define PS_WORK_ACTIVE_BIT 0
#define PS_INTERVAL (((!rmnet_wq_frequency) ? \
			1 : rmnet_wq_frequency/10) * (HZ/100))
#define NO_DELAY (0x0000 * HZ)
#define PS_INTERVAL_KT (ms_to_ktime(1000))
#define WATCHDOG_EXPIRE_JF (msecs_to_jiffies(50))

#ifdef CONFIG_QTI_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
static LIST_HEAD(qos_cleanup_list);
#endif

static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi);

struct qmi_elem_info data_ep_id_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum data_ep_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct data_ep_id_type_v01,
					   ep_type),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct data_ep_id_type_v01,
					   iface_id),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_EOTI,
		.elem_len	= 0,
		.elem_size	= 0,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= 0,
		.ei_array	= NULL,
	},
};
EXPORT_SYMBOL(data_ep_id_type_v01_ei);
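
/**
 * qmi_rmnet_has_dfc_client - return the first registered DFC client
 *
 * Walks the per-instance DFC client table and returns the first non-NULL
 * client handle, or NULL when no DFC client is registered.
 */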
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	int i;

	if (!qmi)
		return NULL;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_clients[i])
			return qmi->dfc_clients[i];
	}

	return NULL;
}

static inline int
qmi_rmnet_has_client(struct qmi_info *qmi)
{
	if (qmi->wda_client)
		return 1;

	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
}

static int
qmi_rmnet_has_pending(struct qmi_info *qmi)
{
	int i;

	if (qmi->wda_pending)
		return 1;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_pending[i])
			return 1;
	}

	return 0;
}

#ifdef CONFIG_QTI_QMI_DFC
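/**
 * qmi_rmnet_clean_flow_list - free all flow and bearer mappings
 *
 * Releases every rmnet_flow_map and rmnet_bearer_map attached to the qos
 * handle and clears the mq table. Called under RTNL during qos teardown.
 */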
static void
qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
	struct rmnet_bearer_map *bearer, *br_tmp;
	struct rmnet_flow_map *itm, *fl_tmp;

	ASSERT_RTNL();

	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
		list_del(&itm->list);
		kfree(itm);
	}

	list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) {
		list_del(&bearer->list);
		kfree(bearer);
	}

	memset(qos->mq, 0, sizeof(qos->mq));
}
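
/**
 * qmi_rmnet_get_flow_map - look up a flow by flow_id and IP type
 *
 * No locking is done here; callers in this file hold qos_lock while
 * walking the flow list.
 */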
struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type)
{
	struct rmnet_flow_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->flow_head, list) {
		if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type))
			return itm;
	}

	return NULL;
}

struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id)
{
	struct rmnet_bearer_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->bearer_head, list) {
		if (itm->bearer_id == bearer_id)
			return itm;
	}

	return NULL;
}

static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
				      struct rmnet_flow_map *new_map)
{
	itm->bearer_id = new_map->bearer_id;
	itm->flow_id = new_map->flow_id;
	itm->ip_type = new_map->ip_type;
	itm->mq_idx = new_map->mq_idx;
}
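
/**
 * qmi_rmnet_flow_control - wake or stop a single tx queue
 *
 * A non-zero @enable wakes the netdev tx queue at @mq_idx, zero stops it.
 * Out-of-range queue indices are silently ignored.
 */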
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
{
	struct netdev_queue *q;

	if (unlikely(mq_idx >= dev->num_tx_queues))
		return 0;

	q = netdev_get_tx_queue(dev, mq_idx);
	if (unlikely(!q))
		return 0;

	if (enable)
		netif_tx_wake_queue(q);
	else
		netif_tx_stop_queue(q);

	trace_dfc_qmi_tc(dev->name, mq_idx, enable);

	return 0;
}

static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
{
	struct Qdisc *qdisc;

	if (unlikely(txq >= dev->num_tx_queues))
		return;

	qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));
		qdisc_reset(qdisc);
		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

/**
 * qmi_rmnet_watchdog_fn - watchdog timer func
 */
static void qmi_rmnet_watchdog_fn(struct timer_list *t)
{
	struct rmnet_bearer_map *bearer;

	bearer = container_of(t, struct rmnet_bearer_map, watchdog);

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 2);

	spin_lock_bh(&bearer->qos->qos_lock);

	if (bearer->watchdog_quit)
		goto done;

	/*
	 * Possible stall, try to recover. Enable 80% query and jumpstart
	 * the bearer if disabled.
	 */
	bearer->watchdog_expire_cnt++;
	bearer->bytes_in_flight = 0;
	if (!bearer->grant_size) {
		bearer->grant_size = DEFAULT_CALL_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
		dfc_bearer_flow_ctl(bearer->qos->vnd_dev, bearer, bearer->qos);
	} else {
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
	}

done:
	bearer->watchdog_started = false;
	spin_unlock_bh(&bearer->qos->qos_lock);
}

/**
 * qmi_rmnet_watchdog_add - add the bearer to watch
 * Needs to be called with qos_lock
 */
void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer)
{
	bearer->watchdog_quit = false;

	if (bearer->watchdog_started)
		return;

	bearer->watchdog_started = true;
	mod_timer(&bearer->watchdog, jiffies + WATCHDOG_EXPIRE_JF);

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 1);
}

/**
 * qmi_rmnet_watchdog_remove - remove the bearer from watch
 * Needs to be called with qos_lock
 */
void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
	bearer->watchdog_quit = true;

	if (!bearer->watchdog_started)
		return;

	del_timer(&bearer->watchdog);
	bearer->watchdog_started = false;

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 0);
}

/**
 * qmi_rmnet_bearer_clean - clean the removed bearer
 * Needs to be called with rtnl_lock but not qos_lock
 */
static void qmi_rmnet_bearer_clean(struct qos_info *qos)
{
	if (qos->removed_bearer) {
		qos->removed_bearer->watchdog_quit = true;
		del_timer_sync(&qos->removed_bearer->watchdog);
		kfree(qos->removed_bearer);
		qos->removed_bearer = NULL;
	}
}
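
/**
 * __qmi_rmnet_bearer_get - find or create a bearer map entry
 *
 * Takes a flow reference on an existing bearer, or allocates a new one with
 * the default grant and an initialized (but not yet armed) watchdog timer.
 * Callers in this file hold qos_lock, hence the GFP_ATOMIC allocation.
 */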
static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
				struct qos_info *qos_info, u8 bearer_id)
{
	struct rmnet_bearer_map *bearer;

	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
	if (bearer) {
		bearer->flow_ref++;
	} else {
		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
		if (!bearer)
			return NULL;

		bearer->bearer_id = bearer_id;
		bearer->flow_ref = 1;
		bearer->grant_size = DEFAULT_CALL_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
		bearer->mq_idx = INVALID_MQ;
		bearer->ack_mq_idx = INVALID_MQ;
		bearer->qos = qos_info;
		timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
		list_add(&bearer->list, &qos_info->bearer_head);
	}

	return bearer;
}
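
/**
 * __qmi_rmnet_bearer_put - drop a flow reference on a bearer
 *
 * On the last reference the bearer is detached from every mq it owned
 * (optionally resetting and re-enabling those tx queues, plus the paired
 * ACK queues in SA mode), removed from the bearer list and parked in
 * qos_info->removed_bearer so qmi_rmnet_bearer_clean() can free it
 * outside qos_lock.
 */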
static void __qmi_rmnet_bearer_put(struct net_device *dev,
				   struct qos_info *qos_info,
				   struct rmnet_bearer_map *bearer,
				   bool reset)
{
	struct mq_map *mq;
	int i, j;

	if (bearer && --bearer->flow_ref == 0) {
		for (i = 0; i < MAX_MQ_NUM; i++) {
			mq = &qos_info->mq[i];
			if (mq->bearer != bearer)
				continue;

			mq->bearer = NULL;
			if (reset) {
				qmi_rmnet_reset_txq(dev, i);
				qmi_rmnet_flow_control(dev, i, 1);

				if (dfc_mode == DFC_MODE_SA) {
					j = i + ACK_MQ_OFFSET;
					qmi_rmnet_reset_txq(dev, j);
					qmi_rmnet_flow_control(dev, j, 1);
				}
			}
		}

		/* Remove from bearer map */
		list_del(&bearer->list);
		qos_info->removed_bearer = bearer;
	}
}
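
/**
 * __qmi_rmnet_update_mq - bind a bearer to the flow's mq
 *
 * Associates the flow's mq (and, in SA mode, the paired ACK mq) with the
 * bearer if the mq is not already owned, then applies the bearer's current
 * grant state to the tx queue(s).
 */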
static void __qmi_rmnet_update_mq(struct net_device *dev,
				  struct qos_info *qos_info,
				  struct rmnet_bearer_map *bearer,
				  struct rmnet_flow_map *itm)
{
	struct mq_map *mq;

	/* In SA mode default mq is not associated with any bearer */
	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
		return;

	mq = &qos_info->mq[itm->mq_idx];
	if (!mq->bearer) {
		mq->bearer = bearer;

		if (dfc_mode == DFC_MODE_SA) {
			bearer->mq_idx = itm->mq_idx;
			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
		} else {
			bearer->mq_idx = itm->mq_idx;
		}

		qmi_rmnet_flow_control(dev, itm->mq_idx,
				       bearer->grant_size > 0 ? 1 : 0);

		if (dfc_mode == DFC_MODE_SA)
			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
					       bearer->grant_size > 0 ? 1 : 0);
	}
}

static int __qmi_rmnet_rebind_flow(struct net_device *dev,
				   struct qos_info *qos_info,
				   struct rmnet_flow_map *itm,
				   struct rmnet_flow_map *new_map)
{
	struct rmnet_bearer_map *bearer;

	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);

	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
	if (!bearer)
		return -ENOMEM;

	qmi_rmnet_update_flow_map(itm, new_map);
	itm->bearer = bearer;

	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);

	return 0;
}
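
/**
 * qmi_rmnet_add_flow - handle NLMSG_FLOW_ACTIVATE
 *
 * Creates a flow map for the (flow_id, ip_type) pair described by @tcm, or
 * rebinds an existing one when its bearer or mq assignment changed, and
 * attaches it to a new or existing bearer. Runs under RTNL; the flow and
 * bearer lists are updated under qos_lock.
 */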
static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
			      struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;
	struct tcmsg tmp_tcm;
	int rc = 0;

	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow activate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
	 */
	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	new_map.mq_idx = tcm->tcm_handle;
	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
			    new_map.ip_type, new_map.mq_idx, 1);

again:
	spin_lock_bh(&qos_info->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		if (itm->bearer_id != new_map.bearer_id) {
			rc = __qmi_rmnet_rebind_flow(
				dev, qos_info, itm, &new_map);
			goto done;
		} else if (itm->mq_idx != new_map.mq_idx) {
			tmp_tcm.tcm__pad1 = itm->bearer_id;
			tmp_tcm.tcm_parent = itm->flow_id;
			tmp_tcm.tcm_ifindex = itm->ip_type;
			tmp_tcm.tcm_handle = itm->mq_idx;
			spin_unlock_bh(&qos_info->qos_lock);
			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
			goto again;
		} else {
			goto done;
		}
	}

	/* Create flow map */
	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
	if (!itm) {
		spin_unlock_bh(&qos_info->qos_lock);
		return -ENOMEM;
	}

	qmi_rmnet_update_flow_map(itm, &new_map);
	list_add(&itm->list, &qos_info->flow_head);

	/* Create or update bearer map */
	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
	if (!bearer) {
		rc = -ENOMEM;
		goto done;
	}

	itm->bearer = bearer;
	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);

done:
	spin_unlock_bh(&qos_info->qos_lock);

	qmi_rmnet_bearer_clean(qos_info);

	return rc;
}
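
/**
 * qmi_rmnet_del_flow - handle NLMSG_FLOW_DEACTIVATE
 *
 * Removes the matching flow map, drops its bearer reference (resetting the
 * queues the bearer owned) and wakes all tx queues once no flows remain.
 */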
static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;

	if (!qos_info)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow deactivate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type
	 */
	spin_lock_bh(&qos_info->qos_lock);

	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		trace_dfc_flow_info(dev->name, new_map.bearer_id,
				    new_map.flow_id, new_map.ip_type,
				    itm->mq_idx, 0);

		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);

		/* Remove from flow map */
		list_del(&itm->list);
		kfree(itm);
	}

	if (list_empty(&qos_info->flow_head))
		netif_tx_wake_all_queues(dev);

	spin_unlock_bh(&qos_info->qos_lock);

	qmi_rmnet_bearer_clean(qos_info);

	return 0;
}

static void qmi_rmnet_query_flows(struct qmi_info *qmi)
{
	int i;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_clients[i] && !dfc_qmap &&
		    !qmi->dfc_client_exiting[i])
			dfc_qmi_query_flow(qmi->dfc_clients[i]);
	}
}

struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
						    u8 bearer_id)
{
	struct rmnet_bearer_map *bearer;

	bearer = __qmi_rmnet_bearer_get(qos_info, bearer_id);
	if (bearer)
		bearer->flow_ref--;

	return bearer;
}

#else
static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
			  struct rmnet_flow_map *new_map)
{
}

static inline int
qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
{
}
#endif
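
/**
 * qmi_rmnet_setup_client - handle NLMSG_CLIENT_SETUP
 *
 * Allocates the per-port qmi_info on first use, then starts the DFC client
 * for this instance (QMAP or QMI transport, depending on dfc_qmap) and,
 * when powersave is requested on instance 0, the WDA client as well.
 */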
static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx, err = 0;
	struct svc_info svc;

	ASSERT_RTNL();

	/* client setup
	 * tcm->tcm_handle - instance, tcm->tcm_info - ep_type,
	 * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags
	 */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	if (!qmi) {
		qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
		if (!qmi)
			return -ENOMEM;

		rmnet_init_qmi_pt(port, qmi);
	}

	qmi->flag = tcm->tcm_ifindex;
	svc.instance = tcm->tcm_handle;
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;

	if (DFC_SUPPORTED_MODE(dfc_mode) &&
	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
		if (dfc_qmap)
			err = dfc_qmap_client_init(port, idx, &svc, qmi);
		else
			err = dfc_qmi_client_init(port, idx, &svc, qmi);
		qmi->dfc_client_exiting[idx] = false;
	}

	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
		err = wda_qmi_client_init(port, &svc, qmi);
	}

	return err;
}

static int
__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
{
	void *data = NULL;

	ASSERT_RTNL();

	if (qmi->dfc_clients[idx])
		data = qmi->dfc_clients[idx];
	else if (qmi->dfc_pending[idx])
		data = qmi->dfc_pending[idx];

	if (data) {
		if (dfc_qmap)
			dfc_qmap_client_exit(data);
		else
			dfc_qmi_client_exit(data);
		qmi->dfc_clients[idx] = NULL;
		qmi->dfc_pending[idx] = NULL;
	}

	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
		rmnet_reset_qmi_pt(port);
		kfree(qmi);
		return 0;
	}

	return 1;
}

static void
qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx;
	void *data = NULL;

	/* client delete: tcm->tcm_handle - instance */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	ASSERT_RTNL();

	if (qmi->wda_client)
		data = qmi->wda_client;
	else if (qmi->wda_pending)
		data = qmi->wda_pending;

	if ((idx == 0) && data) {
		wda_qmi_client_exit(data);
		qmi->wda_client = NULL;
		qmi->wda_pending = NULL;
	} else {
		qmi->dfc_client_exiting[idx] = true;
		qmi_rmnet_flush_ps_wq();
	}

	__qmi_rmnet_delete_client(port, qmi, idx);
}
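
/**
 * qmi_rmnet_change_link - netlink entry point from the rmnet driver
 *
 * Dispatches on tcm->tcm_family: flow activate/deactivate, client setup
 * and delete, and runtime tuning of the grant scale factor and powersave
 * work queue frequency.
 */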
void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
{
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	struct tcmsg *tcm = (struct tcmsg *)tcm_pt;

	switch (tcm->tcm_family) {
	case NLMSG_FLOW_ACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return;

		qmi_rmnet_add_flow(dev, tcm, qmi);
		break;
	case NLMSG_FLOW_DEACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
			return;

		qmi_rmnet_del_flow(dev, tcm, qmi);
		break;
	case NLMSG_CLIENT_SETUP:
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);

		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
			return;

		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
			/* retrieve qmi again as it could have been changed */
			qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
			if (qmi &&
			    !qmi_rmnet_has_client(qmi) &&
			    !qmi_rmnet_has_pending(qmi)) {
				rmnet_reset_qmi_pt(port);
				kfree(qmi);
			}
		} else if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
			qmi_rmnet_work_init(port);
			rmnet_set_powersave_format(port);
		}
		break;
	case NLMSG_CLIENT_DELETE:
		if (!qmi)
			return;

		if (tcm->tcm_handle == 0) { /* instance 0 */
			rmnet_clear_powersave_format(port);
			qmi_rmnet_work_exit(port);
		}
		qmi_rmnet_delete_client(port, qmi, tcm);
		break;
	case NLMSG_SCALE_FACTOR:
		if (!tcm->tcm_ifindex)
			return;

		qmi_rmnet_scale_factor = tcm->tcm_ifindex;
		break;
	case NLMSG_WQ_FREQUENCY:
		rmnet_wq_frequency = tcm->tcm_ifindex;
		break;
	default:
		pr_debug("%s(): No handler\n", __func__);
		break;
	}
}
EXPORT_SYMBOL(qmi_rmnet_change_link);

void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
	int i;
	void *data = NULL;

	if (!qmi)
		return;

	ASSERT_RTNL();

	qmi_rmnet_work_exit(port);

	if (qmi->wda_client)
		data = qmi->wda_client;
	else if (qmi->wda_pending)
		data = qmi->wda_pending;

	if (data) {
		wda_qmi_client_exit(data);
		qmi->wda_client = NULL;
		qmi->wda_pending = NULL;
	}

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (!__qmi_rmnet_delete_client(port, qmi, i))
			return;
	}
}
EXPORT_SYMBOL(qmi_rmnet_qmi_exit);

void qmi_rmnet_enable_all_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	bool do_wake;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		bearer->seq = 0;
		bearer->ack_req = 0;
		bearer->bytes_in_flight = 0;
		bearer->tcp_bidir = false;
		bearer->rat_switch = false;

		qmi_rmnet_watchdog_remove(bearer);

		if (bearer->tx_off)
			continue;

		do_wake = !bearer->grant_size;
		bearer->grant_size = DEFAULT_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);

		if (do_wake)
			dfc_bearer_flow_ctl(dev, bearer, qos);
	}

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);

bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	bool ret = true;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return true;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (!bearer->grant_size) {
			ret = false;
			break;
		}
	}

	spin_unlock_bh(&qos->qos_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);

#ifdef CONFIG_QTI_QMI_DFC
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);

	if (!qos)
		return;

	dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
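
/**
 * qmi_rmnet_is_tcp_ack - identify bare TCP ACKs
 *
 * Matches packets whose total length is exactly an IPv4/IPv6 header plus a
 * 20- or 32-byte TCP header (i.e. no payload, optionally with timestamps)
 * and whose only TCP flag is ACK. Used to steer ACKs to the dedicated ACK
 * mq in SA mode.
 */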
static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
{
	unsigned int len = skb->len;

	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (ip_hdr(skb)->ihl == 5) &&
		    (len == 40 || len == 52) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (len == 60 || len == 72) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;
	}

	return false;
}
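
/**
 * qmi_rmnet_get_queue_sa - select a tx queue in SA mode
 *
 * IPv6 neighbour discovery (ICMPv6 types 133-137) always goes to the
 * default mq. For mapped flows, bare TCP ACKs go to the bearer's ACK mq
 * and everything else to the flow's assigned mq.
 */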
static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
{
	struct rmnet_flow_map *itm;
	int ip_type;
	int txq = DEFAULT_MQ_NUM;

	/* Put NDP in default mq */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
	    icmp6_hdr(skb)->icmp6_type >= 133 &&
	    icmp6_hdr(skb)->icmp6_type <= 137) {
		return DEFAULT_MQ_NUM;
	}

	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;

	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
	if (unlikely(!itm))
		goto done;

	/* Put the packet in the assigned mq except TCP ack */
	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
		txq = itm->bearer->ack_mq_idx;
	else
		txq = itm->mq_idx;

done:
	spin_unlock_bh(&qos->qos_lock);

	return txq;
}

int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = 0, ip_type = AF_INET;
	struct rmnet_flow_map *itm;
	u32 mark = skb->mark;

	if (!qos)
		return 0;

	if (likely(dfc_mode == DFC_MODE_SA))
		return qmi_rmnet_get_queue_sa(qos, skb);

	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;

	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (itm)
		txq = itm->mq_idx;

	spin_unlock_bh(&qos->qos_lock);

	return txq;
}
EXPORT_SYMBOL(qmi_rmnet_get_queue);

inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
{
	return grant / qmi_rmnet_scale_factor;
}
EXPORT_SYMBOL(qmi_rmnet_grant_per);

void *qmi_rmnet_qos_init(struct net_device *real_dev,
			 struct net_device *vnd_dev, u8 mux_id)
{
	struct qos_info *qos;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return NULL;

	qos->mux_id = mux_id;
	qos->real_dev = real_dev;
	qos->vnd_dev = vnd_dev;
	qos->tran_num = 0;
	INIT_LIST_HEAD(&qos->flow_head);
	INIT_LIST_HEAD(&qos->bearer_head);
	spin_lock_init(&qos->qos_lock);

	return qos;
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);

void qmi_rmnet_qos_exit_pre(void *qos)
{
	struct qos_info *qosi = (struct qos_info *)qos;
	struct rmnet_bearer_map *bearer;

	if (!qos)
		return;

	list_for_each_entry(bearer, &qosi->bearer_head, list) {
		bearer->watchdog_quit = true;
		del_timer_sync(&bearer->watchdog);
	}

	list_add(&qosi->list, &qos_cleanup_list);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);

void qmi_rmnet_qos_exit_post(void)
{
	struct qos_info *qos, *tmp;

	synchronize_rcu();
	list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
		list_del(&qos->list);
		qmi_rmnet_clean_flow_list(qos);
		kfree(qos);
	}
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
#endif

#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static bool rmnet_work_inited;
static LIST_HEAD(ps_list);

struct rmnet_powersave_work {
	struct delayed_work work;
	struct alarm atimer;
	void *port;
	u64 old_rx_pkts;
	u64 old_tx_pkts;
};

void qmi_rmnet_ps_on_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry_rcu(tmp, &ps_list, list)
		tmp->ps_on_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);

void qmi_rmnet_ps_off_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry_rcu(tmp, &ps_list, list)
		tmp->ps_off_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);

int qmi_rmnet_ps_ind_register(void *port,
			      struct qmi_rmnet_ps_ind *ps_ind)
{
	if (!port || !ps_ind || !ps_ind->ps_on_handler ||
	    !ps_ind->ps_off_handler)
		return -EINVAL;

	list_add_rcu(&ps_ind->list, &ps_list);

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_register);

int qmi_rmnet_ps_ind_deregister(void *port,
				struct qmi_rmnet_ps_ind *ps_ind)
{
	struct qmi_rmnet_ps_ind *tmp;

	if (!port || !ps_ind)
		return -EINVAL;

	list_for_each_entry_rcu(tmp, &ps_list, list) {
		if (tmp == ps_ind) {
			list_del_rcu(&ps_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);

int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
	int rc = -EINVAL;
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);

	if (!qmi || !qmi->wda_client)
		return rc;

	rc = wda_set_powersave_mode(qmi->wda_client, enable);
	if (rc < 0) {
		pr_err("%s() failed set powersave mode[%u], err=%d\n",
		       __func__, enable, rc);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);

static void qmi_rmnet_work_restart(void *port)
{
	rcu_read_lock();
	if (!rmnet_work_quit)
		queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
	rcu_read_unlock();
}

static enum alarmtimer_restart qmi_rmnet_work_alarm(struct alarm *atimer,
						    ktime_t now)
{
	struct rmnet_powersave_work *real_work;

	real_work = container_of(atimer, struct rmnet_powersave_work, atimer);
	qmi_rmnet_work_restart(real_work->port);

	return ALARMTIMER_NORESTART;
}
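
/**
 * qmi_rmnet_check_stats - periodic powersave state machine
 *
 * When powersave is active, leave it: re-register for QMI DFC and DL
 * markers, query flow status and notify listeners. Otherwise compare rx/tx
 * packet counts against the previous run; if the link was idle, enter
 * powersave, re-enable all flows and stop rescheduling. In all other cases
 * the work re-arms itself via the alarm timer or the delayed work.
 */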
static void qmi_rmnet_check_stats(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	bool dl_msg_active;
	bool use_alarm_timer = true;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work || !real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {
		/* Ready to accept grant */
		qmi->ps_ignore_grant = false;

		/* Register to get QMI DFC and DL marker */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0)
			goto end;

		qmi->ps_enabled = false;

		/* Do a query when coming out of powersave */
		qmi_rmnet_query_flows(qmi);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	dl_msg_active = qmi->dl_msg_active;
	qmi->dl_msg_active = false;

	if (!rxd && !txd) {
		/* If no DL msg was received and a flow is disabled
		 * (likely in RLF), there is no need to enter powersave
		 */
		if (!dl_msg_active &&
		    !rmnet_all_flows_enabled(real_work->port)) {
			use_alarm_timer = false;
			goto end;
		}

		/* Deregister to suppress QMI DFC and DL marker */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0)
			goto end;

		qmi->ps_enabled = true;

		/* Ignore grant after going into powersave */
		qmi->ps_ignore_grant = true;

		/* Clear the bit before enabling flow so pending packets
		 * can trigger the work again
		 */
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
		rmnet_enable_all_flows(real_work->port);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		return;
	}
end:
	rcu_read_lock();
	if (!rmnet_work_quit) {
		if (use_alarm_timer)
			alarm_start_relative(&real_work->atimer,
					     PS_INTERVAL_KT);
		else
			queue_delayed_work(rmnet_ps_wq, &real_work->work,
					   PS_INTERVAL);
	}
	rcu_read_unlock();
}

static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	if (status)
		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
	else
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
}
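
/**
 * qmi_rmnet_work_init - start the powersave monitoring machinery
 *
 * Allocates a single-threaded workqueue and the rmnet_powersave_work
 * instance on first client setup, seeds the packet counters and schedules
 * the first stats check.
 */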
void qmi_rmnet_work_init(void *port)
{
	if (rmnet_ps_wq)
		return;

	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
				      WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!rmnet_ps_wq)
		return;

	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
	if (!rmnet_work) {
		destroy_workqueue(rmnet_ps_wq);
		rmnet_ps_wq = NULL;
		return;
	}

	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
	alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
	rmnet_work->port = port;
	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
			  &rmnet_work->old_tx_pkts);

	rmnet_work_quit = false;
	qmi_rmnet_work_set_active(rmnet_work->port, 1);
	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
	rmnet_work_inited = true;
}
EXPORT_SYMBOL(qmi_rmnet_work_init);

void qmi_rmnet_work_maybe_restart(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi || !rmnet_work_inited))
		return;

	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
		qmi_rmnet_work_restart(port);
}
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);

void qmi_rmnet_work_exit(void *port)
{
	if (!rmnet_ps_wq || !rmnet_work)
		return;

	rmnet_work_quit = true;
	synchronize_rcu();

	rmnet_work_inited = false;
	alarm_cancel(&rmnet_work->atimer);
	cancel_delayed_work_sync(&rmnet_work->work);
	destroy_workqueue(rmnet_ps_wq);
	qmi_rmnet_work_set_active(port, 0);
	rmnet_ps_wq = NULL;
	kfree(rmnet_work);
	rmnet_work = NULL;
}
EXPORT_SYMBOL(qmi_rmnet_work_exit);

void qmi_rmnet_set_dl_msg_active(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	qmi->dl_msg_active = true;
}
EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);

void qmi_rmnet_flush_ps_wq(void)
{
	if (rmnet_ps_wq)
		flush_workqueue(rmnet_ps_wq);
}

bool qmi_rmnet_ignore_grant(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return false;

	return qmi->ps_ignore_grant;
}
EXPORT_SYMBOL(qmi_rmnet_ignore_grant);
#endif