qmi_rmnet.c

/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/soc/qcom/qmi.h>

#include "qmi_rmnet_i.h"
#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
#include "dfc.h"
#include <linux/rtnetlink.h>
#include <uapi/linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/alarmtimer.h>

#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
#define NLMSG_CLIENT_SETUP 4
#define NLMSG_CLIENT_DELETE 5
#define NLMSG_SCALE_FACTOR 6
#define NLMSG_WQ_FREQUENCY 7
#define NLMSG_CHANNEL_SWITCH 8

#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define FLAG_QMAP_MASK 0x0020
#define FLAG_PS_EXT_MASK 0x0040

#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)

#define DFC_SUPPORTED_MODE(m) \
	((m) == DFC_MODE_SA)

#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
#define FLAG_TO_PS_EXT(f) ((f) & FLAG_PS_EXT_MASK)

int dfc_mode;
int dfc_qmap;
int dfc_ps_ext;

unsigned int rmnet_wq_frequency __read_mostly = 1000;

#define PS_WORK_ACTIVE_BIT 0
#define PS_INTERVAL (((!rmnet_wq_frequency) ? \
			1 : rmnet_wq_frequency/10) * (HZ/100))
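/*
 * A note on PS_INTERVAL (a reading of the macro above, not from the
 * original sources): with the default rmnet_wq_frequency of 1000 it
 * evaluates to (1000/10) * (HZ/100) == HZ, i.e. about one second of
 * jiffies, so the knob is effectively a poll period in milliseconds
 * (assuming HZ is a multiple of 100). A frequency of 0 falls back to
 * HZ/100 jiffies, roughly 10 ms.
 */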
#define NO_DELAY (0x0000 * HZ)
#define PS_INTERVAL_KT (ms_to_ktime(1000))
#define WATCHDOG_EXPIRE_JF (msecs_to_jiffies(50))

#ifdef CONFIG_QTI_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
static LIST_HEAD(qos_cleanup_list);
#endif
static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi);

struct qmi_elem_info data_ep_id_type_v01_ei[] = {
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum data_ep_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct data_ep_id_type_v01,
				   ep_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct data_ep_id_type_v01,
				   iface_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = 0,
		.ei_array = NULL,
	},
};
EXPORT_SYMBOL(data_ep_id_type_v01_ei);
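/*
 * The ei array above describes the QMI wire encoding of
 * struct data_ep_id_type_v01: a signed 4-byte enum (ep_type) followed
 * by a u32 (iface_id), terminated by the QMI_EOTI sentinel that the QMI
 * encoder uses to find the end of the element list.
 */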
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	int i;

	if (!qmi)
		return NULL;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_clients[i])
			return qmi->dfc_clients[i];
	}

	return NULL;
}

static inline int
qmi_rmnet_has_client(struct qmi_info *qmi)
{
	if (qmi->wda_client)
		return 1;

	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
}

static int
qmi_rmnet_has_pending(struct qmi_info *qmi)
{
	int i;

	if (qmi->wda_pending)
		return 1;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_pending[i])
			return 1;
	}

	return 0;
}

#ifdef CONFIG_QTI_QMI_DFC
static void
qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
	struct rmnet_bearer_map *bearer, *br_tmp;
	struct rmnet_flow_map *itm, *fl_tmp;

	ASSERT_RTNL();

	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
		list_del(&itm->list);
		kfree(itm);
	}

	list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) {
		list_del(&bearer->list);
		kfree(bearer);
	}

	memset(qos->mq, 0, sizeof(qos->mq));
}

struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type)
{
	struct rmnet_flow_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->flow_head, list) {
		if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type))
			return itm;
	}

	return NULL;
}

struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id)
{
	struct rmnet_bearer_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->bearer_head, list) {
		if (itm->bearer_id == bearer_id)
			return itm;
	}

	return NULL;
}

static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
				      struct rmnet_flow_map *new_map)
{
	itm->bearer_id = new_map->bearer_id;
	itm->flow_id = new_map->flow_id;
	itm->ip_type = new_map->ip_type;
	itm->mq_idx = new_map->mq_idx;
}

int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
{
	struct netdev_queue *q;

	if (unlikely(mq_idx >= dev->num_tx_queues))
		return 0;

	q = netdev_get_tx_queue(dev, mq_idx);
	if (unlikely(!q))
		return 0;

	if (enable)
		netif_tx_wake_queue(q);
	else
		netif_tx_stop_queue(q);

	trace_dfc_qmi_tc(dev->name, mq_idx, enable);

	return 0;
}

static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
{
	struct Qdisc *qdisc;

	if (unlikely(txq >= dev->num_tx_queues))
		return;

	qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));
		qdisc_reset(qdisc);
		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

/**
 * qmi_rmnet_watchdog_fn - watchdog timer func
 */
static void qmi_rmnet_watchdog_fn(struct timer_list *t)
{
	struct rmnet_bearer_map *bearer;

	bearer = container_of(t, struct rmnet_bearer_map, watchdog);

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 2);

	spin_lock_bh(&bearer->qos->qos_lock);

	if (bearer->watchdog_quit)
		goto done;

	/*
	 * Possible stall, try to recover. Enable 80% query and jumpstart
	 * the bearer if disabled.
	 */
	bearer->watchdog_expire_cnt++;
	bearer->bytes_in_flight = 0;
	if (!bearer->grant_size) {
		bearer->grant_size = DEFAULT_CALL_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
		dfc_bearer_flow_ctl(bearer->qos->vnd_dev, bearer, bearer->qos);
	} else {
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
	}

done:
	bearer->watchdog_started = false;
	spin_unlock_bh(&bearer->qos->qos_lock);
}

/**
 * qmi_rmnet_watchdog_add - add the bearer to watch
 * Needs to be called with qos_lock
 */
void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer)
{
	bearer->watchdog_quit = false;

	if (bearer->watchdog_started)
		return;

	bearer->watchdog_started = true;
	mod_timer(&bearer->watchdog, jiffies + WATCHDOG_EXPIRE_JF);

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 1);
}

/**
 * qmi_rmnet_watchdog_remove - remove the bearer from watch
 * Needs to be called with qos_lock
 */
void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
	bearer->watchdog_quit = true;

	if (!bearer->watchdog_started)
		return;

	del_timer(&bearer->watchdog);
	bearer->watchdog_started = false;

	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 0);
}

/**
 * qmi_rmnet_bearer_clean - clean the removed bearer
 * Needs to be called with rtn_lock but not qos_lock
 */
static void qmi_rmnet_bearer_clean(struct qos_info *qos)
{
	if (qos->removed_bearer) {
		qos->removed_bearer->watchdog_quit = true;
		del_timer_sync(&qos->removed_bearer->watchdog);
		qos->removed_bearer->ch_switch.timer_quit = true;
		del_timer_sync(&qos->removed_bearer->ch_switch.guard_timer);
		kfree(qos->removed_bearer);
		qos->removed_bearer = NULL;
	}
}

static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
				struct qos_info *qos_info, u8 bearer_id)
{
	struct rmnet_bearer_map *bearer;

	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
	if (bearer) {
		bearer->flow_ref++;
	} else {
		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
		if (!bearer)
			return NULL;

		bearer->bearer_id = bearer_id;
		bearer->flow_ref = 1;
		bearer->grant_size = DEFAULT_CALL_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
		bearer->mq_idx = INVALID_MQ;
		bearer->ack_mq_idx = INVALID_MQ;
		bearer->qos = qos_info;
		timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
		timer_setup(&bearer->ch_switch.guard_timer,
			    rmnet_ll_guard_fn, 0);
		list_add(&bearer->list, &qos_info->bearer_head);
	}

	return bearer;
}
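/*
 * Drop one flow reference on the bearer. When the last flow goes away,
 * the bearer is unlinked from every MQ it was driving and, if @reset is
 * set, the backing qdiscs are reset and the queues re-enabled so no MQ
 * is left stopped by a grant that will never be refreshed.
 */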
static void __qmi_rmnet_bearer_put(struct net_device *dev,
				   struct qos_info *qos_info,
				   struct rmnet_bearer_map *bearer,
				   bool reset)
{
	struct mq_map *mq;
	int i, j;

	if (bearer && --bearer->flow_ref == 0) {
		for (i = 0; i < MAX_MQ_NUM; i++) {
			mq = &qos_info->mq[i];
			if (mq->bearer != bearer)
				continue;

			mq->bearer = NULL;
			mq->is_ll_ch = false;
			if (reset) {
				qmi_rmnet_reset_txq(dev, i);
				qmi_rmnet_flow_control(dev, i, 1);

				if (dfc_mode == DFC_MODE_SA) {
					j = i + ACK_MQ_OFFSET;
					qmi_rmnet_reset_txq(dev, j);
					qmi_rmnet_flow_control(dev, j, 1);
				}
			}
		}

		/* Remove from bearer map */
		list_del(&bearer->list);
		qos_info->removed_bearer = bearer;
	}
}

static void __qmi_rmnet_update_mq(struct net_device *dev,
				  struct qos_info *qos_info,
				  struct rmnet_bearer_map *bearer,
				  struct rmnet_flow_map *itm)
{
	struct mq_map *mq;

	/* In SA mode default mq is not associated with any bearer */
	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
		return;

	mq = &qos_info->mq[itm->mq_idx];
	if (!mq->bearer) {
		mq->bearer = bearer;
		mq->is_ll_ch = bearer->ch_switch.current_ch;

		if (dfc_mode == DFC_MODE_SA) {
			bearer->mq_idx = itm->mq_idx;
			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
		} else {
			bearer->mq_idx = itm->mq_idx;
		}

		qmi_rmnet_flow_control(dev, itm->mq_idx,
				       bearer->grant_size > 0 ? 1 : 0);

		if (dfc_mode == DFC_MODE_SA)
			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
					       bearer->grant_size > 0 ? 1 : 0);
	}
}

static int __qmi_rmnet_rebind_flow(struct net_device *dev,
				   struct qos_info *qos_info,
				   struct rmnet_flow_map *itm,
				   struct rmnet_flow_map *new_map)
{
	struct rmnet_bearer_map *bearer;

	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);

	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
	if (!bearer)
		return -ENOMEM;

	qmi_rmnet_update_flow_map(itm, new_map);
	itm->bearer = bearer;

	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);

	return 0;
}
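/*
 * Flow add below is driven from the NLMSG_FLOW_ACTIVATE netlink command.
 * Note the "again:" pattern: if an existing flow map differs only in MQ
 * index, the qos_lock is dropped, the stale entry is deleted through
 * qmi_rmnet_del_flow() (which takes the lock itself), and the add is
 * retried from scratch rather than mutating the entry in place.
 */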
static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
			      struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;
	struct tcmsg tmp_tcm;
	int rc = 0;

	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow activate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
	 */
	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	new_map.mq_idx = tcm->tcm_handle;
	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
			    new_map.ip_type, new_map.mq_idx, 1);

again:
	spin_lock_bh(&qos_info->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		if (itm->bearer_id != new_map.bearer_id) {
			rc = __qmi_rmnet_rebind_flow(
				dev, qos_info, itm, &new_map);
			goto done;
		} else if (itm->mq_idx != new_map.mq_idx) {
			tmp_tcm.tcm__pad1 = itm->bearer_id;
			tmp_tcm.tcm_parent = itm->flow_id;
			tmp_tcm.tcm_ifindex = itm->ip_type;
			tmp_tcm.tcm_handle = itm->mq_idx;
			spin_unlock_bh(&qos_info->qos_lock);
			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
			goto again;
		} else {
			goto done;
		}
	}

	/* Create flow map */
	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
	if (!itm) {
		spin_unlock_bh(&qos_info->qos_lock);
		return -ENOMEM;
	}

	qmi_rmnet_update_flow_map(itm, &new_map);
	list_add(&itm->list, &qos_info->flow_head);

	/* Create or update bearer map */
	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
	if (!bearer) {
		rc = -ENOMEM;
		goto done;
	}

	itm->bearer = bearer;
	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);

done:
	spin_unlock_bh(&qos_info->qos_lock);

	qmi_rmnet_bearer_clean(qos_info);

	return rc;
}

static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;

	if (!qos_info)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow deactivate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type
	 */
	spin_lock_bh(&qos_info->qos_lock);

	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		trace_dfc_flow_info(dev->name, new_map.bearer_id,
				    new_map.flow_id, new_map.ip_type,
				    itm->mq_idx, 0);

		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);

		/* Remove from flow map */
		list_del(&itm->list);
		kfree(itm);
	}

	if (list_empty(&qos_info->flow_head))
		netif_tx_wake_all_queues(dev);

	spin_unlock_bh(&qos_info->qos_lock);

	qmi_rmnet_bearer_clean(qos_info);

	return 0;
}

static void qmi_rmnet_query_flows(struct qmi_info *qmi)
{
	int i;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_clients[i] && !dfc_qmap &&
		    !qmi->dfc_client_exiting[i])
			dfc_qmi_query_flow(qmi->dfc_clients[i]);
	}
}

struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
						    u8 bearer_id)
{
	struct rmnet_bearer_map *bearer;

	bearer = __qmi_rmnet_bearer_get(qos_info, bearer_id);
	if (bearer)
		bearer->flow_ref--;

	return bearer;
}

#else
static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
			  struct rmnet_flow_map *new_map)
{
}

static inline int
qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
{
}
#endif

static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx, err = 0;
	struct svc_info svc;

	ASSERT_RTNL();

	/* client setup
	 * tcm->tcm_handle - instance, tcm->tcm_info - ep_type,
	 * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags
	 */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	if (!qmi) {
		qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
		if (!qmi)
			return -ENOMEM;

		rmnet_init_qmi_pt(port, qmi);
	}

	qmi->flag = tcm->tcm_ifindex;
	qmi->ps_ext = FLAG_TO_PS_EXT(qmi->flag);
	svc.instance = tcm->tcm_handle;
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;

	if (DFC_SUPPORTED_MODE(dfc_mode) &&
	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
		if (dfc_qmap)
			err = dfc_qmap_client_init(port, idx, &svc, qmi);
		else
			err = dfc_qmi_client_init(port, idx, &svc, qmi);
		qmi->dfc_client_exiting[idx] = false;
	}

	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
		err = wda_qmi_client_init(port, &svc, qmi);
	}

	return err;
}

static int
__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
{
	void *data = NULL;

	ASSERT_RTNL();

	if (qmi->dfc_clients[idx])
		data = qmi->dfc_clients[idx];
	else if (qmi->dfc_pending[idx])
		data = qmi->dfc_pending[idx];

	if (data) {
		if (dfc_qmap)
			dfc_qmap_client_exit(data);
		else
			dfc_qmi_client_exit(data);
		qmi->dfc_clients[idx] = NULL;
		qmi->dfc_pending[idx] = NULL;
	}

	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
		rmnet_reset_qmi_pt(port);
		kfree(qmi);
		return 0;
	}

	return 1;
}

static void
qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx;
	void *data = NULL;

	/* client delete: tcm->tcm_handle - instance */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	ASSERT_RTNL();

	if (qmi->wda_client)
		data = qmi->wda_client;
	else if (qmi->wda_pending)
		data = qmi->wda_pending;

	if ((idx == 0) && data) {
		wda_qmi_client_exit(data);
		qmi->wda_client = NULL;
		qmi->wda_pending = NULL;
	} else {
		qmi->dfc_client_exiting[idx] = true;
		qmi_rmnet_flush_ps_wq();
	}

	__qmi_rmnet_delete_client(port, qmi, idx);
}
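/*
 * qmi_rmnet_change_link() is the single netlink entry point for userspace
 * control. tcm->tcm_family carries one of the NLMSG_* commands defined at
 * the top of this file, and the remaining tcmsg fields are overloaded per
 * command as documented in the handlers above.
 */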
int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
			  int attr_len)
{
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
	void *wda_data = NULL;
	int rc = 0;

	switch (tcm->tcm_family) {
	case NLMSG_FLOW_ACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return rc;

		qmi_rmnet_add_flow(dev, tcm, qmi);
		break;
	case NLMSG_FLOW_DEACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
			return rc;

		qmi_rmnet_del_flow(dev, tcm, qmi);
		break;
	case NLMSG_CLIENT_SETUP:
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
		dfc_ps_ext = FLAG_TO_PS_EXT(tcm->tcm_ifindex);

		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
			return rc;

		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
			/* retrieve qmi again as it could have been changed */
			qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
			if (qmi &&
			    !qmi_rmnet_has_client(qmi) &&
			    !qmi_rmnet_has_pending(qmi)) {
				rmnet_reset_qmi_pt(port);
				kfree(qmi);
			}

			return rc;
		}

		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
			qmi_rmnet_work_init(port);
			rmnet_set_powersave_format(port);
		}

		rmnet_ll_wq_init();
		break;
	case NLMSG_CLIENT_DELETE:
		if (!qmi)
			return rc;

		if (tcm->tcm_handle == 0) { /* instance 0 */
			rmnet_clear_powersave_format(port);
			if (qmi->wda_client)
				wda_data = qmi->wda_client;
			else if (qmi->wda_pending)
				wda_data = qmi->wda_pending;
			wda_qmi_client_release(wda_data);
			qmi_rmnet_work_exit(port);
		}
		qmi_rmnet_delete_client(port, qmi, tcm);
		rmnet_ll_wq_exit();
		break;
	case NLMSG_SCALE_FACTOR:
		if (!tcm->tcm_ifindex)
			return rc;
		qmi_rmnet_scale_factor = tcm->tcm_ifindex;
		break;
	case NLMSG_WQ_FREQUENCY:
		rmnet_wq_frequency = tcm->tcm_ifindex;
		break;
	case NLMSG_CHANNEL_SWITCH:
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return rc;

		rc = rmnet_ll_switch(dev, tcm, attr_len);
		break;
	default:
		pr_debug("%s(): No handler\n", __func__);
		break;
	}

	return rc;
}
EXPORT_SYMBOL(qmi_rmnet_change_link);

void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
	int i;
	void *data = NULL;

	if (!qmi)
		return;

	ASSERT_RTNL();

	if (qmi->wda_client)
		data = qmi->wda_client;
	else if (qmi->wda_pending)
		data = qmi->wda_pending;

	wda_qmi_client_release(data);
	qmi_rmnet_work_exit(port);
	rmnet_ll_wq_exit();

	if (data) {
		wda_qmi_client_exit(data);
		qmi->wda_client = NULL;
		qmi->wda_pending = NULL;
	}

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (!__qmi_rmnet_delete_client(port, qmi, i))
			return;
	}
}
EXPORT_SYMBOL(qmi_rmnet_qmi_exit);

void qmi_rmnet_enable_all_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	bool do_wake;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		bearer->seq = 0;
		bearer->ack_req = 0;
		bearer->bytes_in_flight = 0;
		bearer->tcp_bidir = false;
		bearer->rat_switch = false;

		qmi_rmnet_watchdog_remove(bearer);

		if (bearer->tx_off)
			continue;

		do_wake = !bearer->grant_size;
		bearer->grant_size = DEFAULT_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);

		if (do_wake)
			dfc_bearer_flow_ctl(dev, bearer, qos);
	}

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);

bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	bool ret = true;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return true;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (!bearer->grant_size) {
			ret = false;
			break;
		}
	}

	spin_unlock_bh(&qos->qos_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);

/**
 * qmi_rmnet_lock_unlock_all_flows - lock or unlock all bearers
 */
void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock)
{
	struct qos_info *qos;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	if (lock)
		spin_lock_bh(&qos->qos_lock);
	else
		spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_lock_unlock_all_flows);

/**
 * qmi_rmnet_get_disabled_flows - get disabled bearers
 * Needs to be called with qos_lock
 */
void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
				  u8 *bearer_id)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	u8 current_num_bearers = 0;
	u8 num_bearers_left = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos || !num_bearers)
		return;

	num_bearers_left = *num_bearers;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (!bearer->grant_size && num_bearers_left) {
			if (bearer_id)
				bearer_id[current_num_bearers] =
					bearer->bearer_id;
			current_num_bearers++;
			num_bearers_left--;
		}
	}

	*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(qmi_rmnet_get_disabled_flows);

/**
 * qmi_rmnet_reset_enabled_flows - reset enabled bearers for powersave
 * Needs to be called with qos_lock
 */
void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (bearer->grant_size) {
			bearer->seq = 0;
			bearer->ack_req = 0;
			bearer->bytes_in_flight = 0;
			bearer->tcp_bidir = false;
			bearer->rat_switch = false;
			qmi_rmnet_watchdog_remove(bearer);
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
		}
	}
}
EXPORT_SYMBOL(qmi_rmnet_reset_enabled_flows);

#ifdef CONFIG_QTI_QMI_DFC
bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
				   struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = skb->queue_mapping;

	if (txq > ACK_MQ_OFFSET)
		txq -= ACK_MQ_OFFSET;

	if (unlikely(!qos || txq >= MAX_MQ_NUM))
		return false;

	return qos->mq[txq].is_ll_ch;
}
EXPORT_SYMBOL(qmi_rmnet_flow_is_low_latency);

void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);

	if (!qos)
		return;

	dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
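/*
 * A forwarded packet is treated as a pure TCP ACK when the IP payload is
 * exactly the TCP header (no data) and, of the TCP flag byte, only ACK
 * is set: tcp_flag_word() & 0x00FF0000 masks out everything but the
 * flag bits before the comparison against TCP_FLAG_ACK.
 */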
static bool _qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
{
	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2) ==
		     tcp_hdr(skb)->doff << 2) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (ntohs(ipv6_hdr(skb)->payload_len) ==
		     (tcp_hdr(skb)->doff) << 2) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;
	}

	return false;
}

static inline bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
{
	/* Locally generated TCP acks */
	if (skb_is_tcp_pure_ack(skb))
		return true;

	/* Forwarded */
	if (unlikely(_qmi_rmnet_is_tcp_ack(skb)))
		return true;

	return false;
}
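/*
 * MQ selection in SA mode: skb->mark carries the flow id that userspace
 * registered via NLMSG_FLOW_ACTIVATE. IPv6 NDP (ICMPv6 types 133-137) is
 * kept on the default MQ so neighbour discovery is never flow controlled,
 * and TCP ACKs ride the bearer's dedicated ACK MQ.
 */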
static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
{
	struct rmnet_flow_map *itm;
	int ip_type;
	int txq = DEFAULT_MQ_NUM;

	/* Put NDP in default mq */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
	    icmp6_hdr(skb)->icmp6_type >= 133 &&
	    icmp6_hdr(skb)->icmp6_type <= 137) {
		return DEFAULT_MQ_NUM;
	}

	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;

	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
	if (unlikely(!itm))
		goto done;

	/* Put the packet on its assigned MQ; TCP ACKs go to the ACK MQ */
	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
		txq = itm->bearer->ack_mq_idx;
	else
		txq = itm->mq_idx;

done:
	spin_unlock_bh(&qos->qos_lock);
	return txq;
}

int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = 0, ip_type = AF_INET;
	struct rmnet_flow_map *itm;
	u32 mark = skb->mark;

	if (!qos)
		return 0;

	if (likely(dfc_mode == DFC_MODE_SA))
		return qmi_rmnet_get_queue_sa(qos, skb);

	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;

	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (itm)
		txq = itm->mq_idx;

	spin_unlock_bh(&qos->qos_lock);

	return txq;
}
EXPORT_SYMBOL(qmi_rmnet_get_queue);

inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
{
	return grant / qmi_rmnet_scale_factor;
}
EXPORT_SYMBOL(qmi_rmnet_grant_per);

void *qmi_rmnet_qos_init(struct net_device *real_dev,
			 struct net_device *vnd_dev, u8 mux_id)
{
	struct qos_info *qos;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return NULL;

	qos->mux_id = mux_id;
	qos->real_dev = real_dev;
	qos->vnd_dev = vnd_dev;
	qos->tran_num = 0;
	INIT_LIST_HEAD(&qos->flow_head);
	INIT_LIST_HEAD(&qos->bearer_head);
	spin_lock_init(&qos->qos_lock);

	return qos;
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);

void qmi_rmnet_qos_exit_pre(void *qos)
{
	struct qos_info *qosi = (struct qos_info *)qos;
	struct rmnet_bearer_map *bearer;

	if (!qos)
		return;

	list_for_each_entry(bearer, &qosi->bearer_head, list) {
		bearer->watchdog_quit = true;
		del_timer_sync(&bearer->watchdog);
		bearer->ch_switch.timer_quit = true;
		del_timer_sync(&bearer->ch_switch.guard_timer);
	}

	list_add(&qosi->list, &qos_cleanup_list);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);

void qmi_rmnet_qos_exit_post(void)
{
	struct qos_info *qos, *tmp;

	synchronize_rcu();
	list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
		list_del(&qos->list);
		qmi_rmnet_clean_flow_list(qos);
		kfree(qos);
	}
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
#endif

#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static bool rmnet_work_inited;
static LIST_HEAD(ps_list);
static u8 ps_bearer_id[32];

struct rmnet_powersave_work {
	struct delayed_work work;
	struct alarm atimer;
	void *port;
	u64 old_rx_pkts;
	u64 old_tx_pkts;
};

void qmi_rmnet_ps_on_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry_rcu(tmp, &ps_list, list)
		tmp->ps_on_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);

void qmi_rmnet_ps_off_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry_rcu(tmp, &ps_list, list)
		tmp->ps_off_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);

int qmi_rmnet_ps_ind_register(void *port,
			      struct qmi_rmnet_ps_ind *ps_ind)
{
	if (!port || !ps_ind || !ps_ind->ps_on_handler ||
	    !ps_ind->ps_off_handler)
		return -EINVAL;

	list_add_rcu(&ps_ind->list, &ps_list);

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_register);

int qmi_rmnet_ps_ind_deregister(void *port,
				struct qmi_rmnet_ps_ind *ps_ind)
{
	struct qmi_rmnet_ps_ind *tmp;

	if (!port || !ps_ind)
		return -EINVAL;

	list_for_each_entry_rcu(tmp, &ps_list, list) {
		if (tmp == ps_ind) {
			list_del_rcu(&ps_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);

int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
	int rc = -EINVAL;
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);

	if (!qmi || !qmi->wda_client)
		return rc;

	rc = wda_set_powersave_mode(qmi->wda_client, enable);
	if (rc < 0) {
		pr_err("%s() failed to set powersave mode[%u], err=%d\n",
		       __func__, enable, rc);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);

static void qmi_rmnet_work_restart(void *port)
{
	rcu_read_lock();
	if (!rmnet_work_quit)
		queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
	rcu_read_unlock();
}

static enum alarmtimer_restart qmi_rmnet_work_alarm(struct alarm *atimer,
						    ktime_t now)
{
	struct rmnet_powersave_work *real_work;

	real_work = container_of(atimer, struct rmnet_powersave_work, atimer);
	qmi_rmnet_work_restart(real_work->port);

	return ALARMTIMER_NORESTART;
}
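/*
 * Powersave heuristic (as implemented below): every PS_INTERVAL the work
 * samples the port's rx/tx packet counters. If nothing moved since the
 * last sample, DFC/DL-marker indications are deregistered via WDA and all
 * flows are force-enabled so traffic can still flow blind; any later
 * activity re-registers and triggers a flow query to resynchronize
 * grants.
 */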
static void qmi_rmnet_check_stats(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	bool dl_msg_active;
	bool use_alarm_timer = true;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work || !real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {
		/* Ready to accept grant */
		qmi->ps_ignore_grant = false;

		/* Register to get QMI DFC and DL marker */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0)
			goto end;

		qmi->ps_enabled = false;

		/* Do a query when coming out of powersave */
		qmi_rmnet_query_flows(qmi);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	dl_msg_active = qmi->dl_msg_active;
	qmi->dl_msg_active = false;

	if (!rxd && !txd) {
		/* If no DL msg was received and some flow is disabled
		 * (likely RLF), no need to enter powersave
		 */
		if (!dl_msg_active &&
		    !rmnet_all_flows_enabled(real_work->port)) {
			use_alarm_timer = false;
			goto end;
		}

		/* Deregister to suppress QMI DFC and DL marker */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0)
			goto end;

		qmi->ps_enabled = true;

		/* Ignore grant after going into powersave */
		qmi->ps_ignore_grant = true;

		/* Clear the bit before enabling flow so pending packets
		 * can trigger the work again
		 */
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
		rmnet_enable_all_flows(real_work->port);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		return;
	}
end:
	rcu_read_lock();
	if (!rmnet_work_quit) {
		if (use_alarm_timer)
			alarm_start_relative(&real_work->atimer,
					     PS_INTERVAL_KT);
		else
			queue_delayed_work(rmnet_ps_wq, &real_work->work,
					   PS_INTERVAL);
	}
	rcu_read_unlock();
}
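/*
 * QMAP/PS-extension variant: instead of toggling WDA registration, it
 * snapshots which bearers are currently flow-disabled (while flows are
 * locked via rmnet_lock_unlock_all_flows()) and hands that list to the
 * modem through dfc_qmap_set_powersave() before resetting the
 * still-enabled bearers to their default grant.
 */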
static void qmi_rmnet_check_stats_2(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	u8 num_bearers;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {
		/* Ready to accept grant */
		qmi->ps_ignore_grant = false;

		/* Out of powersave */
		if (dfc_qmap_set_powersave(0, 0, NULL))
			goto end;

		qmi->ps_enabled = false;

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	if (!rxd && !txd) {
		rmnet_lock_unlock_all_flows(real_work->port, true);

		num_bearers = sizeof(ps_bearer_id);
		memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
		rmnet_get_disabled_flows(real_work->port, &num_bearers,
					 ps_bearer_id);

		/* Enter powersave */
		if (dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id)) {
			rmnet_lock_unlock_all_flows(real_work->port, false);
			goto end;
		}

		rmnet_reset_enabled_flows(real_work->port);
		qmi->ps_ignore_grant = true;
		qmi->ps_enabled = true;
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);

		rmnet_lock_unlock_all_flows(real_work->port, false);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		return;
	}
end:
	rcu_read_lock();
	if (!rmnet_work_quit)
		alarm_start_relative(&real_work->atimer, PS_INTERVAL_KT);
	rcu_read_unlock();
}

static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	if (status)
		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
	else
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
}
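/*
 * The periodic check is re-armed through an ALARM_BOOTTIME alarm rather
 * than only the delayed work so that, with the system in suspend, the
 * timer can still fire and the powersave state machine keeps making
 * progress.
 */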
void qmi_rmnet_work_init(void *port)
{
	if (rmnet_ps_wq)
		return;

	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
				      WQ_CPU_INTENSIVE, 1);
	if (!rmnet_ps_wq)
		return;

	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
	if (!rmnet_work) {
		destroy_workqueue(rmnet_ps_wq);
		rmnet_ps_wq = NULL;
		return;
	}

	if (dfc_qmap && dfc_ps_ext)
		INIT_DEFERRABLE_WORK(&rmnet_work->work,
				     qmi_rmnet_check_stats_2);
	else
		INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);

	alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
	rmnet_work->port = port;
	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
			  &rmnet_work->old_tx_pkts);

	rmnet_work_quit = false;
	qmi_rmnet_work_set_active(rmnet_work->port, 0);
	rmnet_work_inited = true;
}
EXPORT_SYMBOL(qmi_rmnet_work_init);

void qmi_rmnet_work_maybe_restart(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi || !rmnet_work_inited))
		return;

	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) {
		qmi->ps_ignore_grant = false;
		qmi_rmnet_work_restart(port);
	}
}
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);

void qmi_rmnet_work_exit(void *port)
{
	if (!rmnet_ps_wq || !rmnet_work)
		return;

	rmnet_work_quit = true;
	synchronize_rcu();

	rmnet_work_inited = false;
	alarm_cancel(&rmnet_work->atimer);
	cancel_delayed_work_sync(&rmnet_work->work);
	destroy_workqueue(rmnet_ps_wq);
	qmi_rmnet_work_set_active(port, 0);
	rmnet_ps_wq = NULL;
	kfree(rmnet_work);
	rmnet_work = NULL;
}
EXPORT_SYMBOL(qmi_rmnet_work_exit);

void qmi_rmnet_set_dl_msg_active(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	qmi->dl_msg_active = true;
}
EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);

void qmi_rmnet_flush_ps_wq(void)
{
	if (rmnet_ps_wq)
		flush_workqueue(rmnet_ps_wq);
}

bool qmi_rmnet_ignore_grant(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return false;

	return qmi->ps_ignore_grant;
}
EXPORT_SYMBOL(qmi_rmnet_ignore_grant);
#endif