dfc_qmi.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558
  1. /*
  2. * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <net/pkt_sched.h>
  14. #include "rmnet_qmi.h"
  15. #include "qmi_rmnet.h"
  16. #include "dfc_defs.h"
  17. #define CREATE_TRACE_POINTS
  18. #include "dfc.h"
/* On-the-wire QMAP control header prepended to DFC ack commands.
 * Field order matches the MAP framing: flags byte (pad_len/reserved/cd_bit),
 * mux id, then big-endian payload length.
 * NOTE(review): __aligned(1) only relaxes alignment; the fields happen to
 * have no internal padding here, but __packed would state the wire-format
 * intent more directly — confirm against the QMAP spec before changing.
 */
struct dfc_qmap_header {
	u8 pad_len:6;		/* number of trailing pad bytes */
	u8 reserved_bit:1;
	u8 cd_bit:1;		/* 1 = command frame, 0 = data frame */
	u8 mux_id;		/* logical channel the command targets */
	__be16 pkt_len;		/* payload length, network byte order */
} __aligned(1);
/* QMAP flow-control ack command body sent by dfc_send_ack().
 * Layout mirrors the modem's DFC ack format; do not reorder fields.
 */
struct dfc_ack_cmd {
	struct dfc_qmap_header header;	/* cd_bit set, pkt_len = body size */
	u8 command_name;		/* 4 = flow-control ack (see dfc_send_ack) */
	u8 cmd_type:2;			/* 0 = request */
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;		/* htonl(qos->tran_num) at send time */
	u8 ver:2;			/* protocol version, set to 2 */
	u8 reserved3:6;
	u8 type:2;			/* DFC_ACK_TYPE_* */
	u8 reserved4:6;
	u16 dfc_seq;			/* htons of the acked sequence number */
	u8 reserved5[3];
	u8 bearer_id;			/* bearer being acknowledged */
} __aligned(1);
  41. static void dfc_svc_init(struct work_struct *work);
  42. /* **************************************************** */
  43. #define DFC_SERVICE_ID_V01 0x4E
  44. #define DFC_SERVICE_VERS_V01 0x01
  45. #define DFC_TIMEOUT_JF msecs_to_jiffies(1000)
  46. #define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
  47. #define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
  48. #define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11
  49. #define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7
  50. #define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
  51. #define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
  52. #define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
  53. #define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7
  54. #define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
  55. #define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024
  56. #define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
  57. #define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
  58. #define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
  59. #define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
/* QMI_DFC_BIND_CLIENT_REQ_V01: binds this client to a data endpoint. */
struct dfc_bind_client_req_msg_v01 {
	u8 ep_id_valid;				/* optional TLV present flag */
	struct data_ep_id_type_v01 ep_id;	/* endpoint type + interface id */
};
/* QMI_DFC_BIND_CLIENT_RESP_V01: standard QMI result only. */
struct dfc_bind_client_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};
/* QMI_DFC_INDICATION_REGISTER_REQ_V01: enables/disables the two
 * indications this driver consumes (flow status, TX link status).
 */
struct dfc_indication_register_req_msg_v01 {
	u8 report_flow_status_valid;
	u8 report_flow_status;
	u8 report_tx_link_status_valid;
	u8 report_tx_link_status;
};
/* QMI_DFC_INDICATION_REGISTER_RESP_V01: standard QMI result only. */
struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};
/* QMI encode/decode table for struct dfc_qos_id_type_v01 (qos_id, ip_type).
 * Entry order must match the wire layout; embedded via
 * dfc_flow_status_info_type_v01_ei.
 */
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   qos_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for struct dfc_flow_status_info_type_v01:
 * per-bearer grant record (subs/mux/bearer ids, byte grant, sequence
 * number, optional variable-length qos_ids array).
 */
static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   num_bytes),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   seq_num),
		.ei_array = NULL,
	},
	{
		/* length prefix for the qos_ids variable-length array below */
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_QOS_ID_V01,
		.elem_size = sizeof(struct dfc_qos_id_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids),
		.ei_array = dfc_qos_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for struct dfc_ancillary_info_type_v01:
 * per-bearer ancillary flags (reserved u32 carries the DFC_IS_* bits
 * tested by the flow-status handlers).
 */
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   reserved),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI_DFC_GET_FLOW_STATUS_REQ_V01: optional list of bearers to query. */
struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
};
/* QMI_DFC_GET_FLOW_STATUS_RESP_V01: result plus per-bearer grants. */
struct dfc_get_flow_status_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
};
/* Deferred-work item: one received indication queued for processing.
 * msg_id selects which union member is populated.
 */
struct dfc_svc_ind {
	struct list_head list;
	u16 msg_id;
	union {
		struct dfc_flow_status_ind_msg_v01 dfc_info;
		struct dfc_tx_link_status_ind_msg_v01 tx_status;
	} d;
};
/* QMI encode table for the bind-client request: optional ep_id TLV (0x10). */
static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for the bind-client response: mandatory result TLV. */
static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct dfc_bind_client_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode table for the indication-register request: two optional
 * flag/value TLV pairs (0x10 flow status, 0x11 tx link status).
 */
static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for the indication-register response: result TLV only. */
static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_indication_register_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for QMI_DFC_FLOW_STATUS_IND_V01:
 * optional flow_status array (TLV 0x10), eod_ack_reqd flag (0x11) and
 * ancillary_info array (0x12). Each VAR_LEN_ARRAY is preceded by its
 * QMI_DATA_LEN entry sharing the same tlv_type.
 */
static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_ancillary_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info),
		.ei_array = dfc_ancillary_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode table for the get-flow-status request: optional
 * variable-length bearer id list under TLV 0x10.
 */
static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(u8),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for the get-flow-status response: mandatory result
 * TLV (0x02) plus optional variable-length flow_status array (0x10).
 */
static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for struct dfc_bearer_info_type_v01:
 * identifies one bearer (subs/mux/bearer ids + ip type) inside the
 * TX link status indication.
 */
static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for QMI_DFC_TX_LINK_STATUS_IND_V01: mandatory
 * tx_status byte (0x01) plus optional variable-length bearer list (0x10).
 */
static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   tx_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_bearer_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info),
		.ei_array = dfc_bearer_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,	/* end-of-table sentinel */
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
  662. static int
  663. dfc_bind_client_req(struct qmi_handle *dfc_handle,
  664. struct sockaddr_qrtr *ssctl, struct svc_info *svc)
  665. {
  666. struct dfc_bind_client_resp_msg_v01 *resp;
  667. struct dfc_bind_client_req_msg_v01 *req;
  668. struct qmi_txn txn;
  669. int ret;
  670. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  671. if (!req)
  672. return -ENOMEM;
  673. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  674. if (!resp) {
  675. kfree(req);
  676. return -ENOMEM;
  677. }
  678. ret = qmi_txn_init(dfc_handle, &txn,
  679. dfc_bind_client_resp_msg_v01_ei, resp);
  680. if (ret < 0) {
  681. pr_err("%s() Failed init for response, err: %d\n",
  682. __func__, ret);
  683. goto out;
  684. }
  685. req->ep_id_valid = 1;
  686. req->ep_id.ep_type = svc->ep_type;
  687. req->ep_id.iface_id = svc->iface_id;
  688. ret = qmi_send_request(dfc_handle, ssctl, &txn,
  689. QMI_DFC_BIND_CLIENT_REQ_V01,
  690. QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
  691. dfc_bind_client_req_msg_v01_ei, req);
  692. if (ret < 0) {
  693. qmi_txn_cancel(&txn);
  694. pr_err("%s() Failed sending request, err: %d\n",
  695. __func__, ret);
  696. goto out;
  697. }
  698. ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
  699. if (ret < 0) {
  700. pr_err("%s() Response waiting failed, err: %d\n",
  701. __func__, ret);
  702. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  703. pr_err("%s() Request rejected, result: %d, err: %d\n",
  704. __func__, resp->resp.result, resp->resp.error);
  705. ret = -resp->resp.result;
  706. }
  707. out:
  708. kfree(resp);
  709. kfree(req);
  710. return ret;
  711. }
  712. static int
  713. dfc_indication_register_req(struct qmi_handle *dfc_handle,
  714. struct sockaddr_qrtr *ssctl, u8 reg)
  715. {
  716. struct dfc_indication_register_resp_msg_v01 *resp;
  717. struct dfc_indication_register_req_msg_v01 *req;
  718. struct qmi_txn txn;
  719. int ret;
  720. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  721. if (!req)
  722. return -ENOMEM;
  723. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  724. if (!resp) {
  725. kfree(req);
  726. return -ENOMEM;
  727. }
  728. ret = qmi_txn_init(dfc_handle, &txn,
  729. dfc_indication_register_resp_msg_v01_ei, resp);
  730. if (ret < 0) {
  731. pr_err("%s() Failed init for response, err: %d\n",
  732. __func__, ret);
  733. goto out;
  734. }
  735. req->report_flow_status_valid = 1;
  736. req->report_flow_status = reg;
  737. req->report_tx_link_status_valid = 1;
  738. req->report_tx_link_status = reg;
  739. ret = qmi_send_request(dfc_handle, ssctl, &txn,
  740. QMI_DFC_INDICATION_REGISTER_REQ_V01,
  741. QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
  742. dfc_indication_register_req_msg_v01_ei, req);
  743. if (ret < 0) {
  744. qmi_txn_cancel(&txn);
  745. pr_err("%s() Failed sending request, err: %d\n",
  746. __func__, ret);
  747. goto out;
  748. }
  749. ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
  750. if (ret < 0) {
  751. pr_err("%s() Response waiting failed, err: %d\n",
  752. __func__, ret);
  753. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  754. pr_err("%s() Request rejected, result: %d, err: %d\n",
  755. __func__, resp->resp.result, resp->resp.error);
  756. ret = -resp->resp.result;
  757. }
  758. out:
  759. kfree(resp);
  760. kfree(req);
  761. return ret;
  762. }
  763. static int
  764. dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
  765. struct sockaddr_qrtr *ssctl,
  766. struct dfc_get_flow_status_resp_msg_v01 *resp)
  767. {
  768. struct dfc_get_flow_status_req_msg_v01 *req;
  769. struct qmi_txn *txn;
  770. int ret;
  771. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  772. if (!req)
  773. return -ENOMEM;
  774. txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
  775. if (!txn) {
  776. kfree(req);
  777. return -ENOMEM;
  778. }
  779. ret = qmi_txn_init(dfc_handle, txn,
  780. dfc_get_flow_status_resp_msg_v01_ei, resp);
  781. if (ret < 0) {
  782. pr_err("%s() Failed init for response, err: %d\n",
  783. __func__, ret);
  784. goto out;
  785. }
  786. ret = qmi_send_request(dfc_handle, ssctl, txn,
  787. QMI_DFC_GET_FLOW_STATUS_REQ_V01,
  788. QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
  789. dfc_get_flow_status_req_msg_v01_ei, req);
  790. if (ret < 0) {
  791. qmi_txn_cancel(txn);
  792. pr_err("%s() Failed sending request, err: %d\n",
  793. __func__, ret);
  794. goto out;
  795. }
  796. ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
  797. if (ret < 0) {
  798. pr_err("%s() Response waiting failed, err: %d\n",
  799. __func__, ret);
  800. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  801. pr_err("%s() Request rejected, result: %d, err: %d\n",
  802. __func__, resp->resp.result, resp->resp.error);
  803. ret = -resp->resp.result;
  804. }
  805. out:
  806. kfree(txn);
  807. kfree(req);
  808. return ret;
  809. }
  810. static int dfc_init_service(struct dfc_qmi_data *data)
  811. {
  812. int rc;
  813. rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
  814. if (rc < 0)
  815. return rc;
  816. return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
  817. }
/* Build and transmit a DFC flow-control ack as a QMAP command packet.
 * In QMAP mode the ack is delegated to dfc_qmap_send_ack(); otherwise a
 * struct dfc_ack_cmd is assembled by hand and queued on the real device.
 * Silently returns if the device has no qos context or the skb
 * allocation fails (best-effort ack).
 */
static void
dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	struct sk_buff *skb;
	struct dfc_ack_cmd *msg;
	int data_size = sizeof(struct dfc_ack_cmd);
	int header_size = sizeof(struct dfc_qmap_header);

	if (!qos)
		return;

	/* QMAP transport has its own ack path */
	if (dfc_qmap) {
		dfc_qmap_send_ack(qos, bearer_id, seq, type);
		return;
	}

	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return;

	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
	memset(msg, 0, data_size);

	/* QMAP header: command frame, length excludes the header itself */
	msg->header.cd_bit = 1;
	msg->header.mux_id = mux_id;
	msg->header.pkt_len = htons(data_size - header_size);

	/* Ack body: command 4, request type, version 2 */
	msg->bearer_id = bearer_id;
	msg->command_name = 4;
	msg->cmd_type = 0;
	msg->dfc_seq = htons(seq);
	msg->type = type;
	msg->ver = 2;
	msg->transaction_id = htonl(qos->tran_num);

	skb->dev = qos->real_dev;
	skb->protocol = htons(ETH_P_MAP);

	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
	/* NOTE(review): tran_num increment is not locked here — presumably
	 * callers serialize acks per qos context; confirm.
	 */
	qos->tran_num++;

	rmnet_map_tx_qmap_cmd(skb);
}
  853. int dfc_bearer_flow_ctl(struct net_device *dev,
  854. struct rmnet_bearer_map *bearer,
  855. struct qos_info *qos)
  856. {
  857. bool enable;
  858. enable = bearer->grant_size ? true : false;
  859. qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
  860. /* Do not flow disable tcp ack q in tcp bidir */
  861. if (bearer->ack_mq_idx != INVALID_MQ &&
  862. (enable || !bearer->tcp_bidir))
  863. qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
  864. if (!enable && bearer->ack_req)
  865. dfc_send_ack(dev, bearer->bearer_id,
  866. bearer->seq, qos->mux_id,
  867. DFC_ACK_TYPE_DISABLE);
  868. return 0;
  869. }
  870. static int dfc_all_bearer_flow_ctl(struct net_device *dev,
  871. struct qos_info *qos, u8 ack_req, u32 ancillary,
  872. struct dfc_flow_status_info_type_v01 *fc_info)
  873. {
  874. struct rmnet_bearer_map *bearer;
  875. list_for_each_entry(bearer, &qos->bearer_head, list) {
  876. bearer->grant_size = fc_info->num_bytes;
  877. bearer->grant_thresh =
  878. qmi_rmnet_grant_per(bearer->grant_size);
  879. bearer->seq = fc_info->seq_num;
  880. bearer->ack_req = ack_req;
  881. bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
  882. bearer->last_grant = fc_info->num_bytes;
  883. bearer->last_seq = fc_info->seq_num;
  884. bearer->last_adjusted_grant = fc_info->num_bytes;
  885. dfc_bearer_flow_ctl(dev, bearer, qos);
  886. }
  887. return 0;
  888. }
  889. static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
  890. struct dfc_flow_status_info_type_v01 *fc_info)
  891. {
  892. u32 grant;
  893. if (!fc_info->rx_bytes_valid)
  894. return fc_info->num_bytes;
  895. if (bearer->bytes_in_flight > fc_info->rx_bytes)
  896. bearer->bytes_in_flight -= fc_info->rx_bytes;
  897. else
  898. bearer->bytes_in_flight = 0;
  899. /* Adjusted grant = grant - bytes_in_flight */
  900. if (fc_info->num_bytes > bearer->bytes_in_flight)
  901. grant = fc_info->num_bytes - bearer->bytes_in_flight;
  902. else
  903. grant = 0;
  904. trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
  905. fc_info->num_bytes, fc_info->rx_bytes,
  906. bearer->bytes_in_flight, grant);
  907. return grant;
  908. }
/* Apply a flow status update to a single bearer.
 *
 * Looks up the bearer (falling back to a no-reference lookup), filters
 * out indications that must be ignored (mid RAT-switch, or a grant
 * received while TX is off), adjusts the grant for in-flight bytes on
 * qmap queries, and re-runs flow control only when the bearer crosses
 * a zero <-> non-zero grant boundary.
 *
 * Caller holds qos->qos_lock (see dfc_do_burst_flow_control()).
 * Returns the dfc_bearer_flow_ctl() result, or 0 when no flow-control
 * action was needed.
 */
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
			     u8 ack_req, u32 ancillary,
			     struct dfc_flow_status_info_type_v01 *fc_info,
			     bool is_query)
{
	struct rmnet_bearer_map *itm = NULL;
	int rc = 0;
	bool action = false;
	u32 adjusted_grant;

	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
	if (!itm)
		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);

	if (itm) {
		/* The RAT switch flag indicates the start and end of
		 * the switch. Ignore indications in between.
		 */
		if (DFC_IS_RAT_SWITCH(ancillary))
			itm->rat_switch = !fc_info->num_bytes;
		else
			if (itm->rat_switch)
				return 0;

		/* If TX is OFF but we received grant, ignore it */
		if (itm->tx_off && fc_info->num_bytes > 0)
			return 0;

		/* Adjust grant for query */
		if (dfc_qmap && is_query) {
			adjusted_grant = dfc_adjust_grant(itm, fc_info);
		} else {
			adjusted_grant = fc_info->num_bytes;
			itm->bytes_in_flight = 0;
		}

		/* Flow state changes only on a zero <-> non-zero
		 * grant transition.
		 */
		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
		    (itm->grant_size > 0 && adjusted_grant == 0))
			action = true;

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_ack(qos, itm->bearer_id,
					  itm->seq, DFC_ACK_TYPE_DISABLE);

		itm->grant_size = adjusted_grant;

		/* No further query if the adjusted grant is less
		 * than 20% of the original grant. Add to watch to
		 * recover if no indication is received.
		 */
		if (dfc_qmap && is_query &&
		    itm->grant_size < (fc_info->num_bytes / 5)) {
			itm->grant_thresh = itm->grant_size;
			qmi_rmnet_watchdog_add(itm);
		} else {
			itm->grant_thresh =
				qmi_rmnet_grant_per(itm->grant_size);
			qmi_rmnet_watchdog_remove(itm);
		}

		itm->seq = fc_info->seq_num;
		itm->ack_req = ack_req;
		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
		itm->last_grant = fc_info->num_bytes;
		itm->last_seq = fc_info->seq_num;
		itm->last_adjusted_grant = adjusted_grant;

		if (action)
			rc = dfc_bearer_flow_ctl(dev, itm, qos);
	}

	return rc;
}
/* Process a flow status indication (or query response replayed as one).
 *
 * For each flow_status entry: find its matching ancillary info by
 * mux_id/bearer_id, resolve the rmnet device for the mux, and apply
 * the update under the per-device qos_lock. Grants are dropped wholesale
 * while qmi_rmnet_ignore_grant() says so. bearer_id 0xFF fans the update
 * out to all bearers; otherwise a single bearer map is updated.
 *
 * Runs under rcu_read_lock() so the rmnet dev lookup stays valid.
 * NOTE(review): a failed dev lookup aborts the remaining entries via
 * goto clean_out rather than continuing — presumably the mux/port is
 * being torn down at that point.
 */
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind,
			       bool is_query)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
	struct dfc_ancillary_info_type_v01 *ai;
	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
	u32 ancillary;
	int i, j;

	rcu_read_lock();

	for (i = 0; i < ind->flow_status_len; i++) {
		flow_status = &ind->flow_status[i];

		/* Pair this flow with its optional ancillary word */
		ancillary = 0;
		if (ind->ancillary_info_valid) {
			for (j = 0; j < ind->ancillary_info_len; j++) {
				ai = &ind->ancillary_info[j];
				if (ai->mux_id == flow_status->mux_id &&
				    ai->bearer_id == flow_status->bearer_id) {
					ancillary = ai->reserved;
					break;
				}
			}
		}

		trace_dfc_flow_ind(dfc->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
				   flow_status->seq_num,
				   ack_req,
				   ancillary);

		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		spin_lock_bh(&qos->qos_lock);

		/* Powersave-style suppression: discard the grant */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			spin_unlock_bh(&qos->qos_lock);
			continue;
		}

		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
				dev, qos, ack_req, ancillary, flow_status);
		else
			dfc_update_fc_map(
				dev, qos, ack_req, ancillary, flow_status,
				is_query);

		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
/* Update a bearer's TX on/off state from a link status indication.
 * Turning TX off revokes the grant and flow-controls the queues;
 * turning TX back on (outside a RAT switch) seeds a default grant so
 * traffic can resume without waiting for a fresh indication.
 * Caller holds qos->qos_lock (see dfc_handle_tx_link_status_ind()).
 */
static void dfc_update_tx_link_status(struct net_device *dev,
				      struct qos_info *qos, u8 tx_status,
				      struct dfc_bearer_info_type_v01 *binfo)
{
	struct rmnet_bearer_map *itm = NULL;

	itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
	if (!itm)
		return;

	/* If no change in tx status, ignore */
	if (itm->tx_off == !tx_status)
		return;

	if (itm->grant_size && !tx_status) {
		/* TX going off: drop grant and disable flows */
		itm->grant_size = 0;
		itm->tcp_bidir = false;
		itm->bytes_in_flight = 0;
		qmi_rmnet_watchdog_remove(itm);
		dfc_bearer_flow_ctl(dev, itm, qos);
	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
		/* TX back on with no grant: restore a default grant */
		itm->grant_size = DEFAULT_GRANT;
		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		itm->seq = 0;
		itm->ack_req = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
	}

	/* Commit the new state last so the early-exit check above works */
	itm->tx_off = !tx_status;
}
  1054. void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
  1055. struct dfc_tx_link_status_ind_msg_v01 *ind)
  1056. {
  1057. struct net_device *dev;
  1058. struct qos_info *qos;
  1059. struct dfc_bearer_info_type_v01 *bearer_info;
  1060. int i;
  1061. rcu_read_lock();
  1062. for (i = 0; i < ind->bearer_info_len; i++) {
  1063. bearer_info = &ind->bearer_info[i];
  1064. trace_dfc_tx_link_status_ind(dfc->index, i,
  1065. ind->tx_status,
  1066. bearer_info->mux_id,
  1067. bearer_info->bearer_id);
  1068. dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
  1069. bearer_info->mux_id);
  1070. if (!dev)
  1071. goto clean_out;
  1072. qos = (struct qos_info *)rmnet_get_qos_pt(dev);
  1073. if (!qos)
  1074. continue;
  1075. spin_lock_bh(&qos->qos_lock);
  1076. dfc_update_tx_link_status(
  1077. dev, qos, ind->tx_status, bearer_info);
  1078. spin_unlock_bh(&qos->qos_lock);
  1079. }
  1080. clean_out:
  1081. rcu_read_unlock();
  1082. }
  1083. static void dfc_qmi_ind_work(struct work_struct *work)
  1084. {
  1085. struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
  1086. qmi_ind_work);
  1087. struct dfc_svc_ind *svc_ind;
  1088. unsigned long flags;
  1089. if (!dfc)
  1090. return;
  1091. local_bh_disable();
  1092. do {
  1093. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1094. svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
  1095. struct dfc_svc_ind, list);
  1096. if (svc_ind)
  1097. list_del(&svc_ind->list);
  1098. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1099. if (!svc_ind)
  1100. break;
  1101. if (!dfc->restart_state) {
  1102. if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
  1103. dfc_do_burst_flow_control(
  1104. dfc, &svc_ind->d.dfc_info,
  1105. false);
  1106. else if (svc_ind->msg_id ==
  1107. QMI_DFC_TX_LINK_STATUS_IND_V01)
  1108. dfc_handle_tx_link_status_ind(
  1109. dfc, &svc_ind->d.tx_status);
  1110. }
  1111. kfree(svc_ind);
  1112. } while (1);
  1113. local_bh_enable();
  1114. qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
  1115. }
  1116. static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
  1117. struct qmi_txn *txn, const void *data)
  1118. {
  1119. struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
  1120. handle);
  1121. struct dfc_flow_status_ind_msg_v01 *ind_msg;
  1122. struct dfc_svc_ind *svc_ind;
  1123. unsigned long flags;
  1124. if (qmi != &dfc->handle)
  1125. return;
  1126. ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
  1127. if (ind_msg->flow_status_valid) {
  1128. if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
  1129. pr_err("%s() Invalid fc info len: %d\n",
  1130. __func__, ind_msg->flow_status_len);
  1131. return;
  1132. }
  1133. svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
  1134. if (!svc_ind)
  1135. return;
  1136. svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
  1137. memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));
  1138. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1139. list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
  1140. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1141. queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
  1142. }
  1143. }
  1144. static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
  1145. struct sockaddr_qrtr *sq,
  1146. struct qmi_txn *txn, const void *data)
  1147. {
  1148. struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
  1149. handle);
  1150. struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
  1151. struct dfc_svc_ind *svc_ind;
  1152. unsigned long flags;
  1153. if (qmi != &dfc->handle)
  1154. return;
  1155. ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
  1156. if (ind_msg->bearer_info_valid) {
  1157. if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
  1158. pr_err("%s() Invalid bearer info len: %d\n",
  1159. __func__, ind_msg->bearer_info_len);
  1160. return;
  1161. }
  1162. svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
  1163. if (!svc_ind)
  1164. return;
  1165. svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
  1166. memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));
  1167. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1168. list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
  1169. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1170. queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
  1171. }
  1172. }
/* Deferred service-arrival work: perform DFC service initialization and
 * publish this client into the qmi_info tables under RTNL.
 *
 * restart_state is checked repeatedly so a concurrent client teardown
 * (dfc_qmi_client_exit sets restart_state = 1) aborts the setup at any
 * stage. rtnl_trylock() is spun with cond_resched() instead of
 * rtnl_lock() so the restart flag can still interrupt the wait.
 */
static void dfc_svc_init(struct work_struct *work)
{
	int rc = 0;
	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;

	if (data->restart_state == 1)
		return;

	rc = dfc_init_service(data);
	if (rc < 0) {
		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
		return;
	}

	if (data->restart_state == 1)
		return;

	/* Take RTNL, but bail out if a restart begins while waiting */
	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi) {
		rtnl_unlock();
		return;
	}

	/* Promote from pending to active client slot */
	qmi->dfc_pending[data->index] = NULL;
	qmi->dfc_clients[data->index] = (void *)data;
	trace_dfc_client_state_up(data->index,
				  data->svc.instance,
				  data->svc.ep_type,
				  data->svc.iface_id);

	rtnl_unlock();

	pr_info("Connection established with the DFC Service\n");
}
  1208. static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
  1209. {
  1210. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1211. handle);
  1212. data->ssctl.sq_family = AF_QIPCRTR;
  1213. data->ssctl.sq_node = svc->node;
  1214. data->ssctl.sq_port = svc->port;
  1215. queue_work(data->dfc_wq, &data->svc_arrive);
  1216. return 0;
  1217. }
  1218. static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
  1219. {
  1220. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1221. handle);
  1222. if (!data)
  1223. pr_debug("%s() data is null\n", __func__);
  1224. }
/* QMI server lifecycle callbacks for the DFC service lookup */
static struct qmi_ops server_ops = {
	.new_server = dfc_svc_arrive,
	.del_server = dfc_svc_exit,
};
/* Decode tables and dispatch targets for the two DFC indications we
 * consume: flow status (grant updates) and TX link status.
 */
static struct qmi_msg_handler qmi_indication_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
		.ei = dfc_flow_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
		.fn = dfc_clnt_ind_cb,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
		.ei = dfc_tx_link_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
		.fn = dfc_tx_link_status_ind_cb,
	},
	{},
};
/* Create and register a DFC QMI client for the given rmnet port.
 *
 * Allocates the client state, creates its single-threaded workqueue,
 * initializes the QMI handle with the indication handlers above, and
 * starts a lookup for the DFC service instance. On success the client
 * is parked in qmi->dfc_pending[index]; dfc_svc_init() promotes it to
 * dfc_clients[index] once the service arrives.
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on the goto-cleanup error path.
 */
int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
			struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;
	int rc = -ENOMEM;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
	INIT_LIST_HEAD(&data->qmi_ind_q);
	spin_lock_init(&data->qmi_ind_lock);

	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
	}

	INIT_WORK(&data->svc_arrive, dfc_svc_init);
	rc = qmi_handle_init(&data->handle,
			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
			     &server_ops, qmi_indication_handler);
	if (rc < 0) {
		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
		goto err1;
	}

	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
			    DFC_SERVICE_VERS_V01,
			    psvc->instance);
	if (rc < 0) {
		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
		goto err2;
	}

	qmi->dfc_pending[index] = (void *)data;

	return 0;

err2:
	qmi_handle_release(&data->handle);
err1:
	destroy_workqueue(data->dfc_wq);
err0:
	kfree(data);
	return rc;
}
  1293. void dfc_qmi_client_exit(void *dfc_data)
  1294. {
  1295. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1296. if (!data) {
  1297. pr_err("%s() data is null\n", __func__);
  1298. return;
  1299. }
  1300. data->restart_state = 1;
  1301. trace_dfc_client_state_down(data->index, 0);
  1302. qmi_handle_release(&data->handle);
  1303. drain_workqueue(data->dfc_wq);
  1304. destroy_workqueue(data->dfc_wq);
  1305. kfree(data);
  1306. }
/* TX-path accounting hook: charge a transmitted packet of @len bytes
 * against the grant of the bearer that flow @mark maps to.
 *
 * Tracks bytes_in_flight for later grant adjustment, decrements the
 * remaining grant, requests more grant (THRESHOLD ack) when the grant
 * crosses below the threshold, and flow-controls the queues when the
 * grant is exhausted. All under qos->qos_lock.
 */
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len)
{
	struct rmnet_bearer_map *bearer = NULL;
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock_bh(&qos->qos_lock);

	/* Mark is flow_id */
	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (likely(itm))
		bearer = itm->bearer;
	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, mark, bearer->grant_size);

	/* Counted even with zero grant; consumed by dfc_adjust_grant() */
	bearer->bytes_in_flight += len;

	if (!bearer->grant_size)
		goto out;

	/* Deduct this packet from the remaining grant (floor at 0) */
	start_grant = bearer->grant_size;
	if (len >= bearer->grant_size)
		bearer->grant_size = 0;
	else
		bearer->grant_size -= len;

	/* Ask for more grant exactly once, on the downward crossing */
	if (start_grant > bearer->grant_thresh &&
	    bearer->grant_size <= bearer->grant_thresh) {
		dfc_send_ack(dev, bearer->bearer_id,
			     bearer->seq, qos->mux_id,
			     DFC_ACK_TYPE_THRESHOLD);
	}

	/* Grant exhausted: disable the bearer's queues */
	if (!bearer->grant_size)
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock_bh(&qos->qos_lock);
}
  1341. void dfc_qmi_query_flow(void *dfc_data)
  1342. {
  1343. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1344. struct dfc_get_flow_status_resp_msg_v01 *resp;
  1345. struct dfc_svc_ind *svc_ind;
  1346. int rc;
  1347. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  1348. if (!resp)
  1349. return;
  1350. svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
  1351. if (!svc_ind) {
  1352. kfree(resp);
  1353. return;
  1354. }
  1355. if (!data)
  1356. goto done;
  1357. rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
  1358. if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
  1359. resp->flow_status_len > DFC_MAX_BEARERS_V01)
  1360. goto done;
  1361. svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
  1362. svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
  1363. memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
  1364. sizeof(resp->flow_status[0]) * resp->flow_status_len);
  1365. dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
  1366. done:
  1367. kfree(svc_ind);
  1368. kfree(resp);
  1369. }