/* dfc_qmi.c */
  1. /*
  2. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <net/pkt_sched.h>
  15. #include "rmnet_qmi.h"
  16. #include "qmi_rmnet.h"
  17. #include "dfc_defs.h"
  18. #define CREATE_TRACE_POINTS
  19. #include "dfc.h"
/* In-band QMAP control header prepended to DFC command packets.
 * NOTE(review): __aligned(1) does not pack the struct; the layout here
 * happens to contain no internal padding, but __packed is the usual
 * intent for a wire format - confirm against the QMAP spec.
 */
struct dfc_qmap_header {
	u8 pad_len:6;
	u8 reserved_bit:1;
	u8 cd_bit:1;		/* 1 = command/control packet (see dfc_send_ack) */
	u8 mux_id;
	__be16 pkt_len;		/* length of payload after this header */
} __aligned(1);

/* QMAP flow-control ACK command built and sent by dfc_send_ack(). */
struct dfc_ack_cmd {
	struct dfc_qmap_header header;
	u8 command_name;	/* set to 4 by dfc_send_ack() */
	u8 cmd_type:2;
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;	/* network byte order (htonl) */
	u8 ver:2;
	u8 reserved3:6;
	u8 type:2;		/* DFC ack type, e.g. DFC_ACK_TYPE_DISABLE */
	u8 reserved4:6;
	u16 dfc_seq;		/* network byte order (htons) */
	u8 reserved5[3];
	u8 bearer_id;
} __aligned(1);
static void dfc_svc_init(struct work_struct *work);

/* Defined elsewhere; when set, TX link status indications are not
 * registered for (see dfc_indication_register_req()).
 */
extern int dfc_ps_ext;

/* **************************************************** */

/* DFC QMI service identity */
#define DFC_SERVICE_ID_V01 0x4E
#define DFC_SERVICE_VERS_V01 0x01

/* Timeout for synchronous QMI transactions */
#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)

/* QMI DFC message IDs and maximum encoded message lengths */
#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11
#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024

#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
/* Request to bind this DFC client to a data endpoint. */
struct dfc_bind_client_req_msg_v01 {
	u8 ep_id_valid;		/* optional-TLV present flag */
	struct data_ep_id_type_v01 ep_id;
};

struct dfc_bind_client_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

/* Request to (un)register for flow-status / TX-link-status indications. */
struct dfc_indication_register_req_msg_v01 {
	u8 report_flow_status_valid;
	u8 report_flow_status;
	u8 report_tx_link_status_valid;
	u8 report_tx_link_status;
};

struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};
/* QMI encoding rules for struct dfc_qos_id_type_v01 (qos_id, ip_type).
 * Nested struct - members use QMI_COMMON_TLV_TYPE, not their own TLV IDs.
 */
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   qos_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encoding rules for struct dfc_flow_status_info_type_v01:
 * per-bearer grant (num_bytes), sequence number and an optional
 * variable-length list of QoS ids (qos_ids_len + qos_ids pair).
 */
static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   num_bytes),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   seq_num),
		.ei_array = NULL,
	},
	{
		/* length prefix for the qos_ids variable-length array below */
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_QOS_ID_V01,
		.elem_size = sizeof(struct dfc_qos_id_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids),
		.ei_array = dfc_qos_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encoding rules for struct dfc_ancillary_info_type_v01
 * (subs/mux/bearer ids plus a 32-bit ancillary word, named "reserved").
 */
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   reserved),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Flow-status query request. The bearer list is optional;
 * dfc_get_flow_status_req() sends it zeroed (no TLVs), presumably
 * meaning "all bearers" - confirm against the service definition.
 */
struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
};

struct dfc_get_flow_status_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
};

/* Queued copy of a received indication, dispatched by msg_id. */
struct dfc_svc_ind {
	struct list_head list;
	u16 msg_id;	/* QMI_DFC_FLOW_STATUS_IND_V01 or
			 * QMI_DFC_TX_LINK_STATUS_IND_V01
			 */
	union {
		struct dfc_flow_status_ind_msg_v01 dfc_info;
		struct dfc_tx_link_status_ind_msg_v01 tx_status;
	} d;
};
/* Encoding rules for the BIND_CLIENT request: one optional TLV (0x10)
 * carrying the endpoint id (flag + struct pair share the TLV type).
 */
static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the BIND_CLIENT response: standard result TLV (0x02). */
static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct dfc_bind_client_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the INDICATION_REGISTER request: two optional
 * flag/value TLV pairs (0x10 flow status, 0x11 TX link status).
 */
static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the INDICATION_REGISTER response: result TLV only. */
static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_indication_register_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the FLOW_STATUS indication:
 *   0x10 - optional var-len array of per-bearer flow status entries
 *   0x11 - optional end-of-data ack-required flag
 *   0x12 - optional var-len array of ancillary info entries
 * Each optional array is a flag + length + data triple sharing one TLV id.
 */
static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_ancillary_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info),
		.ei_array = dfc_ancillary_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the GET_FLOW_STATUS request: one optional
 * var-len array of bearer ids (TLV 0x10).
 */
static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(u8),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the GET_FLOW_STATUS response: result TLV (0x02)
 * plus an optional var-len array of flow status entries (TLV 0x10).
 */
static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encoding rules for struct dfc_bearer_info_type_v01
 * (subs/mux/bearer ids plus the bearer's IP type).
 */
static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding rules for the TX_LINK_STATUS indication: mandatory tx_status
 * byte (TLV 0x01) plus an optional var-len bearer-info array (TLV 0x10).
 */
static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   tx_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_bearer_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info),
		.ei_array = dfc_bearer_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Send a BIND_CLIENT request for the endpoint in @svc and wait
 * synchronously for the response.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative
 * QMI/transaction error, or the negated QMI result code when the
 * service rejects the request.
 *
 * NOTE(review): GFP_ATOMIC suggests an atomic caller, yet
 * qmi_txn_wait() below can sleep - confirm the calling context.
 */
static int
dfc_bind_client_req(struct qmi_handle *dfc_handle,
		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
{
	struct dfc_bind_client_resp_msg_v01 *resp;
	struct dfc_bind_client_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(dfc_handle, &txn,
			   dfc_bind_client_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->ep_id_valid = 1;
	req->ep_id.ep_type = svc->ep_type;
	req->ep_id.iface_id = svc->iface_id;

	ret = qmi_send_request(dfc_handle, ssctl, &txn,
			       QMI_DFC_BIND_CLIENT_REQ_V01,
			       QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
			       dfc_bind_client_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		/* negated QMI result code, not an errno */
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}
/* Register (@reg = 1) or deregister (@reg = 0) for flow-status
 * indications; TX link status indications are only requested when
 * dfc_ps_ext is not set. Waits synchronously for the response.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative
 * QMI/transaction error, or the negated QMI result code on rejection.
 */
static int
dfc_indication_register_req(struct qmi_handle *dfc_handle,
			    struct sockaddr_qrtr *ssctl, u8 reg)
{
	struct dfc_indication_register_resp_msg_v01 *resp;
	struct dfc_indication_register_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(dfc_handle, &txn,
			   dfc_indication_register_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->report_flow_status_valid = 1;
	req->report_flow_status = reg;

	/* TX link status reporting is skipped in powersave-ext mode */
	if (!dfc_ps_ext) {
		req->report_tx_link_status_valid = 1;
		req->report_tx_link_status = reg;
	}

	ret = qmi_send_request(dfc_handle, ssctl, &txn,
			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
			       QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
			       dfc_indication_register_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		/* negated QMI result code, not an errno */
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}
/* Query current flow status from the service, filling caller-provided
 * @resp. The request is sent zeroed, so no bearer-list TLV is encoded
 * (presumably meaning "all bearers" - confirm with the service spec).
 * The txn is heap-allocated here, unlike the stack txn in the other
 * request helpers.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative
 * QMI/transaction error, or the negated QMI result code on rejection.
 */
static int
dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
			struct sockaddr_qrtr *ssctl,
			struct dfc_get_flow_status_resp_msg_v01 *resp)
{
	struct dfc_get_flow_status_req_msg_v01 *req;
	struct qmi_txn *txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
	if (!txn) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(dfc_handle, txn,
			   dfc_get_flow_status_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_send_request(dfc_handle, ssctl, txn,
			       QMI_DFC_GET_FLOW_STATUS_REQ_V01,
			       QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
			       dfc_get_flow_status_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		/* negated QMI result code, not an errno */
		ret = -resp->resp.result;
	}

out:
	kfree(txn);
	kfree(req);
	return ret;
}
  814. static int dfc_init_service(struct dfc_qmi_data *data)
  815. {
  816. int rc;
  817. rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
  818. if (rc < 0)
  819. return rc;
  820. return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
  821. }
/* Build and transmit an in-band QMAP flow-control ACK for @bearer_id.
 * Does nothing if @dev has no qos context; delegates to
 * dfc_qmap_send_ack() when QMAP-based DFC is enabled. Silently drops
 * the ack on skb allocation failure.
 */
static void
dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	struct sk_buff *skb;
	struct dfc_ack_cmd *msg;
	int data_size = sizeof(struct dfc_ack_cmd);
	int header_size = sizeof(struct dfc_qmap_header);

	if (!qos)
		return;

	if (dfc_qmap) {
		dfc_qmap_send_ack(qos, bearer_id, seq, type);
		return;
	}

	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return;

	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
	memset(msg, 0, data_size);

	msg->header.cd_bit = 1;		/* mark as control/command packet */
	msg->header.mux_id = mux_id;
	/* pkt_len covers the payload following the QMAP header */
	msg->header.pkt_len = htons(data_size - header_size);

	msg->bearer_id = bearer_id;
	msg->command_name = 4;
	msg->cmd_type = 0;
	msg->dfc_seq = htons(seq);
	msg->type = type;
	msg->ver = 2;
	msg->transaction_id = htonl(qos->tran_num);

	skb->dev = qos->real_dev;
	skb->protocol = htons(ETH_P_MAP);

	/* trace before incrementing so the logged id matches this packet */
	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
	qos->tran_num++;

	rmnet_map_tx_qmap_cmd(skb, RMNET_CH_DEFAULT, true);
}
  857. int dfc_bearer_flow_ctl(struct net_device *dev,
  858. struct rmnet_bearer_map *bearer,
  859. struct qos_info *qos)
  860. {
  861. bool enable;
  862. enable = bearer->grant_size ? true : false;
  863. /* Do not flow disable tcp ack q in tcp bidir
  864. * ACK queue opened first to drain ACKs faster
  865. * Although since tcp ancillary is true most of the time,
  866. * this shouldn't really make a difference
  867. * If there is non zero grant but tcp ancillary is false,
  868. * send out ACKs anyway
  869. */
  870. if (bearer->ack_mq_idx != INVALID_MQ)
  871. qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
  872. enable || bearer->tcp_bidir);
  873. qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
  874. if (!enable && bearer->ack_req)
  875. dfc_send_ack(dev, bearer->bearer_id,
  876. bearer->seq, qos->mux_id,
  877. DFC_ACK_TYPE_DISABLE);
  878. return 0;
  879. }
/* Apply one flow-status entry (@fc_info grant/sequence) to every bearer
 * on the device, then run per-bearer flow control. Always returns 0.
 * NOTE(review): iterates qos->bearer_head without a visible lock -
 * assumes the caller serializes access; confirm.
 */
static int dfc_all_bearer_flow_ctl(struct net_device *dev,
				struct qos_info *qos, u8 ack_req, u32 ancillary,
				struct dfc_flow_status_info_type_v01 *fc_info)
{
	struct rmnet_bearer_map *bearer;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		bearer->grant_size = fc_info->num_bytes;
		bearer->grant_thresh =
			qmi_rmnet_grant_per(bearer->grant_size);
		bearer->seq = fc_info->seq_num;
		bearer->ack_req = ack_req;
		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
		bearer->last_grant = fc_info->num_bytes;
		bearer->last_seq = fc_info->seq_num;
		bearer->last_adjusted_grant = fc_info->num_bytes;

		dfc_bearer_flow_ctl(dev, bearer, qos);
	}

	return 0;
}
  899. static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
  900. struct dfc_flow_status_info_type_v01 *fc_info)
  901. {
  902. u32 grant;
  903. if (!fc_info->rx_bytes_valid)
  904. return fc_info->num_bytes;
  905. if (bearer->bytes_in_flight > fc_info->rx_bytes)
  906. bearer->bytes_in_flight -= fc_info->rx_bytes;
  907. else
  908. bearer->bytes_in_flight = 0;
  909. /* Adjusted grant = grant - bytes_in_flight */
  910. if (fc_info->num_bytes > bearer->bytes_in_flight)
  911. grant = fc_info->num_bytes - bearer->bytes_in_flight;
  912. else
  913. grant = 0;
  914. trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
  915. fc_info->num_bytes, fc_info->rx_bytes,
  916. bearer->bytes_in_flight, grant);
  917. return grant;
  918. }
/* Apply a single-bearer flow-control update coming from an indication
 * or a flow query. Callers (dfc_do_burst_flow_control) hold
 * qos->qos_lock. Returns 0, or the result of dfc_bearer_flow_ctl()
 * when the queue state actually changed.
 */
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
			     u8 ack_req, u32 ancillary,
			     struct dfc_flow_status_info_type_v01 *fc_info,
			     bool is_query)
{
	struct rmnet_bearer_map *itm = NULL;
	int rc = 0;
	bool action = false;
	u32 adjusted_grant;

	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);

	/* cache the bearer assuming it is a new bearer */
	if (unlikely(!itm && !is_query && fc_info->num_bytes))
		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);

	if (itm) {
		/* The RAT switch flag indicates the start and end of
		 * the switch. Ignore indications in between.
		 */
		if (DFC_IS_RAT_SWITCH(ancillary))
			itm->rat_switch = !fc_info->num_bytes;
		else
			if (itm->rat_switch)
				return 0;

		/* If TX is OFF but we received grant, ignore it */
		if (itm->tx_off && fc_info->num_bytes > 0)
			return 0;

		/* Auto-switch this bearer's queue to the LL channel when
		 * the indication flags it as low latency.
		 */
		if (fc_info->ll_status &&
		    itm->ch_switch.current_ch != RMNET_CH_LL) {
			itm->ch_switch.current_ch = RMNET_CH_LL;
			itm->ch_switch.auto_switched = true;
			if (itm->mq_idx < MAX_MQ_NUM)
				qos->mq[itm->mq_idx].is_ll_ch = RMNET_CH_LL;
		}

		/* Adjust grant for query: deduct bytes in flight */
		if (dfc_qmap && is_query) {
			adjusted_grant = dfc_adjust_grant(itm, fc_info);
		} else {
			adjusted_grant = fc_info->num_bytes;
			itm->bytes_in_flight = 0;
		}

		/* update queue state only if there is a change in grant
		 * or change in ancillary tcp state
		 */
		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
		    (itm->grant_size > 0 && adjusted_grant == 0) ||
		    (itm->tcp_bidir ^ DFC_IS_TCP_BIDIR(ancillary)))
			action = true;

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_ack(qos, itm->bearer_id,
					  itm->seq, DFC_ACK_TYPE_DISABLE);

		itm->grant_size = adjusted_grant;

		/* No further query if the adjusted grant is less
		 * than 20% of the original grant. Add to watch to
		 * recover if no indication is received.
		 */
		if (dfc_qmap && is_query &&
		    itm->grant_size < (fc_info->num_bytes / 5)) {
			itm->grant_thresh = itm->grant_size;
			qmi_rmnet_watchdog_add(itm);
		} else {
			itm->grant_thresh =
				qmi_rmnet_grant_per(itm->grant_size);
			qmi_rmnet_watchdog_remove(itm);
		}

		/* Record the latest state for this bearer */
		itm->seq = fc_info->seq_num;
		itm->ack_req = ack_req;
		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
		itm->last_grant = fc_info->num_bytes;
		itm->last_seq = fc_info->seq_num;
		itm->last_adjusted_grant = adjusted_grant;

		if (action)
			rc = dfc_bearer_flow_ctl(dev, itm, qos);
	}

	return rc;
}
/* Process a flow-status indication (or a query response replayed as
 * one, is_query == true). Runs under rcu_read_lock(); each per-device
 * update is done under that device's qos_lock.
 * Note: a missing device aborts the whole indication (goto clean_out)
 * while a missing qos pointer only skips that entry.
 */
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind,
			       bool is_query)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
	struct dfc_ancillary_info_type_v01 *ai;
	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
	u32 ancillary;
	int i, j;

	rcu_read_lock();

	for (i = 0; i < ind->flow_status_len; i++) {
		flow_status = &ind->flow_status[i];

		/* Find the ancillary word matching this mux/bearer pair */
		ancillary = 0;
		if (ind->ancillary_info_valid) {
			for (j = 0; j < ind->ancillary_info_len; j++) {
				ai = &ind->ancillary_info[j];
				if (ai->mux_id == flow_status->mux_id &&
				    ai->bearer_id == flow_status->bearer_id) {
					ancillary = ai->reserved;
					break;
				}
			}
		}

		trace_dfc_flow_ind(dfc->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
				   flow_status->seq_num,
				   ack_req,
				   ancillary);

		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		spin_lock_bh(&qos->qos_lock);

		/* In powersave, change grant to 1 if it is an enable.
		 * Note this rewrites the indication entry in place.
		 */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			if (flow_status->num_bytes) {
				flow_status->num_bytes = DEFAULT_GRANT;
				flow_status->seq_num = 0;
				/* below is to reset bytes-in-flight */
				flow_status->rx_bytes_valid = 1;
				flow_status->rx_bytes = 0xFFFFFFFF;
			} else {
				/* Grant disables are ignored in powersave */
				spin_unlock_bh(&qos->qos_lock);
				continue;
			}
		}

		/* Bearer id 0xFF addresses every bearer on the mux */
		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
				dev, qos, ack_req, ancillary, flow_status);
		else
			dfc_update_fc_map(
				dev, qos, ack_req, ancillary, flow_status,
				is_query);

		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
/* Handle a TX (uplink) on/off transition for one bearer.
 * Callers (dfc_handle_tx_link_status_ind) hold qos->qos_lock.
 */
static void dfc_update_tx_link_status(struct net_device *dev,
				      struct qos_info *qos, u8 tx_status,
				      struct dfc_bearer_info_type_v01 *binfo)
{
	struct rmnet_bearer_map *itm = NULL;

	itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
	if (!itm)
		return;

	/* If no change in tx status, ignore */
	if (itm->tx_off == !tx_status)
		return;

	if (itm->grant_size && !tx_status) {
		/* TX going off: revoke the outstanding grant and close
		 * the queues.
		 */
		itm->grant_size = 0;
		itm->tcp_bidir = false;
		itm->bytes_in_flight = 0;
		qmi_rmnet_watchdog_remove(itm);
		dfc_bearer_flow_ctl(dev, itm, qos);
	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
		/* TX back on with no grant: seed a default grant so
		 * traffic can resume, unless a RAT switch is in progress.
		 */
		itm->grant_size = DEFAULT_GRANT;
		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		itm->seq = 0;
		itm->ack_req = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
	}

	itm->tx_off = !tx_status;
}
/* Process a TX link status indication, updating every bearer it
 * lists. A missing device aborts the remaining entries (clean_out);
 * a missing qos pointer skips only that entry.
 */
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_bearer_info_type_v01 *bearer_info;
	int i;

	rcu_read_lock();

	for (i = 0; i < ind->bearer_info_len; i++) {
		bearer_info = &ind->bearer_info[i];

		trace_dfc_tx_link_status_ind(dfc->index, i,
					     ind->tx_status,
					     bearer_info->mux_id,
					     bearer_info->bearer_id);

		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  bearer_info->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		/* Per-bearer update is done under the device qos lock */
		spin_lock_bh(&qos->qos_lock);
		dfc_update_tx_link_status(
			dev, qos, ind->tx_status, bearer_info);
		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
  1114. static void dfc_qmi_ind_work(struct work_struct *work)
  1115. {
  1116. struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
  1117. qmi_ind_work);
  1118. struct dfc_svc_ind *svc_ind;
  1119. unsigned long flags;
  1120. if (!dfc)
  1121. return;
  1122. local_bh_disable();
  1123. do {
  1124. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1125. svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
  1126. struct dfc_svc_ind, list);
  1127. if (svc_ind)
  1128. list_del(&svc_ind->list);
  1129. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1130. if (!svc_ind)
  1131. break;
  1132. if (!dfc->restart_state) {
  1133. if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
  1134. dfc_do_burst_flow_control(
  1135. dfc, &svc_ind->d.dfc_info,
  1136. false);
  1137. else if (svc_ind->msg_id ==
  1138. QMI_DFC_TX_LINK_STATUS_IND_V01)
  1139. dfc_handle_tx_link_status_ind(
  1140. dfc, &svc_ind->d.tx_status);
  1141. }
  1142. kfree(svc_ind);
  1143. } while (1);
  1144. local_bh_enable();
  1145. qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
  1146. }
/* QMI callback for QMI_DFC_FLOW_STATUS_IND_V01. The decoded message
 * is validated, copied into a dfc_svc_ind node, and queued to the
 * client workqueue for processing in dfc_qmi_ind_work().
 */
static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
			    struct qmi_txn *txn, const void *data)
{
	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
						handle);
	struct dfc_flow_status_ind_msg_v01 *ind_msg;
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* sanity check on the handle */
	if (qmi != &dfc->handle)
		return;

	ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
	if (ind_msg->flow_status_valid) {
		/* Reject malformed lengths before copying */
		if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
			pr_err("%s() Invalid fc info len: %d\n",
			       __func__, ind_msg->flow_status_len);
			return;
		}

		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
		if (!svc_ind)
			return;

		svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
		memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));

		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
	}
}
/* QMI callback for QMI_DFC_TX_LINK_STATUS_IND_V01. Mirrors
 * dfc_clnt_ind_cb(): validate, copy, and queue to the workqueue.
 */
static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
				      struct sockaddr_qrtr *sq,
				      struct qmi_txn *txn, const void *data)
{
	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
						handle);
	struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* sanity check on the handle */
	if (qmi != &dfc->handle)
		return;

	ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
	if (ind_msg->bearer_info_valid) {
		/* Reject malformed lengths before copying */
		if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
			pr_err("%s() Invalid bearer info len: %d\n",
			       __func__, ind_msg->bearer_info_len);
			return;
		}

		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
		if (!svc_ind)
			return;

		svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
		memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));

		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
	}
}
/* Workqueue handler run when the DFC service arrives: initialize the
 * service connection and publish this client in the qmi_info table.
 * Bails out at every stage if a restart (client teardown) is flagged.
 */
static void dfc_svc_init(struct work_struct *work)
{
	int rc = 0;
	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;

	if (data->restart_state == 1)
		return;

	rc = dfc_init_service(data);
	if (rc < 0) {
		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
		return;
	}

	if (data->restart_state == 1)
		return;

	/* Poll for the RTNL lock so a concurrent restart request can
	 * still abort the registration instead of blocking on it.
	 */
	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi) {
		rtnl_unlock();
		return;
	}

	/* Promote this client from pending to active */
	qmi->dfc_pending[data->index] = NULL;
	qmi->dfc_clients[data->index] = (void *)data;
	trace_dfc_client_state_up(data->index,
				  data->svc.instance,
				  data->svc.ep_type,
				  data->svc.iface_id);

	rtnl_unlock();
	pr_info("Connection established with the DFC Service\n");
}
  1239. static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
  1240. {
  1241. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1242. handle);
  1243. data->ssctl.sq_family = AF_QIPCRTR;
  1244. data->ssctl.sq_node = svc->node;
  1245. data->ssctl.sq_port = svc->port;
  1246. queue_work(data->dfc_wq, &data->svc_arrive);
  1247. return 0;
  1248. }
  1249. static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
  1250. {
  1251. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1252. handle);
  1253. if (!data)
  1254. pr_debug("%s() data is null\n", __func__);
  1255. }
/* Service lifecycle callbacks: invoked when the DFC service appears
 * on or disappears from the QRTR bus.
 */
static struct qmi_ops server_ops = {
	.new_server = dfc_svc_arrive,
	.del_server = dfc_svc_exit,
};
/* Dispatch table for the QMI indications this client consumes */
static struct qmi_msg_handler qmi_indication_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
		.ei = dfc_flow_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
		.fn = dfc_clnt_ind_cb,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
		.ei = dfc_tx_link_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
		.fn = dfc_tx_link_status_ind_cb,
	},
	{},	/* sentinel */
};
/* Create and register a DFC QMI client for the given rmnet port.
 * On success the client is parked in qmi->dfc_pending[index] until
 * the service arrives (see dfc_svc_init()). Returns 0 or a negative
 * errno; all partially-acquired resources are released on failure.
 */
int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
			struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;
	int rc = -ENOMEM;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
	INIT_LIST_HEAD(&data->qmi_ind_q);
	spin_lock_init(&data->qmi_ind_lock);

	/* Single-threaded workqueue keeps indication processing serialized */
	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
	}

	INIT_WORK(&data->svc_arrive, dfc_svc_init);
	rc = qmi_handle_init(&data->handle,
			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
			     &server_ops, qmi_indication_handler);
	if (rc < 0) {
		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
		goto err1;
	}

	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
			    DFC_SERVICE_VERS_V01,
			    psvc->instance);
	if (rc < 0) {
		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
		goto err2;
	}

	qmi->dfc_pending[index] = (void *)data;
	return 0;

err2:
	qmi_handle_release(&data->handle);
err1:
	destroy_workqueue(data->dfc_wq);
err0:
	kfree(data);
	return rc;
}
/* Tear down a DFC QMI client. Flags restart_state first so in-flight
 * work bails out, releases the QMI handle (stopping new callbacks),
 * then drains and destroys the workqueue before freeing the client.
 */
void dfc_qmi_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	data->restart_state = 1;
	trace_dfc_client_state_down(data->index, 0);

	qmi_handle_release(&data->handle);
	drain_workqueue(data->dfc_wq);
	destroy_workqueue(data->dfc_wq);
	kfree(data);
}
/* Charge a transmitted packet of @len bytes against its bearer's
 * grant. Sends a threshold ACK when the grant crosses the threshold
 * and closes the TX queues when the grant is exhausted.
 * @mark is the flow id used to resolve the bearer.
 */
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len)
{
	struct rmnet_bearer_map *bearer = NULL;
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock_bh(&qos->qos_lock);

	/* Mark is flow_id */
	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (likely(itm))
		bearer = itm->bearer;
	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, mark, bearer->grant_size);

	/* Counted even when there is no grant left */
	bearer->bytes_in_flight += len;

	if (!bearer->grant_size)
		goto out;

	start_grant = bearer->grant_size;
	if (len >= bearer->grant_size)
		bearer->grant_size = 0;
	else
		bearer->grant_size -= len;

	/* Ask for more grant when this packet crossed the threshold */
	if (start_grant > bearer->grant_thresh &&
	    bearer->grant_size <= bearer->grant_thresh) {
		dfc_send_ack(dev, bearer->bearer_id,
			     bearer->seq, qos->mux_id,
			     DFC_ACK_TYPE_THRESHOLD);
	}

	if (!bearer->grant_size)
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock_bh(&qos->qos_lock);
}
  1372. void dfc_qmi_query_flow(void *dfc_data)
  1373. {
  1374. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1375. struct dfc_get_flow_status_resp_msg_v01 *resp;
  1376. struct dfc_svc_ind *svc_ind;
  1377. int rc;
  1378. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  1379. if (!resp)
  1380. return;
  1381. svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
  1382. if (!svc_ind) {
  1383. kfree(resp);
  1384. return;
  1385. }
  1386. if (!data)
  1387. goto done;
  1388. rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
  1389. if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
  1390. resp->flow_status_len > DFC_MAX_BEARERS_V01)
  1391. goto done;
  1392. svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
  1393. svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
  1394. memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
  1395. sizeof(resp->flow_status[0]) * resp->flow_status_len);
  1396. dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
  1397. done:
  1398. kfree(svc_ind);
  1399. kfree(resp);
  1400. }