  1. /*
  2. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <net/pkt_sched.h>
  14. #include "rmnet_qmi.h"
  15. #include "qmi_rmnet.h"
  16. #include "dfc_defs.h"
  17. #define CREATE_TRACE_POINTS
  18. #include "dfc.h"
/* On-wire QMAP command header prepended to DFC ack commands.
 * pkt_len is the length of the payload that follows this header, in
 * network byte order.
 * NOTE(review): __aligned(1) does not suppress internal padding the way
 * __packed would; the layout happens to have no padding here, but
 * confirm before adding fields.
 */
struct dfc_qmap_header {
	u8 pad_len:6;		/* bytes of trailing pad in the frame */
	u8 reserved_bit:1;
	u8 cd_bit:1;		/* 1 = command frame, 0 = data frame */
	u8 mux_id;		/* logical channel (rmnet mux) id */
	__be16 pkt_len;
} __aligned(1);
/* QMAP "DFC ack" command sent back to the modem over the data path to
 * acknowledge an end-of-data flow-control indication.  Field order and
 * widths mirror the wire format; multi-byte fields are stored in
 * network byte order by dfc_send_ack().
 */
struct dfc_ack_cmd {
	struct dfc_qmap_header header;
	u8 command_name;	/* 4 = flow control ack (set by sender) */
	u8 cmd_type:2;		/* 0 = request */
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;	/* qos->tran_num, network byte order */
	u8 ver:2;		/* command version, currently 2 */
	u8 reserved3:6;
	u8 type:2;		/* DFC_ACK_TYPE_* being acknowledged */
	u8 reserved4:6;
	u16 dfc_seq;		/* sequence number, network byte order */
	u8 reserved5[3];
	u8 bearer_id;
} __aligned(1);
/* Deferred service bring-up, scheduled once the DFC service appears. */
static void dfc_svc_init(struct work_struct *work);

/* **************************************************** */
/* QMI DFC service identity and message ids/sizes.  The *_MAX_MSG_LEN
 * values are the maximum encoded TLV lengths for each message and are
 * passed to qmi_send_request() for buffer sizing.
 */
#define DFC_SERVICE_ID_V01 0x4E
#define DFC_SERVICE_VERS_V01 0x01
#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)	/* txn wait timeout */

#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11
#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024

#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
/* Bind-client request: associates this control client with a data
 * endpoint (optional TLV 0x10).
 */
struct dfc_bind_client_req_msg_v01 {
	u8 ep_id_valid;		/* ep_id TLV present */
	struct data_ep_id_type_v01 ep_id;
};

struct dfc_bind_client_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

/* Indication-register request: enables/disables unsolicited flow-status
 * and tx-link-status indications from the service.
 */
struct dfc_indication_register_req_msg_v01 {
	u8 report_flow_status_valid;
	u8 report_flow_status;
	u8 report_tx_link_status_valid;
	u8 report_tx_link_status;
};

struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};
/* QMI wire-encoding table for dfc_qos_id_type_v01 (qos_id + ip_type). */
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   qos_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for one flow-status record: subscription, mux, bearer,
 * grant in bytes, sequence number, and a variable-length qos-id list.
 */
static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   num_bytes),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   seq_num),
		.ei_array = NULL,
	},
	{
		/* QMI_DATA_LEN: encoded length prefix for qos_ids below */
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_QOS_ID_V01,
		.elem_size = sizeof(struct dfc_qos_id_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids),
		.ei_array = dfc_qos_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for ancillary per-bearer info (the "reserved" u32
 * carries flag bits such as TCP-bidir and RAT-switch; interpreted via
 * the DFC_IS_* macros elsewhere).
 */
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   reserved),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Query request: optional list of bearer ids to fetch status for. */
struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
};

/* Query response: per-bearer flow status array (optional TLV 0x10). */
struct dfc_get_flow_status_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
};

/* Queued copy of a received indication, processed off the qrtr RX path;
 * msg_id selects which union member is populated.
 */
struct dfc_svc_ind {
	struct list_head list;
	u16 msg_id;
	union {
		struct dfc_flow_status_ind_msg_v01 dfc_info;
		struct dfc_tx_link_status_ind_msg_v01 tx_status;
	} d;
};
/* Encoding table for the bind-client request (optional ep_id, TLV 0x10). */
static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the bind-client response (standard result TLV 0x02). */
static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct dfc_bind_client_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the indication-register request: two optional
 * u8 flags (flow status TLV 0x10, tx link status TLV 0x11), each with
 * its validity flag.
 */
static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the indication-register response (result TLV 0x02). */
static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_indication_register_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the unsolicited flow-status indication:
 * TLV 0x10 - variable-length array of flow-status records,
 * TLV 0x11 - eod_ack_reqd flag,
 * TLV 0x12 - variable-length array of ancillary info records.
 */
static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_ancillary_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info),
		.ei_array = dfc_ancillary_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the get-flow-status request (optional bearer id
 * list, TLV 0x10).
 */
static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(u8),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the get-flow-status response: result TLV 0x02 plus
 * an optional variable-length array of flow-status records (TLV 0x10).
 */
static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for one bearer record in a tx-link-status indication. */
static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Encoding table for the tx-link-status indication: mandatory tx_status
 * (TLV 0x01) plus an optional variable-length bearer list (TLV 0x10).
 */
static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   tx_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_bearer_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info),
		.ei_array = dfc_bearer_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* Send a synchronous bind-client request and wait for the response.
 *
 * Associates this QMI client with the data endpoint described by @svc
 * so the service knows which interface later indications refer to.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative qmi
 * core error, or the negated QMI result code if the service rejects
 * the request.
 *
 * NOTE(review): qmi_txn_wait() blocks, so callers must be in process
 * context; GFP_ATOMIC here looks stricter than needed -- consider
 * GFP_KERNEL after confirming all call paths.
 */
static int
dfc_bind_client_req(struct qmi_handle *dfc_handle,
		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
{
	struct dfc_bind_client_resp_msg_v01 *resp;
	struct dfc_bind_client_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/* Register the txn before sending so the response can be matched */
	ret = qmi_txn_init(dfc_handle, &txn,
			   dfc_bind_client_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->ep_id_valid = 1;
	req->ep_id.ep_type = svc->ep_type;
	req->ep_id.iface_id = svc->iface_id;
	ret = qmi_send_request(dfc_handle, ssctl, &txn,
			       QMI_DFC_BIND_CLIENT_REQ_V01,
			       QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
			       dfc_bind_client_req_msg_v01_ei, req);
	if (ret < 0) {
		/* Send failed: the txn must be cancelled explicitly */
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}
/* Enable (@reg = 1) or disable (@reg = 0) both flow-status and
 * tx-link-status indications from the DFC service, synchronously.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative qmi
 * core error, or the negated QMI result code on rejection.
 *
 * NOTE(review): same GFP_ATOMIC-in-sleepable-context observation as
 * dfc_bind_client_req().
 */
static int
dfc_indication_register_req(struct qmi_handle *dfc_handle,
			    struct sockaddr_qrtr *ssctl, u8 reg)
{
	struct dfc_indication_register_resp_msg_v01 *resp;
	struct dfc_indication_register_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(dfc_handle, &txn,
			   dfc_indication_register_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	/* Register/deregister both indication types together */
	req->report_flow_status_valid = 1;
	req->report_flow_status = reg;
	req->report_tx_link_status_valid = 1;
	req->report_tx_link_status = reg;
	ret = qmi_send_request(dfc_handle, ssctl, &txn,
			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
			       QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
			       dfc_indication_register_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}
/* Synchronously query current flow status for all bearers.
 *
 * @resp is caller-owned and is filled in by the QMI core when the
 * response arrives (decoded via dfc_get_flow_status_resp_msg_v01_ei).
 * Unlike the other requests here the txn is heap-allocated; it is freed
 * on every exit path below.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative qmi
 * core error, or the negated QMI result code on rejection.
 */
static int
dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
			struct sockaddr_qrtr *ssctl,
			struct dfc_get_flow_status_resp_msg_v01 *resp)
{
	struct dfc_get_flow_status_req_msg_v01 *req;
	struct qmi_txn *txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
	if (!txn) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(dfc_handle, txn,
			   dfc_get_flow_status_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	/* Request left zeroed: no bearer filter, query everything */
	ret = qmi_send_request(dfc_handle, ssctl, txn,
			       QMI_DFC_GET_FLOW_STATUS_REQ_V01,
			       QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
			       dfc_get_flow_status_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(txn);
	kfree(req);
	return ret;
}
  810. static int dfc_init_service(struct dfc_qmi_data *data)
  811. {
  812. int rc;
  813. rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
  814. if (rc < 0)
  815. return rc;
  816. return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
  817. }
/* Build and transmit a DFC ack command for @bearer_id over the data
 * path (or hand it to the QMAP path when dfc_qmap is set).
 *
 * The frame is a dfc_ack_cmd with multi-byte fields converted to
 * network byte order; qos->tran_num is stamped into the command and
 * incremented after the trace point.  Silently returns if the device
 * has no qos state or the skb cannot be allocated (best-effort ack).
 */
static void
dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	struct sk_buff *skb;
	struct dfc_ack_cmd *msg;
	int data_size = sizeof(struct dfc_ack_cmd);
	int header_size = sizeof(struct dfc_qmap_header);

	if (!qos)
		return;

	/* QMAP mode has its own ack path */
	if (dfc_qmap) {
		dfc_qmap_send_ack(qos, bearer_id, seq, type);
		return;
	}

	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return;

	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
	memset(msg, 0, data_size);

	msg->header.cd_bit = 1;		/* command frame */
	msg->header.mux_id = mux_id;
	msg->header.pkt_len = htons(data_size - header_size);

	msg->bearer_id = bearer_id;
	msg->command_name = 4;
	msg->cmd_type = 0;
	msg->dfc_seq = htons(seq);
	msg->type = type;
	msg->ver = 2;
	msg->transaction_id = htonl(qos->tran_num);

	skb->dev = qos->real_dev;
	skb->protocol = htons(ETH_P_MAP);

	/* Trace with the pre-increment transaction number used above */
	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
	qos->tran_num++;

	rmnet_map_tx_qmap_cmd(skb, RMNET_CH_DEFAULT, true);
}
  853. int dfc_bearer_flow_ctl(struct net_device *dev,
  854. struct rmnet_bearer_map *bearer,
  855. struct qos_info *qos)
  856. {
  857. bool enable;
  858. enable = bearer->grant_size ? true : false;
  859. /* Do not flow disable tcp ack q in tcp bidir
  860. * ACK queue opened first to drain ACKs faster
  861. * Although since tcp ancillary is true most of the time,
  862. * this shouldn't really make a difference
  863. * If there is non zero grant but tcp ancillary is false,
  864. * send out ACKs anyway
  865. */
  866. if (bearer->ack_mq_idx != INVALID_MQ)
  867. qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
  868. enable || bearer->tcp_bidir);
  869. qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
  870. if (!enable && bearer->ack_req)
  871. dfc_send_ack(dev, bearer->bearer_id,
  872. bearer->seq, qos->mux_id,
  873. DFC_ACK_TYPE_DISABLE);
  874. return 0;
  875. }
  876. static int dfc_all_bearer_flow_ctl(struct net_device *dev,
  877. struct qos_info *qos, u8 ack_req, u32 ancillary,
  878. struct dfc_flow_status_info_type_v01 *fc_info)
  879. {
  880. struct rmnet_bearer_map *bearer;
  881. list_for_each_entry(bearer, &qos->bearer_head, list) {
  882. bearer->grant_size = fc_info->num_bytes;
  883. bearer->grant_thresh =
  884. qmi_rmnet_grant_per(bearer->grant_size);
  885. bearer->seq = fc_info->seq_num;
  886. bearer->ack_req = ack_req;
  887. bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
  888. bearer->last_grant = fc_info->num_bytes;
  889. bearer->last_seq = fc_info->seq_num;
  890. bearer->last_adjusted_grant = fc_info->num_bytes;
  891. dfc_bearer_flow_ctl(dev, bearer, qos);
  892. }
  893. return 0;
  894. }
  895. static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
  896. struct dfc_flow_status_info_type_v01 *fc_info)
  897. {
  898. u32 grant;
  899. if (!fc_info->rx_bytes_valid)
  900. return fc_info->num_bytes;
  901. if (bearer->bytes_in_flight > fc_info->rx_bytes)
  902. bearer->bytes_in_flight -= fc_info->rx_bytes;
  903. else
  904. bearer->bytes_in_flight = 0;
  905. /* Adjusted grant = grant - bytes_in_flight */
  906. if (fc_info->num_bytes > bearer->bytes_in_flight)
  907. grant = fc_info->num_bytes - bearer->bytes_in_flight;
  908. else
  909. grant = 0;
  910. trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
  911. fc_info->num_bytes, fc_info->rx_bytes,
  912. bearer->bytes_in_flight, grant);
  913. return grant;
  914. }
/* Apply a single-bearer flow-control grant from the modem to the bearer
 * map entry, and push the new enable/disable state to the tx queues when
 * it actually changed.
 *
 * Called with qos->qos_lock held (see dfc_do_burst_flow_control).
 * Returns 0, or the result of dfc_bearer_flow_ctl() when queue state
 * changed.
 */
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
			     u8 ack_req, u32 ancillary,
			     struct dfc_flow_status_info_type_v01 *fc_info,
			     bool is_query)
{
	struct rmnet_bearer_map *itm = NULL;
	int rc = 0;
	bool action = false;
	u32 adjusted_grant;

	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
	/* NOTE(review): falls back to a no-ref lookup — presumably to still
	 * honor grants for a bearer not (yet) in the map; confirm against
	 * qmi_rmnet_get_bearer_noref()'s definition.
	 */
	if (!itm)
		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);

	if (itm) {
		/* The RAT switch flag indicates the start and end of
		 * the switch. Ignore indications in between.
		 * (A zero-byte grant marks the start, non-zero the end.)
		 */
		if (DFC_IS_RAT_SWITCH(ancillary))
			itm->rat_switch = !fc_info->num_bytes;
		else
			if (itm->rat_switch)
				return 0;

		/* If TX is OFF but we received grant, ignore it */
		if (itm->tx_off && fc_info->num_bytes > 0)
			return 0;

		/* Adjust grant for a query: subtract bytes already in
		 * flight so the usable grant is not overstated.
		 */
		if (dfc_qmap && is_query) {
			adjusted_grant = dfc_adjust_grant(itm, fc_info);
		} else {
			adjusted_grant = fc_info->num_bytes;
			itm->bytes_in_flight = 0;
		}

		/* update queue state only if there is a change in grant
		 * (zero <-> non-zero transition) or change in ancillary
		 * tcp state
		 */
		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
		    (itm->grant_size > 0 && adjusted_grant == 0) ||
		    (itm->tcp_bidir ^ DFC_IS_TCP_BIDIR(ancillary)))
			action = true;

		/* This is needed by qmap: close out a pending ack request
		 * that the new indication no longer carries.
		 */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_ack(qos, itm->bearer_id,
					  itm->seq, DFC_ACK_TYPE_DISABLE);

		itm->grant_size = adjusted_grant;

		/* No further query if the adjusted grant is less
		 * than 20% of the original grant. Add to watch to
		 * recover if no indication is received.
		 */
		if (dfc_qmap && is_query &&
		    itm->grant_size < (fc_info->num_bytes / 5)) {
			itm->grant_thresh = itm->grant_size;
			qmi_rmnet_watchdog_add(itm);
		} else {
			itm->grant_thresh =
				qmi_rmnet_grant_per(itm->grant_size);
			qmi_rmnet_watchdog_remove(itm);
		}

		itm->seq = fc_info->seq_num;
		itm->ack_req = ack_req;
		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
		itm->last_grant = fc_info->num_bytes;
		itm->last_seq = fc_info->seq_num;
		itm->last_adjusted_grant = adjusted_grant;

		if (action)
			rc = dfc_bearer_flow_ctl(dev, itm, qos);
	}

	return rc;
}
/* Process a flow status indication (or query response, when @is_query) from
 * the modem. For each reported flow: match up any ancillary flags, locate
 * the rmnet device and its qos state, and apply the grant — to all bearers
 * when the wildcard bearer id 0xFF is used, otherwise to the one bearer.
 *
 * May modify @ind->flow_status[] in place (powersave rewrite below), so the
 * caller must own that buffer.
 */
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind,
			       bool is_query)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
	struct dfc_ancillary_info_type_v01 *ai;
	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
	u32 ancillary;
	int i, j;

	rcu_read_lock();

	for (i = 0; i < ind->flow_status_len; i++) {
		flow_status = &ind->flow_status[i];

		/* Find the ancillary word for this mux/bearer, if any */
		ancillary = 0;
		if (ind->ancillary_info_valid) {
			for (j = 0; j < ind->ancillary_info_len; j++) {
				ai = &ind->ancillary_info[j];
				if (ai->mux_id == flow_status->mux_id &&
				    ai->bearer_id == flow_status->bearer_id) {
					ancillary = ai->reserved;
					break;
				}
			}
		}

		trace_dfc_flow_ind(dfc->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
				   flow_status->seq_num,
				   ack_req,
				   ancillary);

		/* NOTE(review): a missing dev aborts all remaining entries
		 * while a missing qos only skips this one — presumably no
		 * dev means the port is being torn down; confirm intended.
		 */
		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		spin_lock_bh(&qos->qos_lock);

		/* In powersave, rewrite any non-zero grant to the default
		 * grant (and force a bytes-in-flight reset); drop pure
		 * disable indications entirely.
		 */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			if (flow_status->num_bytes) {
				flow_status->num_bytes = DEFAULT_GRANT;
				flow_status->seq_num = 0;
				/* below is to reset bytes-in-flight */
				flow_status->rx_bytes_valid = 1;
				flow_status->rx_bytes = 0xFFFFFFFF;
			} else {
				spin_unlock_bh(&qos->qos_lock);
				continue;
			}
		}

		/* Bearer id 0xFF is a wildcard covering every bearer */
		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
				dev, qos, ack_req, ancillary, flow_status);
		else
			dfc_update_fc_map(
				dev, qos, ack_req, ancillary, flow_status,
				is_query);

		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
  1047. static void dfc_update_tx_link_status(struct net_device *dev,
  1048. struct qos_info *qos, u8 tx_status,
  1049. struct dfc_bearer_info_type_v01 *binfo)
  1050. {
  1051. struct rmnet_bearer_map *itm = NULL;
  1052. itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
  1053. if (!itm)
  1054. return;
  1055. /* If no change in tx status, ignore */
  1056. if (itm->tx_off == !tx_status)
  1057. return;
  1058. if (itm->grant_size && !tx_status) {
  1059. itm->grant_size = 0;
  1060. itm->tcp_bidir = false;
  1061. itm->bytes_in_flight = 0;
  1062. qmi_rmnet_watchdog_remove(itm);
  1063. dfc_bearer_flow_ctl(dev, itm, qos);
  1064. } else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
  1065. itm->grant_size = DEFAULT_GRANT;
  1066. itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
  1067. itm->seq = 0;
  1068. itm->ack_req = 0;
  1069. dfc_bearer_flow_ctl(dev, itm, qos);
  1070. }
  1071. itm->tx_off = !tx_status;
  1072. }
  1073. void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
  1074. struct dfc_tx_link_status_ind_msg_v01 *ind)
  1075. {
  1076. struct net_device *dev;
  1077. struct qos_info *qos;
  1078. struct dfc_bearer_info_type_v01 *bearer_info;
  1079. int i;
  1080. rcu_read_lock();
  1081. for (i = 0; i < ind->bearer_info_len; i++) {
  1082. bearer_info = &ind->bearer_info[i];
  1083. trace_dfc_tx_link_status_ind(dfc->index, i,
  1084. ind->tx_status,
  1085. bearer_info->mux_id,
  1086. bearer_info->bearer_id);
  1087. dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
  1088. bearer_info->mux_id);
  1089. if (!dev)
  1090. goto clean_out;
  1091. qos = (struct qos_info *)rmnet_get_qos_pt(dev);
  1092. if (!qos)
  1093. continue;
  1094. spin_lock_bh(&qos->qos_lock);
  1095. dfc_update_tx_link_status(
  1096. dev, qos, ind->tx_status, bearer_info);
  1097. spin_unlock_bh(&qos->qos_lock);
  1098. }
  1099. clean_out:
  1100. rcu_read_unlock();
  1101. }
/* Work function that drains dfc->qmi_ind_q and dispatches each queued QMI
 * indication to its handler. Items are popped one at a time under
 * qmi_ind_lock; processing runs with BH disabled (NOTE(review): presumably
 * to batch against the softirq-context users of qos_lock — confirm).
 * Afterwards the DL-message-active marker is refreshed for the powersave
 * logic via qmi_rmnet_set_dl_msg_active().
 */
static void dfc_qmi_ind_work(struct work_struct *work)
{
	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
						qmi_ind_work);
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* Defensive only: container_of() of a valid work item is non-NULL */
	if (!dfc)
		return;

	local_bh_disable();

	do {
		/* Pop the oldest queued indication, if any */
		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
						   struct dfc_svc_ind, list);
		if (svc_ind)
			list_del(&svc_ind->list);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		if (!svc_ind)
			break;

		/* Once a restart began, drop (but still free) indications */
		if (!dfc->restart_state) {
			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
				dfc_do_burst_flow_control(
						dfc, &svc_ind->d.dfc_info,
						false);
			else if (svc_ind->msg_id ==
					QMI_DFC_TX_LINK_STATUS_IND_V01)
				dfc_handle_tx_link_status_ind(
						dfc, &svc_ind->d.tx_status);
		}
		kfree(svc_ind);
	} while (1);

	local_bh_enable();

	qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
}
  1135. static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
  1136. struct qmi_txn *txn, const void *data)
  1137. {
  1138. struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
  1139. handle);
  1140. struct dfc_flow_status_ind_msg_v01 *ind_msg;
  1141. struct dfc_svc_ind *svc_ind;
  1142. unsigned long flags;
  1143. if (qmi != &dfc->handle)
  1144. return;
  1145. ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
  1146. if (ind_msg->flow_status_valid) {
  1147. if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
  1148. pr_err("%s() Invalid fc info len: %d\n",
  1149. __func__, ind_msg->flow_status_len);
  1150. return;
  1151. }
  1152. svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
  1153. if (!svc_ind)
  1154. return;
  1155. svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
  1156. memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));
  1157. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1158. list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
  1159. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1160. queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
  1161. }
  1162. }
  1163. static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
  1164. struct sockaddr_qrtr *sq,
  1165. struct qmi_txn *txn, const void *data)
  1166. {
  1167. struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
  1168. handle);
  1169. struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
  1170. struct dfc_svc_ind *svc_ind;
  1171. unsigned long flags;
  1172. if (qmi != &dfc->handle)
  1173. return;
  1174. ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
  1175. if (ind_msg->bearer_info_valid) {
  1176. if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
  1177. pr_err("%s() Invalid bearer info len: %d\n",
  1178. __func__, ind_msg->bearer_info_len);
  1179. return;
  1180. }
  1181. svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
  1182. if (!svc_ind)
  1183. return;
  1184. svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
  1185. memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));
  1186. spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
  1187. list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
  1188. spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
  1189. queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
  1190. }
  1191. }
/* Work item queued by dfc_svc_arrive() when the DFC service shows up on
 * the bus. Runs the service-level init (dfc_init_service(), defined
 * elsewhere), then publishes this client in the qmi_info tables under
 * rtnl, promoting it from dfc_pending[] to dfc_clients[].
 */
static void dfc_svc_init(struct work_struct *work)
{
	int rc = 0;
	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;

	/* Client is being torn down; abandon the init */
	if (data->restart_state == 1)
		return;

	rc = dfc_init_service(data);
	if (rc < 0) {
		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
		return;
	}

	/* Re-check after the (potentially long) init exchange */
	if (data->restart_state == 1)
		return;

	/* Trylock loop so a concurrent teardown (restart_state) can abort
	 * the wait instead of this work blocking on rtnl indefinitely.
	 */
	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi) {
		rtnl_unlock();
		return;
	}

	/* Promote from pending slot to active client slot */
	qmi->dfc_pending[data->index] = NULL;
	qmi->dfc_clients[data->index] = (void *)data;
	trace_dfc_client_state_up(data->index,
				  data->svc.instance,
				  data->svc.ep_type,
				  data->svc.iface_id);

	rtnl_unlock();

	pr_info("Connection established with the DFC Service\n");
}
  1227. static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
  1228. {
  1229. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1230. handle);
  1231. data->ssctl.sq_family = AF_QIPCRTR;
  1232. data->ssctl.sq_node = svc->node;
  1233. data->ssctl.sq_port = svc->port;
  1234. queue_work(data->dfc_wq, &data->svc_arrive);
  1235. return 0;
  1236. }
  1237. static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
  1238. {
  1239. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1240. handle);
  1241. if (!data)
  1242. pr_debug("%s() data is null\n", __func__);
  1243. }
/* QMI server lifecycle callbacks: new_server fires when a DFC service
 * instance appears on the QRTR bus, del_server when it disappears.
 */
static struct qmi_ops server_ops = {
	.new_server = dfc_svc_arrive,
	.del_server = dfc_svc_exit,
};
/* Dispatch table for the QMI indications this client handles; the empty
 * entry terminates the array as the QMI core requires.
 */
static struct qmi_msg_handler qmi_indication_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
		.ei = dfc_flow_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
		.fn = dfc_clnt_ind_cb,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
		.ei = dfc_tx_link_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
		.fn = dfc_tx_link_status_ind_cb,
	},
	{},
};
/* Create a DFC QMI client for the given service instance: allocate client
 * state, create its single-threaded workqueue, initialize the QMI handle
 * with the indication handlers, and register a lookup for the DFC service.
 * The client sits in qmi->dfc_pending[index] until dfc_svc_init() promotes
 * it to dfc_clients[] once the service arrives.
 *
 * @port:  rmnet port the client belongs to
 * @index: client slot index in the qmi_info tables
 * @psvc:  service identity (instance/ep_type/iface_id), copied into @data
 * @qmi:   per-port qmi_info bookkeeping
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto cleanup chain.
 */
int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
			struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;
	int rc = -ENOMEM;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
	INIT_LIST_HEAD(&data->qmi_ind_q);
	spin_lock_init(&data->qmi_ind_lock);

	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
	}

	INIT_WORK(&data->svc_arrive, dfc_svc_init);
	rc = qmi_handle_init(&data->handle,
			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
			     &server_ops, qmi_indication_handler);
	if (rc < 0) {
		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
		goto err1;
	}

	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
			    DFC_SERVICE_VERS_V01,
			    psvc->instance);
	if (rc < 0) {
		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
		goto err2;
	}

	/* Parked here until dfc_svc_init() promotes it to dfc_clients[] */
	qmi->dfc_pending[index] = (void *)data;

	return 0;

err2:
	qmi_handle_release(&data->handle);
err1:
	destroy_workqueue(data->dfc_wq);
err0:
	kfree(data);
	return rc;
}
  1312. void dfc_qmi_client_exit(void *dfc_data)
  1313. {
  1314. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1315. if (!data) {
  1316. pr_err("%s() data is null\n", __func__);
  1317. return;
  1318. }
  1319. data->restart_state = 1;
  1320. trace_dfc_client_state_down(data->index, 0);
  1321. qmi_handle_release(&data->handle);
  1322. drain_workqueue(data->dfc_wq);
  1323. destroy_workqueue(data->dfc_wq);
  1324. kfree(data);
  1325. }
/* Charge @len transmitted bytes on flow @mark against the owning bearer's
 * grant. Sends a THRESHOLD ack the moment the remaining grant crosses
 * grant_thresh, and disables the queues via dfc_bearer_flow_ctl() when the
 * grant is fully exhausted.
 */
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len)
{
	struct rmnet_bearer_map *bearer = NULL;
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock_bh(&qos->qos_lock);

	/* Mark is flow_id */
	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (likely(itm))
		bearer = itm->bearer;
	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, mark, bearer->grant_size);

	/* Counted even with no grant, so dfc_adjust_grant() can later
	 * subtract what is already in flight.
	 */
	bearer->bytes_in_flight += len;

	if (!bearer->grant_size)
		goto out;

	start_grant = bearer->grant_size;
	if (len >= bearer->grant_size)
		bearer->grant_size = 0;
	else
		bearer->grant_size -= len;

	/* Request more credit exactly once, on the threshold crossing */
	if (start_grant > bearer->grant_thresh &&
	    bearer->grant_size <= bearer->grant_thresh) {
		dfc_send_ack(dev, bearer->bearer_id,
			     bearer->seq, qos->mux_id,
			     DFC_ACK_TYPE_THRESHOLD);
	}

	if (!bearer->grant_size)
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock_bh(&qos->qos_lock);
}
  1360. void dfc_qmi_query_flow(void *dfc_data)
  1361. {
  1362. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1363. struct dfc_get_flow_status_resp_msg_v01 *resp;
  1364. struct dfc_svc_ind *svc_ind;
  1365. int rc;
  1366. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  1367. if (!resp)
  1368. return;
  1369. svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
  1370. if (!svc_ind) {
  1371. kfree(resp);
  1372. return;
  1373. }
  1374. if (!data)
  1375. goto done;
  1376. rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
  1377. if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
  1378. resp->flow_status_len > DFC_MAX_BEARERS_V01)
  1379. goto done;
  1380. svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
  1381. svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
  1382. memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
  1383. sizeof(resp->flow_status[0]) * resp->flow_status_len);
  1384. dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
  1385. done:
  1386. kfree(svc_ind);
  1387. kfree(resp);
  1388. }