dfc_qmi.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553
  1. /*
  2. * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <net/pkt_sched.h>
  14. #include "rmnet_qmi.h"
  15. #include "qmi_rmnet.h"
  16. #include "dfc_defs.h"
  17. #define CREATE_TRACE_POINTS
  18. #include "dfc.h"
/* QMAP header prepended to in-band DFC ack commands (see dfc_send_ack()).
 * This is a wire format: field order and widths must not change.
 * NOTE(review): __aligned(1) only relaxes alignment, it does not pack the
 * struct the way __packed would; the current layout happens to contain no
 * implicit padding, so the on-wire layout is preserved — confirm before
 * adding fields.
 */
struct dfc_qmap_header {
	u8 pad_len:6;		/* number of trailing pad bytes */
	u8 reserved_bit:1;
	u8 cd_bit:1;		/* set to 1 for command frames by dfc_send_ack() */
	u8 mux_id;
	__be16 pkt_len;		/* payload length, excludes this header */
} __aligned(1);
/* In-band DFC ack sent to the modem as a QMAP command skb.
 * Wire format — multi-byte fields are written big-endian by
 * dfc_send_ack() (htons/htonl); do not reorder or resize fields.
 */
struct dfc_ack_cmd {
	struct dfc_qmap_header header;
	u8 command_name;	/* set to 4 by dfc_send_ack() */
	u8 cmd_type:2;		/* 0 = request (per dfc_send_ack()) */
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;	/* qos->tran_num, network byte order */
	u8 ver:2;		/* set to 2 by dfc_send_ack() */
	u8 reserved3:6;
	u8 type:2;		/* DFC ack type (e.g. DFC_ACK_TYPE_DISABLE) */
	u8 reserved4:6;
	u16 dfc_seq;		/* echoed flow-status sequence number */
	u8 reserved5[3];
	u8 bearer_id;
} __aligned(1);
static void dfc_svc_init(struct work_struct *work);

/* **************************************************** */

/* DFC QMI service identity */
#define DFC_SERVICE_ID_V01 0x4E
#define DFC_SERVICE_VERS_V01 0x01

/* Timeout for synchronous QMI request/response transactions */
#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)

/* Message IDs and maximum encoded message lengths for the DFC service.
 * Requests and responses share a message ID; the *_MAX_MSG_LEN values
 * bound the encoded TLV size handed to qmi_send_request().
 */
#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11
#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7

/* Unsolicited indications from the modem */
#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024

#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
/* QMI_DFC_BIND_CLIENT_REQ_V01: binds this client to a data endpoint so
 * that subsequent indications are scoped to that endpoint.
 */
struct dfc_bind_client_req_msg_v01 {
	u8 ep_id_valid;		/* optional ep_id TLV present */
	struct data_ep_id_type_v01 ep_id;
};

struct dfc_bind_client_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

/* QMI_DFC_INDICATION_REGISTER_REQ_V01: (un)registers for flow-status
 * and TX link-status indications (see dfc_indication_register_req()).
 */
struct dfc_indication_register_req_msg_v01 {
	u8 report_flow_status_valid;
	u8 report_flow_status;
	u8 report_tx_link_status_valid;
	u8 report_tx_link_status;
};

struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};
/* QMI encode/decode table for dfc_qos_id_type_v01:
 * a u32 QoS id followed by a 4-byte IP-type enum.
 */
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   qos_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct dfc_qos_id_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for dfc_flow_status_info_type_v01:
 * subs/mux/bearer ids, grant size in bytes, sequence number, and a
 * variable-length array of QoS ids.
 */
static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		/* grant, in bytes */
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   num_bytes),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   seq_num),
		.ei_array = NULL,
	},
	{
		/* length prefix for the qos_ids array below */
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_QOS_ID_V01,
		.elem_size = sizeof(struct dfc_qos_id_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_info_type_v01,
				   qos_ids),
		.ei_array = dfc_qos_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for dfc_ancillary_info_type_v01:
 * subs/mux/bearer ids plus a 32-bit ancillary word (carries flags such
 * as the TCP-bidir / RAT-switch bits tested via DFC_IS_* macros).
 */
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_ancillary_info_type_v01,
				   reserved),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI_DFC_GET_FLOW_STATUS_REQ_V01: optionally restricts the query to a
 * list of bearer ids.
 */
struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
};

struct dfc_get_flow_status_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
};

/* A received indication queued for deferred processing; msg_id selects
 * which member of the union is valid.
 */
struct dfc_svc_ind {
	struct list_head list;
	u16 msg_id;
	union {
		struct dfc_flow_status_ind_msg_v01 dfc_info;
		struct dfc_tx_link_status_ind_msg_v01 tx_status;
	} d;
};
/* QMI encode table for dfc_bind_client_req_msg_v01 (optional TLV 0x10:
 * endpoint id flag + value).
 */
static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct dfc_bind_client_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for dfc_bind_client_resp_msg_v01 (mandatory result
 * TLV 0x02 only).
 */
static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct dfc_bind_client_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode table for dfc_indication_register_req_msg_v01:
 * TLV 0x10 = report_flow_status, TLV 0x11 = report_tx_link_status,
 * each an optional flag + u8 value pair.
 */
static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_flow_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_indication_register_req_msg_v01,
				   report_tx_link_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for dfc_indication_register_resp_msg_v01 (mandatory
 * result TLV 0x02 only).
 */
static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_indication_register_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for the flow-status indication:
 * TLV 0x10 = variable-length flow_status array,
 * TLV 0x11 = eod_ack_reqd flag,
 * TLV 0x12 = variable-length ancillary_info array.
 */
static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   eod_ack_reqd),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_ancillary_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct
				   dfc_flow_status_ind_msg_v01,
				   ancillary_info),
		.ei_array = dfc_ancillary_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode table for dfc_get_flow_status_req_msg_v01 (optional TLV
 * 0x10: variable-length bearer id list).
 */
static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(u8),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_req_msg_v01,
				   bearer_id_list),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for dfc_get_flow_status_resp_msg_v01: mandatory
 * result TLV 0x02 plus optional TLV 0x10 with a variable-length
 * flow_status array.
 */
static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_flow_status_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_get_flow_status_resp_msg_v01,
				   flow_status),
		.ei_array = dfc_flow_status_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI encode/decode table for dfc_bearer_info_type_v01:
 * subs/mux/bearer ids plus a 4-byte IP-type enum.
 */
static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   subs_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   mux_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   bearer_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum dfc_ip_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct
				   dfc_bearer_info_type_v01,
				   ip_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
/* QMI decode table for the TX link-status indication: mandatory
 * tx_status byte (TLV 0x01) plus an optional variable-length
 * bearer_info array (TLV 0x10).
 */
static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   tx_status),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info_len),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = DFC_MAX_BEARERS_V01,
		.elem_size = sizeof(struct
				    dfc_bearer_info_type_v01),
		.array_type = VAR_LEN_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   dfc_tx_link_status_ind_msg_v01,
				   bearer_info),
		.ei_array = dfc_bearer_info_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};
  662. static int
  663. dfc_bind_client_req(struct qmi_handle *dfc_handle,
  664. struct sockaddr_qrtr *ssctl, struct svc_info *svc)
  665. {
  666. struct dfc_bind_client_resp_msg_v01 *resp;
  667. struct dfc_bind_client_req_msg_v01 *req;
  668. struct qmi_txn txn;
  669. int ret;
  670. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  671. if (!req)
  672. return -ENOMEM;
  673. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  674. if (!resp) {
  675. kfree(req);
  676. return -ENOMEM;
  677. }
  678. ret = qmi_txn_init(dfc_handle, &txn,
  679. dfc_bind_client_resp_msg_v01_ei, resp);
  680. if (ret < 0) {
  681. pr_err("%s() Failed init for response, err: %d\n",
  682. __func__, ret);
  683. goto out;
  684. }
  685. req->ep_id_valid = 1;
  686. req->ep_id.ep_type = svc->ep_type;
  687. req->ep_id.iface_id = svc->iface_id;
  688. ret = qmi_send_request(dfc_handle, ssctl, &txn,
  689. QMI_DFC_BIND_CLIENT_REQ_V01,
  690. QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
  691. dfc_bind_client_req_msg_v01_ei, req);
  692. if (ret < 0) {
  693. qmi_txn_cancel(&txn);
  694. pr_err("%s() Failed sending request, err: %d\n",
  695. __func__, ret);
  696. goto out;
  697. }
  698. ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
  699. if (ret < 0) {
  700. pr_err("%s() Response waiting failed, err: %d\n",
  701. __func__, ret);
  702. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  703. pr_err("%s() Request rejected, result: %d, err: %d\n",
  704. __func__, resp->resp.result, resp->resp.error);
  705. ret = -resp->resp.result;
  706. }
  707. out:
  708. kfree(resp);
  709. kfree(req);
  710. return ret;
  711. }
  712. static int
  713. dfc_indication_register_req(struct qmi_handle *dfc_handle,
  714. struct sockaddr_qrtr *ssctl, u8 reg)
  715. {
  716. struct dfc_indication_register_resp_msg_v01 *resp;
  717. struct dfc_indication_register_req_msg_v01 *req;
  718. struct qmi_txn txn;
  719. int ret;
  720. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  721. if (!req)
  722. return -ENOMEM;
  723. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  724. if (!resp) {
  725. kfree(req);
  726. return -ENOMEM;
  727. }
  728. ret = qmi_txn_init(dfc_handle, &txn,
  729. dfc_indication_register_resp_msg_v01_ei, resp);
  730. if (ret < 0) {
  731. pr_err("%s() Failed init for response, err: %d\n",
  732. __func__, ret);
  733. goto out;
  734. }
  735. req->report_flow_status_valid = 1;
  736. req->report_flow_status = reg;
  737. req->report_tx_link_status_valid = 1;
  738. req->report_tx_link_status = reg;
  739. ret = qmi_send_request(dfc_handle, ssctl, &txn,
  740. QMI_DFC_INDICATION_REGISTER_REQ_V01,
  741. QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
  742. dfc_indication_register_req_msg_v01_ei, req);
  743. if (ret < 0) {
  744. qmi_txn_cancel(&txn);
  745. pr_err("%s() Failed sending request, err: %d\n",
  746. __func__, ret);
  747. goto out;
  748. }
  749. ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
  750. if (ret < 0) {
  751. pr_err("%s() Response waiting failed, err: %d\n",
  752. __func__, ret);
  753. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  754. pr_err("%s() Request rejected, result: %d, err: %d\n",
  755. __func__, resp->resp.result, resp->resp.error);
  756. ret = -resp->resp.result;
  757. }
  758. out:
  759. kfree(resp);
  760. kfree(req);
  761. return ret;
  762. }
  763. static int
  764. dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
  765. struct sockaddr_qrtr *ssctl,
  766. struct dfc_get_flow_status_resp_msg_v01 *resp)
  767. {
  768. struct dfc_get_flow_status_req_msg_v01 *req;
  769. struct qmi_txn *txn;
  770. int ret;
  771. req = kzalloc(sizeof(*req), GFP_ATOMIC);
  772. if (!req)
  773. return -ENOMEM;
  774. txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
  775. if (!txn) {
  776. kfree(req);
  777. return -ENOMEM;
  778. }
  779. ret = qmi_txn_init(dfc_handle, txn,
  780. dfc_get_flow_status_resp_msg_v01_ei, resp);
  781. if (ret < 0) {
  782. pr_err("%s() Failed init for response, err: %d\n",
  783. __func__, ret);
  784. goto out;
  785. }
  786. ret = qmi_send_request(dfc_handle, ssctl, txn,
  787. QMI_DFC_GET_FLOW_STATUS_REQ_V01,
  788. QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
  789. dfc_get_flow_status_req_msg_v01_ei, req);
  790. if (ret < 0) {
  791. qmi_txn_cancel(txn);
  792. pr_err("%s() Failed sending request, err: %d\n",
  793. __func__, ret);
  794. goto out;
  795. }
  796. ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
  797. if (ret < 0) {
  798. pr_err("%s() Response waiting failed, err: %d\n",
  799. __func__, ret);
  800. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  801. pr_err("%s() Request rejected, result: %d, err: %d\n",
  802. __func__, resp->resp.result, resp->resp.error);
  803. ret = -resp->resp.result;
  804. }
  805. out:
  806. kfree(txn);
  807. kfree(req);
  808. return ret;
  809. }
  810. static int dfc_init_service(struct dfc_qmi_data *data)
  811. {
  812. int rc;
  813. rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
  814. if (rc < 0)
  815. return rc;
  816. return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
  817. }
  818. static void
  819. dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
  820. {
  821. struct qos_info *qos = rmnet_get_qos_pt(dev);
  822. struct sk_buff *skb;
  823. struct dfc_ack_cmd *msg;
  824. int data_size = sizeof(struct dfc_ack_cmd);
  825. int header_size = sizeof(struct dfc_qmap_header);
  826. if (!qos)
  827. return;
  828. if (dfc_qmap) {
  829. dfc_qmap_send_ack(qos, bearer_id, seq, type);
  830. return;
  831. }
  832. skb = alloc_skb(data_size, GFP_ATOMIC);
  833. if (!skb)
  834. return;
  835. msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
  836. memset(msg, 0, data_size);
  837. msg->header.cd_bit = 1;
  838. msg->header.mux_id = mux_id;
  839. msg->header.pkt_len = htons(data_size - header_size);
  840. msg->bearer_id = bearer_id;
  841. msg->command_name = 4;
  842. msg->cmd_type = 0;
  843. msg->dfc_seq = htons(seq);
  844. msg->type = type;
  845. msg->ver = 2;
  846. msg->transaction_id = htonl(qos->tran_num);
  847. skb->dev = qos->real_dev;
  848. skb->protocol = htons(ETH_P_MAP);
  849. trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
  850. qos->tran_num++;
  851. rmnet_map_tx_qmap_cmd(skb);
  852. }
  853. int dfc_bearer_flow_ctl(struct net_device *dev,
  854. struct rmnet_bearer_map *bearer,
  855. struct qos_info *qos)
  856. {
  857. bool enable;
  858. enable = bearer->grant_size ? true : false;
  859. qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
  860. /* Do not flow disable tcp ack q in tcp bidir */
  861. if (bearer->ack_mq_idx != INVALID_MQ &&
  862. (enable || !bearer->tcp_bidir))
  863. qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
  864. if (!enable && bearer->ack_req)
  865. dfc_send_ack(dev, bearer->bearer_id,
  866. bearer->seq, qos->mux_id,
  867. DFC_ACK_TYPE_DISABLE);
  868. return 0;
  869. }
  870. static int dfc_all_bearer_flow_ctl(struct net_device *dev,
  871. struct qos_info *qos, u8 ack_req, u32 ancillary,
  872. struct dfc_flow_status_info_type_v01 *fc_info)
  873. {
  874. struct rmnet_bearer_map *bearer;
  875. list_for_each_entry(bearer, &qos->bearer_head, list) {
  876. bearer->grant_size = fc_info->num_bytes;
  877. bearer->grant_thresh =
  878. qmi_rmnet_grant_per(bearer->grant_size);
  879. bearer->seq = fc_info->seq_num;
  880. bearer->ack_req = ack_req;
  881. bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
  882. bearer->last_grant = fc_info->num_bytes;
  883. bearer->last_seq = fc_info->seq_num;
  884. bearer->last_adjusted_grant = fc_info->num_bytes;
  885. dfc_bearer_flow_ctl(dev, bearer, qos);
  886. }
  887. return 0;
  888. }
  889. static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
  890. struct dfc_flow_status_info_type_v01 *fc_info)
  891. {
  892. u32 grant;
  893. if (!fc_info->rx_bytes_valid)
  894. return fc_info->num_bytes;
  895. if (bearer->bytes_in_flight > fc_info->rx_bytes)
  896. bearer->bytes_in_flight -= fc_info->rx_bytes;
  897. else
  898. bearer->bytes_in_flight = 0;
  899. /* Adjusted grant = grant - bytes_in_flight */
  900. if (fc_info->num_bytes > bearer->bytes_in_flight)
  901. grant = fc_info->num_bytes - bearer->bytes_in_flight;
  902. else
  903. grant = 0;
  904. trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
  905. fc_info->num_bytes, fc_info->rx_bytes,
  906. bearer->bytes_in_flight, grant);
  907. return grant;
  908. }
/* Update a single bearer's flow-control state from a flow-status entry.
 *
 * Looks up the bearer (falling back to a no-reference lookup), filters
 * out indications that must be ignored (mid-RAT-switch, grant while TX
 * is off), computes the adjusted grant, records the new state, and runs
 * dfc_bearer_flow_ctl() only when the grant crosses between zero and
 * non-zero. Caller must hold qos->qos_lock.
 */
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
			     u8 ack_req, u32 ancillary,
			     struct dfc_flow_status_info_type_v01 *fc_info,
			     bool is_query)
{
	struct rmnet_bearer_map *itm = NULL;
	int rc = 0;
	bool action = false;
	u32 adjusted_grant;

	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
	if (!itm)
		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);

	if (itm) {
		/* The RAT switch flag indicates the start and end of
		 * the switch. Ignore indications in between.
		 */
		if (DFC_IS_RAT_SWITCH(ancillary))
			itm->rat_switch = !fc_info->num_bytes;
		else
			if (itm->rat_switch)
				return 0;

		/* If TX is OFF but we received grant, ignore it */
		if (itm->tx_off && fc_info->num_bytes > 0)
			return 0;

		/* Adjust grant for query; otherwise take the raw grant and
		 * reset in-flight accounting
		 */
		if (dfc_qmap && is_query) {
			adjusted_grant = dfc_adjust_grant(itm, fc_info);
		} else {
			adjusted_grant = fc_info->num_bytes;
			itm->bytes_in_flight = 0;
		}

		/* Flow control only needs to act on a zero <-> non-zero
		 * grant transition
		 */
		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
		    (itm->grant_size > 0 && adjusted_grant == 0))
			action = true;

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_ack(qos, itm->bearer_id,
					  itm->seq, DFC_ACK_TYPE_DISABLE);

		itm->grant_size = adjusted_grant;

		/* No further query if the adjusted grant is less
		 * than 20% of the original grant
		 */
		if (dfc_qmap && is_query &&
		    itm->grant_size < (fc_info->num_bytes / 5))
			itm->grant_thresh = itm->grant_size;
		else
			itm->grant_thresh =
				qmi_rmnet_grant_per(itm->grant_size);

		itm->seq = fc_info->seq_num;
		itm->ack_req = ack_req;
		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
		itm->last_grant = fc_info->num_bytes;
		itm->last_seq = fc_info->seq_num;
		itm->last_adjusted_grant = adjusted_grant;

		if (action)
			rc = dfc_bearer_flow_ctl(dev, itm, qos);
	}

	return rc;
}
/* Process a flow-status indication (or query response replayed as one).
 *
 * For each flow-status entry: match up any ancillary info by mux/bearer
 * id, resolve the rmnet device, and update either all bearers (wildcard
 * bearer id 0xFF) or the single matching bearer, under qos->qos_lock.
 *
 * Runs under rcu_read_lock() to keep the rmnet device/qos pointers
 * stable. NOTE(review): a missing dev jumps to clean_out and abandons
 * the remaining entries, while a missing qos only skips one entry —
 * presumably intentional (no dev means the port is going away); confirm.
 */
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind,
			       bool is_query)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
	struct dfc_ancillary_info_type_v01 *ai;
	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
	u32 ancillary;
	int i, j;

	rcu_read_lock();

	for (i = 0; i < ind->flow_status_len; i++) {
		flow_status = &ind->flow_status[i];

		/* Find the ancillary word for this mux/bearer, if any */
		ancillary = 0;
		if (ind->ancillary_info_valid) {
			for (j = 0; j < ind->ancillary_info_len; j++) {
				ai = &ind->ancillary_info[j];
				if (ai->mux_id == flow_status->mux_id &&
				    ai->bearer_id == flow_status->bearer_id) {
					ancillary = ai->reserved;
					break;
				}
			}
		}

		trace_dfc_flow_ind(dfc->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
				   flow_status->seq_num,
				   ack_req,
				   ancillary);

		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		spin_lock_bh(&qos->qos_lock);

		/* Grants are ignored while powersave ignores them */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			spin_unlock_bh(&qos->qos_lock);
			continue;
		}

		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
				dev, qos, ack_req, ancillary, flow_status);
		else
			dfc_update_fc_map(
				dev, qos, ack_req, ancillary, flow_status,
				is_query);

		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
/* React to a TX link status change for one bearer.
 *
 * TX going down revokes any outstanding grant (flow-disabling the
 * queues); TX coming back up restores a DEFAULT_GRANT unless a RAT
 * switch is in progress. tx_off stores the inverted status so
 * "no change" can be detected on the next indication. Caller must hold
 * qos->qos_lock.
 */
static void dfc_update_tx_link_status(struct net_device *dev,
				      struct qos_info *qos, u8 tx_status,
				      struct dfc_bearer_info_type_v01 *binfo)
{
	struct rmnet_bearer_map *itm = NULL;

	itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
	if (!itm)
		return;

	/* If no change in tx status, ignore */
	if (itm->tx_off == !tx_status)
		return;

	if (itm->grant_size && !tx_status) {
		/* TX went down: revoke the grant and flow-disable */
		itm->grant_size = 0;
		itm->tcp_bidir = false;
		itm->bytes_in_flight = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
		/* TX came back with no grant: seed a default grant */
		itm->grant_size = DEFAULT_GRANT;
		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		itm->seq = 0;
		itm->ack_req = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
	}

	itm->tx_off = !tx_status;
}
/* Process a TX link status indication.
 *
 * Resolves the rmnet device for each bearer entry and applies the new
 * TX status under qos->qos_lock. Runs under rcu_read_lock() to keep the
 * device/qos pointers stable. As in dfc_do_burst_flow_control(), a
 * missing dev aborts the remaining entries while a missing qos skips
 * only the current one.
 */
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_bearer_info_type_v01 *bearer_info;
	int i;

	rcu_read_lock();

	for (i = 0; i < ind->bearer_info_len; i++) {
		bearer_info = &ind->bearer_info[i];

		trace_dfc_tx_link_status_ind(dfc->index, i,
					     ind->tx_status,
					     bearer_info->mux_id,
					     bearer_info->bearer_id);

		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  bearer_info->mux_id);
		if (!dev)
			goto clean_out;

		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
		if (!qos)
			continue;

		spin_lock_bh(&qos->qos_lock);

		dfc_update_tx_link_status(
			dev, qos, ind->tx_status, bearer_info);

		spin_unlock_bh(&qos->qos_lock);
	}

clean_out:
	rcu_read_unlock();
}
/* Workqueue handler: drain and dispatch queued QMI indications.
 *
 * Pops entries off dfc->qmi_ind_q one at a time (the irqsave lock only
 * protects the list manipulation) and dispatches them unless a restart
 * is in progress. local_bh_disable() keeps dispatch consistent with the
 * BH-disabled locking used by the flow-control paths. Finally marks DL
 * messaging active for the powersave logic.
 */
static void dfc_qmi_ind_work(struct work_struct *work)
{
	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
						qmi_ind_work);
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* container_of() cannot yield NULL here; purely defensive */
	if (!dfc)
		return;

	local_bh_disable();

	do {
		/* Detach the oldest queued indication, if any */
		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
						   struct dfc_svc_ind, list);
		if (svc_ind)
			list_del(&svc_ind->list);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		if (!svc_ind)
			break;

		/* Drop (but still free) indications during restart */
		if (!dfc->restart_state) {
			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
				dfc_do_burst_flow_control(
					dfc, &svc_ind->d.dfc_info,
					false);
			else if (svc_ind->msg_id ==
					QMI_DFC_TX_LINK_STATUS_IND_V01)
				dfc_handle_tx_link_status_ind(
					dfc, &svc_ind->d.tx_status);
		}
		kfree(svc_ind);
	} while (1);

	local_bh_enable();

	qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
}
/* QMI callback for flow-status indications.
 *
 * Runs in QMI message-handler context, so the decoded indication is
 * bounds-checked, copied into an atomically allocated dfc_svc_ind and
 * queued for dfc_qmi_ind_work() rather than processed inline.
 */
static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
			    struct qmi_txn *txn, const void *data)
{
	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
						handle);
	struct dfc_flow_status_ind_msg_v01 *ind_msg;
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* Sanity check that the handle really is ours */
	if (qmi != &dfc->handle)
		return;

	ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
	if (ind_msg->flow_status_valid) {
		/* Reject lengths beyond what the message can carry */
		if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
			pr_err("%s() Invalid fc info len: %d\n",
			       __func__, ind_msg->flow_status_len);
			return;
		}

		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
		if (!svc_ind)
			return;

		svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
		memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));

		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
	}
}
/* QMI callback for TX link status indications.
 *
 * Same deferral pattern as dfc_clnt_ind_cb(): validate, copy into an
 * atomically allocated dfc_svc_ind, queue for the workqueue.
 */
static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
				      struct sockaddr_qrtr *sq,
				      struct qmi_txn *txn, const void *data)
{
	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
						handle);
	struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/* Sanity check that the handle really is ours */
	if (qmi != &dfc->handle)
		return;

	ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
	if (ind_msg->bearer_info_valid) {
		/* Reject lengths beyond what the message can carry */
		if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
			pr_err("%s() Invalid bearer info len: %d\n",
			       __func__, ind_msg->bearer_info_len);
			return;
		}

		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
		if (!svc_ind)
			return;

		svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
		memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));

		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
	}
}
/* Workqueue handler: finish client setup once the DFC service arrives.
 *
 * Registers with the service, then moves this client from dfc_pending[]
 * to dfc_clients[] under the RTNL lock. restart_state is re-checked at
 * every blocking boundary, and rtnl_trylock() is spun (with
 * cond_resched()) instead of rtnl_lock() so a concurrent teardown
 * holding RTNL cannot deadlock against this work item.
 */
static void dfc_svc_init(struct work_struct *work)
{
	int rc = 0;
	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;

	if (data->restart_state == 1)
		return;

	rc = dfc_init_service(data);
	if (rc < 0) {
		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
		return;
	}

	if (data->restart_state == 1)
		return;

	/* Spin on RTNL, bailing out immediately if a restart begins */
	while (!rtnl_trylock()) {
		if (!data->restart_state)
			cond_resched();
		else
			return;
	}

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi) {
		rtnl_unlock();
		return;
	}

	/* Promote from pending to active client */
	qmi->dfc_pending[data->index] = NULL;
	qmi->dfc_clients[data->index] = (void *)data;
	trace_dfc_client_state_up(data->index,
				  data->svc.instance,
				  data->svc.ep_type,
				  data->svc.iface_id);

	rtnl_unlock();
	pr_info("Connection established with the DFC Service\n");
}
  1203. static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
  1204. {
  1205. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1206. handle);
  1207. data->ssctl.sq_family = AF_QIPCRTR;
  1208. data->ssctl.sq_node = svc->node;
  1209. data->ssctl.sq_port = svc->port;
  1210. queue_work(data->dfc_wq, &data->svc_arrive);
  1211. return 0;
  1212. }
  1213. static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
  1214. {
  1215. struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
  1216. handle);
  1217. if (!data)
  1218. pr_debug("%s() data is null\n", __func__);
  1219. }
/* Server arrival/departure callbacks for the QMI lookup */
static struct qmi_ops server_ops = {
	.new_server = dfc_svc_arrive,
	.del_server = dfc_svc_exit,
};
/* Decode tables for the two DFC indications we consume; the empty
 * terminator entry ends the array for the QMI core.
 */
static struct qmi_msg_handler qmi_indication_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
		.ei = dfc_flow_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
		.fn = dfc_clnt_ind_cb,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
		.ei = dfc_tx_link_status_ind_v01_ei,
		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
		.fn = dfc_tx_link_status_ind_cb,
	},
	{},
};
/* Create and register a DFC QMI client for one endpoint.
 *
 * Allocates the client state, creates its single-threaded workqueue,
 * initializes the QMI handle and starts a lookup for the DFC service.
 * On success the client is parked in qmi->dfc_pending[index] until
 * dfc_svc_init() promotes it. Returns 0 or a negative errno; all
 * partially acquired resources are released via the goto chain.
 */
int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
			struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;
	int rc = -ENOMEM;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
	INIT_LIST_HEAD(&data->qmi_ind_q);
	spin_lock_init(&data->qmi_ind_lock);

	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
	}

	INIT_WORK(&data->svc_arrive, dfc_svc_init);

	rc = qmi_handle_init(&data->handle,
			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
			     &server_ops, qmi_indication_handler);
	if (rc < 0) {
		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
		goto err1;
	}

	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
			    DFC_SERVICE_VERS_V01,
			    psvc->instance);
	if (rc < 0) {
		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
		goto err2;
	}

	qmi->dfc_pending[index] = (void *)data;

	return 0;

err2:
	qmi_handle_release(&data->handle);
err1:
	destroy_workqueue(data->dfc_wq);
err0:
	kfree(data);
	return rc;
}
/* Tear down a DFC QMI client.
 *
 * restart_state is set first so queued/in-flight work items bail out,
 * then the QMI handle is released (stopping new indications) before the
 * workqueue is drained and destroyed. Order matters: draining before
 * setting restart_state could dispatch stale indications.
 */
void dfc_qmi_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	data->restart_state = 1;
	trace_dfc_client_state_down(data->index, 0);

	qmi_handle_release(&data->handle);

	drain_workqueue(data->dfc_wq);
	destroy_workqueue(data->dfc_wq);
	kfree(data);
}
/* Per-packet TX accounting against the bearer's grant.
 *
 * Called on the transmit path: charges `len` bytes to the bearer mapped
 * from the flow mark, sends a THRESHOLD ack when the grant drops across
 * grant_thresh, and flow-disables the bearer when the grant reaches
 * zero. All state is updated under qos->qos_lock.
 */
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len)
{
	struct rmnet_bearer_map *bearer = NULL;
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock_bh(&qos->qos_lock);

	/* Mark is flow_id */
	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (likely(itm))
		bearer = itm->bearer;

	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, mark, bearer->grant_size);

	/* Counted even when the grant is already exhausted, so query
	 * adjustment (dfc_adjust_grant) can reconcile later
	 */
	bearer->bytes_in_flight += len;

	if (!bearer->grant_size)
		goto out;

	/* Consume grant, saturating at zero */
	start_grant = bearer->grant_size;
	if (len >= bearer->grant_size)
		bearer->grant_size = 0;
	else
		bearer->grant_size -= len;

	/* Ack exactly once when crossing the threshold downwards */
	if (start_grant > bearer->grant_thresh &&
	    bearer->grant_size <= bearer->grant_thresh) {
		dfc_send_ack(dev, bearer->bearer_id,
			     bearer->seq, qos->mux_id,
			     DFC_ACK_TYPE_THRESHOLD);
	}

	if (!bearer->grant_size)
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock_bh(&qos->qos_lock);
}
  1336. void dfc_qmi_query_flow(void *dfc_data)
  1337. {
  1338. struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
  1339. struct dfc_get_flow_status_resp_msg_v01 *resp;
  1340. struct dfc_svc_ind *svc_ind;
  1341. int rc;
  1342. resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
  1343. if (!resp)
  1344. return;
  1345. svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
  1346. if (!svc_ind) {
  1347. kfree(resp);
  1348. return;
  1349. }
  1350. if (!data)
  1351. goto done;
  1352. rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
  1353. if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
  1354. resp->flow_status_len > DFC_MAX_BEARERS_V01)
  1355. goto done;
  1356. svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
  1357. svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
  1358. memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
  1359. sizeof(resp->flow_status[0]) * resp->flow_status_len);
  1360. dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
  1361. done:
  1362. kfree(svc_ind);
  1363. kfree(resp);
  1364. }