rmnet_descriptor.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "rmnet_trace.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define rmnet_descriptor_for_each_frag(p, desc) \
	list_for_each_entry(p, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe(p, tmp, desc) \
	list_for_each_entry_safe(p, tmp, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe_reverse(p, tmp, desc) \
	list_for_each_entry_safe_reverse(p, tmp, &desc->frags, list)

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
typedef void (*rmnet_perf_tether_ingress_hook_t)(struct tcphdr *tp,
						 struct sk_buff *skb);

rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_ingress_hook);
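
/* Get a descriptor for an incoming frame. Descriptors are recycled through a
 * per-port free list so the RX path rarely allocates; if the list is empty,
 * a new descriptor is allocated atomically and the pool grows by one.
 */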
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);
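
/* Return a descriptor to the pool. Each fragment's page reference is dropped
 * and the fragment entry freed; the descriptor itself is zeroed and placed
 * back on the free list for reuse.
 */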
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_fragment *frag, *tmp;
	unsigned long flags;

	list_del(&frag_desc->list);
	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *page = skb_frag_page(&frag->frag);

		if (page)
			put_page(page);

		list_del(&frag->list);
		kfree(frag);
	}

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
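
/* Remove 'size' bytes from the front of the descriptor; the frag-descriptor
 * analogue of skb_pull(). Fragments are released whole as they are consumed.
 * Pulling the entire length (or more) drops the packet and returns NULL.
 */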
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;

	if (size >= frag_desc->len) {
		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
			__func__, size, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!size)
			break;

		if (size >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			size -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Pull off 'size' bytes */
		skb_frag_off_add(&frag->frag, size);
		skb_frag_size_sub(&frag->frag, size);
		frag_desc->len -= size;
		break;
	}

	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_pull);
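
/* Trim the descriptor down to 'size' bytes by removing data from the end;
 * the frag-descriptor analogue of skb_trim(). Trimming to 0 drops the packet
 * and returns NULL, and growing is not supported.
 */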
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;
	unsigned int eat;

	if (!size) {
		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
			__func__, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	/* Growing bigger doesn't make sense */
	if (size >= frag_desc->len)
		goto out;

	/* Compute number of bytes to remove from the end */
	eat = frag_desc->len - size;
	rmnet_descriptor_for_each_frag_safe_reverse(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!eat)
			goto out;

		if (eat >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			eat -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Chop off 'eat' bytes from the end */
		skb_frag_size_sub(&frag->frag, eat);
		frag_desc->len -= eat;
		goto out;
	}

out:
	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_trim);

static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
				u32 off, u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u32 frag_size, copy_len;
	u32 buf_offset = 0;

	/* Don't make me do something we'd both regret */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return -EINVAL;

	/* Copy 'len' bytes into the buffer starting from 'off' */
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			copy_len = min_t(u32, len, frag_size - off);
			memcpy(buf + buf_offset,
			       skb_frag_address(&frag->frag) + off,
			       copy_len);
			buf_offset += copy_len;
			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
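
/* Return a pointer to 'len' bytes of data starting at offset 'off'. If the
 * requested span is contiguous within a single fragment, a direct pointer
 * into that fragment is returned; otherwise the bytes are copied into the
 * caller-provided buffer 'buf' and that buffer is returned instead.
 */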
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u8 *start;
	u32 frag_size, offset;

	/* Don't take a long pointer off a short frag */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return NULL;

	/* Find the starting fragment */
	offset = off;
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			start = skb_frag_address(&frag->frag) + off;
			/* If the header is entirely on this frag, just return
			 * a pointer to it.
			 */
			if (off + len <= frag_size)
				return start;

			/* Otherwise, we need to copy the data into a linear
			 * buffer.
			 */
			break;
		}

		off -= frag_size;
	}

	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
		return NULL;

	return buf;
}
EXPORT_SYMBOL(rmnet_frag_header_ptr);

int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_fragment *frag;

	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
	if (!frag)
		return -ENOMEM;

	INIT_LIST_HEAD(&frag->list);
	get_page(p);
	__skb_frag_set_page(&frag->frag, p);
	skb_frag_size_set(&frag->frag, len);
	skb_frag_off_set(&frag->frag, page_offset);
	list_add_tail(&frag->list, &frag_desc->frags);
	frag_desc->len += len;
	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);
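
/* Clone a byte range from one descriptor into another. No packet data is
 * copied; the new fragment entries simply take additional references on the
 * same underlying pages.
 */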
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len)
{
	struct rmnet_fragment *frag;
	int rc;

	/* Sanity check the lengths */
	if (off > from->len || len > from->len || off + len > from->len)
		return -EINVAL;

	rmnet_descriptor_for_each_frag(frag, from) {
		u32 frag_size;

		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			struct page *p = skb_frag_page(&frag->frag);
			u32 page_off = skb_frag_off(&frag->frag);
			u32 copy_len = min_t(u32, len, frag_size - off);

			rc = rmnet_frag_descriptor_add_frag(to, p,
							    page_off + off,
							    copy_len);
			if (rc < 0)
				return rc;

			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);
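
/* Walk past any IPv6 extension headers starting at offset 'start'; the
 * frag-descriptor analogue of ipv6_skip_exthdr(). On success, returns the
 * offset of the transport header and updates *nexthdrp. *fragp is set to the
 * fragment offset field if a fragment header is found along the way.
 */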
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;
	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp, __hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
					   &__hp);
		if (!hp)
			return -EINVAL;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			u32 off = offsetof(struct frag_hdr, frag_off);
			__be16 *fp, __fp;

			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
						   sizeof(*fp), &__fp);
			if (!fp)
				return -EINVAL;

			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
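
/* Handle a MAP flow control command by toggling flow on the virtual device
 * that owns the command's mux ID. The returned code tells the caller whether
 * to ACK the command back to the modem.
 */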
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_map_control_command *cmd,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;
	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 flows. User space does not support creating dedicated flows for
	 * the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}

static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

static void
rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
			      struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dlhdr), &__dlhdr);
	if (!dlhdr)
		return;

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play.
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

static void
rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dltrl), &__dltrl);
	if (!dltrl)
		return;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play.
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd, __cmd;
	unsigned char rc = 0;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
				    &__cmd);
	if (!cmd)
		return;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}

int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd, __cmd;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
				    sizeof(*cmd), &__cmd);
	if (!cmd)
		return -1;

	/* Silently discard any markers received over the LL channel */
	if (frag_desc->priority == 0xda1a &&
	    (cmd->command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     cmd->command_name == RMNET_MAP_COMMAND_FLOW_END))
		return 0;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);
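
/* Carve a single MAP frame out of the aggregated skb, starting at byte
 * offset 'start', and wrap its pages in a new frag descriptor. Returns the
 * number of bytes consumed from the skb, or -1 on any parsing or allocation
 * failure.
 */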
static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u32 start, u32 priority)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph, __maph;
	skb_frag_t *frag;
	u32 start_frag, offset, i;
	u32 start_frag_size, start_frag_off;
	u32 pkt_len, copy_len = 0;
	int rc;

	for (start_frag = 0, offset = 0; start_frag < shinfo->nr_frags;
	     start_frag++) {
		frag = &shinfo->frags[start_frag];
		if (start < skb_frag_size(frag) + offset)
			break;

		offset += skb_frag_size(frag);
	}

	if (start_frag == shinfo->nr_frags)
		return -1;

	/* start - offset is the additional offset into the page to account
	 * for any data on it we've already used.
	 */
	start_frag_size = skb_frag_size(frag) - (start - offset);
	start_frag_off = skb_frag_off(frag) + (start - offset);

	/* Grab the QMAP header. Careful, as there's no guarantee that it's
	 * contiguous!
	 */
	if (likely(start_frag_size >= sizeof(*maph))) {
		maph = skb_frag_address(frag) + (start - offset);
	} else {
		/* The header's split across pages. We can rebuild it.
		 * Probably not faster or stronger than before. But certainly
		 * more linear.
		 */
		if (skb_copy_bits(skb, start, &__maph, sizeof(__maph)) < 0)
			return -1;

		maph = &__maph;
	}

	pkt_len = ntohs(maph->pkt_len);
	/* Catch empty frames */
	if (!pkt_len)
		return -1;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return -1;

	frag_desc->priority = priority;
	pkt_len += sizeof(*maph);
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if ((port->data_format & (RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5 |
					 RMNET_FLAGS_INGRESS_COALESCE)) &&
		   !maph->cd_bit) {
		u32 hsize = 0;
		u8 type;

		/* Check the type. This seems like it should be overkill for
		 * less than a single byte, doesn't it?
		 */
		if (likely(start_frag_size >= sizeof(*maph) + 1)) {
			type = *((u8 *)maph + sizeof(*maph));
		} else {
			if (skb_copy_bits(skb, start + sizeof(*maph), &type,
					  sizeof(type)) < 0) {
				/* Don't leak the descriptor we just took */
				rmnet_recycle_frag_descriptor(frag_desc, port);
				return -1;
			}
		}

		/* Type only uses the first 7 bits */
		switch ((type & 0xFE) >> 1) {
		case RMNET_MAP_HEADER_TYPE_COALESCING:
			hsize = sizeof(struct rmnet_map_v5_coal_header);
			break;
		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
			hsize = sizeof(struct rmnet_map_v5_csum_header);
			break;
		}

		pkt_len += hsize;
	}

	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
		u32 size, off;
		u32 copy;

		frag = &shinfo->frags[i];
		size = skb_frag_size(frag);
		off = skb_frag_off(frag);
		if (i == start_frag) {
			/* These are different for the first one to account
			 * for the starting offset.
			 */
			size = start_frag_size;
			off = start_frag_off;
		}

		copy = min_t(u32, size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
						    skb_frag_page(frag), off,
						    copy);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}

		pkt_len -= copy;
		copy_len += copy;
		/* If the fragment is exhausted, we can move to the next one */
		if (!(size - copy_len)) {
			i++;
			copy_len = 0;
		}
	}

	if (pkt_len) {
		/* Packet length is larger than the amount of data we have */
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return -1;
	}

	list_add_tail(&frag_desc->list, list);
	return (int)frag_desc->len;
}
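
/* Deaggregate an ingress skb into individual MAP packet descriptors,
 * appending each one to 'list'. Processing stops at the first malformed
 * packet, dropping the remainder of the frame.
 */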
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list, u32 priority)
{
	u32 start = 0;
	int rc;

	while (start < skb->len) {
		rc = rmnet_frag_deaggregate_one(skb, port, list, start,
						priority);
		if (rc < 0)
			return;

		start += (u32)rc;
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}

/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress;
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);

		rmnet_perf_tether_ingress =
			rcu_dereference(rmnet_perf_tether_ingress_hook);
		if (rmnet_perf_tether_ingress)
			rmnet_perf_tether_ingress(tp, skb);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}

#define PFN_ENTRY_MAX (128)
#define PFNI (count++ % PFN_ENTRY_MAX)
static void rmnet_descriptor_trace_pfn(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *frag_iter;
	unsigned long rpfn[PFN_ENTRY_MAX];
	int i, count;

	if (!trace_print_pfn_enabled())
		return;

	shinfo = skb_shinfo(skb);
	memset(rpfn, 0, sizeof(rpfn));
	count = 0;

	for (i = 0; i < shinfo->nr_frags; i++)
		rpfn[PFNI] = page_to_pfn(skb_frag_page(&shinfo->frags[i]));

	skb_walk_frags(skb, frag_iter) {
		shinfo = skb_shinfo(frag_iter);

		for (i = 0; i < shinfo->nr_frags; i++)
			rpfn[PFNI] = page_to_pfn(skb_frag_page(&shinfo->frags[i]));
	}

	trace_print_pfn(skb, rpfn, count);
}

/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_fragment *frag, *tmp;
	struct rmnet_skb_cb *cb;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
				     skb_put(head_skb, hdr_len));
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* Pull the headers off carefully */
		if (hdr_len == frag_desc->len)
			/* Fast forward "header only" packets */
			goto skip_frags;

		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
			kfree_skb(head_skb);
			return NULL;
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}

	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in the page fragments */
	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *p = skb_frag_page(&frag->frag);
		u32 frag_size = skb_frag_size(&frag->frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(p);
			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
					skb_frag_off(&frag->frag), frag_size,
					frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);
	cb = RMNET_SKB_CB(head_skb);
	cb->coal_bytes = frag_desc->coal_bytes;
	cb->coal_bufsize = frag_desc->coal_bufsize;

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	if (frag_desc->tcp_flags_set) {
		struct tcphdr *th;
		__be16 *flags;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		flags = (__be16 *)&tcp_flag_word(th);
		*flags = frag_desc->tcp_flags;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		cb->flush_shs = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	/* Propagate original priority value */
	head_skb->priority = frag_desc->priority;

	if (trace_print_tcp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_tcp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_tcp_rx(head_skb, saddr, daddr, tcp_hdr(head_skb));
		rmnet_descriptor_trace_pfn(head_skb);
	}
skip_trace_print_tcp_rx:

	if (trace_print_udp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
		u16 ip_id = 0;

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_udp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
			ip_id = ntohs(ip_hdr(head_skb)->id);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_udp_rx(head_skb, saddr, daddr, udp_hdr(head_skb),
				   ip_id);
		rmnet_descriptor_trace_pfn(head_skb);
	}
skip_trace_print_udp_rx:

	return head_skb;
}

/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);
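
/* Carve one segment out of a coalesced descriptor into a new descriptor that
 * shares the parent's header and data pages. TCP sequence number, flags, and
 * IPv4 ID metadata are adjusted so the segment can be rebuilt as a standalone
 * packet; the parent's data offset is then advanced past the segmented bytes.
 */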
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_desc;
	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
	u32 offset = hlen + coal_desc->data_offset;
	int rc;

	new_desc = rmnet_get_frag_descriptor(port);
	if (!new_desc)
		return;

	/* Header information and most metadata is the same as the original */
	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_desc->list);
	INIT_LIST_HEAD(&new_desc->frags);
	new_desc->len = 0;

	/* Add the header fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
						  hlen);
	if (rc < 0)
		goto recycle;

	/* Add in the data fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
						  dlen);
	if (rc < 0)
		goto recycle;

	/* Update protocol-specific metadata */
	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*th), &__th);
		if (!th)
			goto recycle;

		new_desc->tcp_seq_set = 1;
		new_desc->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);

		/* Don't allow any dangerous flags to appear in any segments
		 * other than the last.
		 */
		if (th->fin || th->psh) {
			if (offset + dlen < coal_desc->len) {
				__be32 flag_word = tcp_flag_word(th);

				/* Clear the FIN and PSH flags from this
				 * segment.
				 */
				flag_word &= ~TCP_FLAG_FIN;
				flag_word &= ~TCP_FLAG_PSH;

				new_desc->tcp_flags_set = 1;
				new_desc->tcp_flags = *((__be16 *)&flag_word);
			}
		}
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*uh), &__uh);
		if (!uh)
			goto recycle;

		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			goto recycle;

		new_desc->ip_id_set = 1;
		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_desc->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += dlen;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	/* Only relevant for the first segment, to avoid overcounting */
	coal_desc->coal_bytes = 0;
	coal_desc->coal_bufsize = 0;

	list_add_tail(&new_desc->list, list);
	return;

recycle:
	rmnet_recycle_frag_descriptor(new_desc, port);
}
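
/* Validate the transport checksum of a packet in software by computing the
 * pseudo header checksum and folding in the datagram. Used on the coalescing
 * path when the hardware's checksum indication cannot be trusted.
 */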
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	/* Keep analysis tools happy, since they will see that
	 * rmnet_frag_data_ptr() could return NULL. It can't in this case,
	 * since we can't get this far otherwise...
	 */
	if (unlikely(!data))
		return false;

	datagram_len = frag_desc->len - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}

/* Converts the coalesced frame into a list of descriptors */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header coal_hdr;
	struct rmnet_fragment *frag;
	u8 *version;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Copy the coal header into our local storage before pulling it. It's
	 * possible that this header (or part of it) is the last part of a
	 * page and pulling it off would cause that page to be freed.
	 * Referring back to the header would be invalid in that case.
	 */
	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
				 sizeof(coal_hdr), &coal_hdr) < 0)
		return;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
					      sizeof(coal_hdr)))
		return;

	/* By definition, this byte is linear, and the first byte on the
	 * first fragment. ;) Hence why no header_ptr() call is needed
	 * for it.
	 */
	version = rmnet_frag_data_ptr(coal_desc);
	if (unlikely(!version))
		return;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			return;

		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return;

		coal_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*th),
					   &__th);
		if (!th)
			return;

		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*uh),
					   &__uh);
		if (!uh)
			return;

		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes += coal_desc->len;
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;
	coal_desc->coal_bytes = coal_desc->len;
	rmnet_descriptor_for_each_frag(frag, coal_desc)
		coal_desc->coal_bufsize +=
			page_size(skb_frag_page(&frag->frag));

	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len +
				       coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len +
				       coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}
		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = rmnet_frag_header_ptr(frag_desc,
					 sizeof(struct rmnet_map_header),
					 sizeof(*coal_hdr), &__coal_hdr);
	if (!coal_hdr)
		return -EINVAL;

	veid = coal_hdr->virtual_channel_id;
	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}
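
/* Checksum a QMAPv5 packet in software, walking each fragment in turn.
 * Returns 1 if the transport checksum is valid, 0 if it is invalid, and a
 * negative value if the packet cannot be checksummed.
 */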
  1299. static int rmnet_frag_checksum_pkt(struct rmnet_frag_descriptor *frag_desc)
  1300. {
  1301. struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
  1302. struct rmnet_fragment *frag;
  1303. int offset = sizeof(struct rmnet_map_header) +
  1304. sizeof(struct rmnet_map_v5_csum_header);
  1305. u8 *version, __version;
  1306. __wsum csum;
  1307. u16 csum_len;
  1308. version = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*version),
  1309. &__version);
  1310. if (!version)
  1311. return -EINVAL;
        if ((*version & 0xF0) == 0x40) {
                struct iphdr *iph;
                u8 __iph[60]; /* Max IP header size (0xF * 4) */

                /* We need to access the entire IP header including options
                 * to validate its checksum. Fortunately, the version byte
                 * also will tell us the length, so we only need to pull
                 * once ;)
                 */
                frag_desc->ip_len = (*version & 0xF) * 4;
                iph = rmnet_frag_header_ptr(frag_desc, offset,
                                            frag_desc->ip_len,
                                            __iph);
                if (!iph || ip_is_fragment(iph))
                        return -EINVAL;

                /* Length needs to be sensible */
                csum_len = ntohs(iph->tot_len);
                if (csum_len > frag_desc->len - offset)
                        return -EINVAL;

                csum_len -= frag_desc->ip_len;

                /* IPv4 checksum must be valid */
                if (ip_fast_csum((u8 *)iph, iph->ihl)) {
                        priv->stats.csum_sw++;
                        return 0;
                }

                frag_desc->ip_proto = 4;
                frag_desc->trans_proto = iph->protocol;
                csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                          csum_len,
                                          frag_desc->trans_proto, 0);
        } else if ((*version & 0xF0) == 0x60) {
                struct ipv6hdr *ip6h, __ip6h;
                int ip_len;
                __be16 frag_off;
                u8 protocol;

                ip6h = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*ip6h),
                                             &__ip6h);
                if (!ip6h)
                        return -EINVAL;

                frag_desc->ip_proto = 6;
                protocol = ip6h->nexthdr;
                ip_len = rmnet_frag_ipv6_skip_exthdr(frag_desc,
                                                     offset + sizeof(*ip6h),
                                                     &protocol, &frag_off);
                if (ip_len < 0 || frag_off)
                        return -EINVAL;

                /* Length needs to be sensible */
                frag_desc->ip_len = (u16)ip_len;
                csum_len = ntohs(ip6h->payload_len);
                if (csum_len + frag_desc->ip_len > frag_desc->len - offset)
                        return -EINVAL;

                frag_desc->trans_proto = protocol;
                csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                        csum_len,
                                        frag_desc->trans_proto, 0);
        } else {
                /* Not checksumable */
                return -EINVAL;
        }

        /* Protocol check */
        if (frag_desc->trans_proto != IPPROTO_TCP &&
            frag_desc->trans_proto != IPPROTO_UDP)
                return -EINVAL;

        offset += frag_desc->ip_len;

        /* Check for UDP zero csum packets */
        if (frag_desc->trans_proto == IPPROTO_UDP) {
                struct udphdr *uh, __uh;

                uh = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*uh),
                                           &__uh);
                if (!uh)
                        return -EINVAL;

                if (!uh->check) {
                        if (frag_desc->ip_proto == 4) {
                                /* Zero checksum is valid */
                                priv->stats.csum_sw++;
                                return 1;
                        }

                        /* Not valid in IPv6 */
                        priv->stats.csum_sw++;
                        return 0;
                }
        }

        /* Walk the frags and checksum each chunk */
        list_for_each_entry(frag, &frag_desc->frags, list) {
                u32 frag_size = skb_frag_size(&frag->frag);

                if (!csum_len)
                        break;

                if (offset < frag_size) {
                        void *addr = skb_frag_address(&frag->frag) + offset;
                        u32 len = min_t(u32, csum_len, frag_size - offset);

                        /* Checksum 'len' bytes and add them in */
                        csum = csum_partial(addr, len, csum);
                        csum_len -= len;
                        offset = 0;
                } else {
                        offset -= frag_size;
                }
        }
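
        /* csum was seeded with the complement of the pseudo-header sum, so
         * summing the transport header and payload (including the packet's
         * own checksum field) yields all-ones for a valid checksum, which
         * folds to zero.
         */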
        priv->stats.csum_sw++;
        return !csum_fold(csum);
}

/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
                                       struct rmnet_port *port,
                                       struct list_head *list,
                                       u16 len)
{
        struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
        struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
        u64 nlo_err_mask;
        u32 offset = sizeof(struct rmnet_map_header);
        int rc = 0;

        /* Grab the header type. It's easier to grab enough for a full csum
         * offload header here since it's only 8 bytes and then check the
         * header type using that. This also doubles as a check to make sure
         * there's enough data after the QMAP header to ensure that another
         * header is present.
         */
        csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
                                         &__csum_hdr);
        if (!csum_hdr)
                return -EINVAL;
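
        /* Both v5 header types appear to share the position of the type
         * field, so the csum header read above is enough to dispatch on:
         * coalescing headers are segmented into per-packet descriptors,
         * while csum offload headers only need their checksum result
         * recorded before being stripped.
         */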
        switch (csum_hdr->header_type) {
        case RMNET_MAP_HEADER_TYPE_COALESCING:
                priv->stats.coal.coal_rx++;
                rc = rmnet_frag_data_check_coal_header(frag_desc,
                                                       &nlo_err_mask);
                if (rc)
                        return rc;

                rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
                                             list);
                if (list_first_entry(list, struct rmnet_frag_descriptor,
                                     list) != frag_desc)
                        rmnet_recycle_frag_descriptor(frag_desc, port);
                break;
        case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
                if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
                        priv->stats.csum_sw++;
                } else if (csum_hdr->csum_valid_required) {
                        priv->stats.csum_ok++;
                        frag_desc->csum_valid = true;
                } else {
                        int valid = rmnet_frag_checksum_pkt(frag_desc);

                        if (valid < 0) {
                                priv->stats.csum_validation_failed++;
                        } else if (valid) {
                                /* All's good */
                                priv->stats.csum_ok++;
                                frag_desc->csum_valid = true;
                        } else {
                                /* Checksum is actually bad */
                                priv->stats.csum_valid_unset++;
                        }
                }

                if (!rmnet_frag_pull(frag_desc, port,
                                     offset + sizeof(*csum_hdr))) {
                        rc = -EINVAL;
                        break;
                }

                /* Remove padding only for csum offload packets.
                 * Coalesced packets should never have padding.
                 */
                if (!rmnet_frag_trim(frag_desc, port, len)) {
                        rc = -EINVAL;
                        break;
                }

                list_del_init(&frag_desc->list);
                list_add_tail(&frag_desc->list, list);
                break;
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);
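
/* When a perf module (e.g. rmnet_perf) has registered this hook, finished
 * descriptors are handed to it instead of being delivered directly; the
 * exact processing it performs is up to that module.
 */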
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
                             struct rmnet_port *port)
{
        rmnet_perf_desc_hook_t rmnet_perf_ingress;
        struct rmnet_map_header *qmap, __qmap;
        struct rmnet_endpoint *ep;
        struct rmnet_frag_descriptor *frag, *tmp;
        LIST_HEAD(segs);
        u16 len, pad;
        u8 mux_id;
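        /* 0xda1a looks to be a sentinel priority ("data") marking packets
         * that must bypass the perf hooks; treat that as an assumption, as
         * the value is defined outside this file.
         */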
        bool skip_perf = (frag_desc->priority == 0xda1a);

        qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
        if (!qmap)
                goto recycle;

        mux_id = qmap->mux_id;
        pad = qmap->pad_len;
        len = ntohs(qmap->pkt_len) - pad;

        if (qmap->cd_bit) {
                qmi_rmnet_set_dl_msg_active(port);
                if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
                        rmnet_frag_flow_command(frag_desc, port, len);
                        goto recycle;
                }

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        rmnet_frag_command(frag_desc, qmap, port);

                goto recycle;
        }

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto recycle;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto recycle;

        frag_desc->dev = ep->egress_dev;

        /* Handle QMAPv5 packet */
        if (qmap->next_hdr &&
            (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
                if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
                                                       len))
                        goto recycle;
        } else {
                /* We only have the main QMAP header to worry about */
                if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
                        return;

                if (!rmnet_frag_trim(frag_desc, port, len))
                        return;

                list_add_tail(&frag_desc->list, &segs);
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port);

        if (skip_perf)
                goto no_perf;

        rcu_read_lock();
        rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
        if (rmnet_perf_ingress) {
                list_for_each_entry_safe(frag, tmp, &segs, list) {
                        list_del_init(&frag->list);
                        rmnet_perf_ingress(frag, port);
                }
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

no_perf:
        list_for_each_entry_safe(frag, tmp, &segs, list) {
                list_del_init(&frag->list);
                rmnet_frag_deliver(frag, port);
        }

        return;

recycle:
        rmnet_recycle_frag_descriptor(frag_desc, port);
}

/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);
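
/* Bucket the number of SKBs seen in one chain into dl_chain_stat: buckets
 * 0-5 cover chains of 0-9, 10-19, ..., 50-59 SKBs, and bucket 6 collects
 * everything from 60 up. Note that each bucket accumulates the chain count
 * itself rather than the number of chains.
 */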
void rmnet_descriptor_classify_chain_count(u64 chain_count,
                                           struct rmnet_port *port)
{
        u64 index;

        if (chain_count >= 60) {
                port->stats.dl_chain_stat[6] += chain_count;
                return;
        }

        index = chain_count;
        do_div(index, 10);
        port->stats.dl_chain_stat[index] += chain_count;
}
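
/* Bucket the per-SKB fragment count similarly: counts of 0 or 1 go to
 * dl_frag_stat_1, counts of 2-15 land in dl_frag_stat[0..3] in groups of
 * four, and counts of 16 or more all land in dl_frag_stat[4].
 */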
void rmnet_descriptor_classify_frag_count(u64 frag_count,
                                          struct rmnet_port *port)
{
        u64 index;

        if (frag_count <= 1) {
                port->stats.dl_frag_stat_1 += frag_count;
                return;
        }

        if (frag_count >= 16) {
                port->stats.dl_frag_stat[4] += frag_count;
                return;
        }

        index = frag_count;
        do_div(index, 4);
        port->stats.dl_frag_stat[index] += frag_count;
}
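
/* Ingress entry point for the descriptor path. Each SKB in the hardware
 * chain (linked via frag_list) is deaggregated into frag descriptors, which
 * are then processed one at a time; the chain-end perf hook fires once the
 * whole chain has been consumed, unless the skip sentinel is set.
 */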
void rmnet_frag_ingress_handler(struct sk_buff *skb,
                                struct rmnet_port *port)
{
        rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
        LIST_HEAD(desc_list);
        bool skip_perf = (skb->priority == 0xda1a);
        u64 chain_count = 0;

        /* Deaggregation and freeing of HW originating
         * buffers is done within here
         */
        while (skb) {
                struct sk_buff *skb_frag;

                chain_count++;
                rmnet_descriptor_classify_frag_count(skb_shinfo(skb)->nr_frags,
                                                     port);
                rmnet_frag_deaggregate(skb, port, &desc_list, skb->priority);
                if (!list_empty(&desc_list)) {
                        struct rmnet_frag_descriptor *frag_desc, *tmp;

                        list_for_each_entry_safe(frag_desc, tmp, &desc_list,
                                                 list) {
                                list_del_init(&frag_desc->list);
                                __rmnet_frag_ingress_handler(frag_desc, port);
                        }
                }

                skb_frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                consume_skb(skb);
                skb = skb_frag;
        }

        rmnet_descriptor_classify_chain_count(chain_count, port);

        if (skip_perf)
                return;

        rcu_read_lock();
        rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
        if (rmnet_perf_opt_chain_end)
                rmnet_perf_opt_chain_end();
        rcu_read_unlock();
}
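
/* Free the descriptor pool for a port. Only descriptors sitting on the
 * pool's free_list are reclaimed here; anything still in flight is expected
 * to have been returned to the pool before teardown.
 */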
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool;
        struct rmnet_frag_descriptor *frag_desc, *tmp;

        pool = port->frag_desc_pool;
        list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
                kfree(frag_desc);
                pool->pool_size--;
        }

        kfree(pool);
}
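
/* Preallocate the descriptor pool for a port. Allocations use GFP_ATOMIC,
 * presumably because this can be called from contexts that cannot sleep. On
 * a mid-loop allocation failure the pool is left partially filled; the
 * caller is presumably expected to unwind via rmnet_descriptor_deinit().
 */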
int rmnet_descriptor_init(struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool;
        int i;

        spin_lock_init(&port->desc_pool_lock);
        pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
        if (!pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&pool->free_list);
        port->frag_desc_pool = pool;

        for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
                struct rmnet_frag_descriptor *frag_desc;

                frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
                if (!frag_desc)
                        return -ENOMEM;

                INIT_LIST_HEAD(&frag_desc->list);
                INIT_LIST_HEAD(&frag_desc->frags);
                list_add_tail(&frag_desc->list, &pool->free_list);
                pool->pool_size++;
        }

        return 0;
}