rmnet_descriptor.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "rmnet_trace.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define rmnet_descriptor_for_each_frag(p, desc) \
	list_for_each_entry(p, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe(p, tmp, desc) \
	list_for_each_entry_safe(p, tmp, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe_reverse(p, tmp, desc) \
	list_for_each_entry_safe_reverse(p, tmp, &desc->frags, list)

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
typedef void (*rmnet_perf_tether_ingress_hook_t)(struct tcphdr *tp,
						 struct sk_buff *skb);
rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_ingress_hook);
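
/* rmnet_get_frag_descriptor() - grab a descriptor from the recycling pool.
 *
 * Descriptors are kept on a per-port free list to avoid an allocation per
 * QMAP packet; only when the list is empty does the pool grow via a fresh
 * GFP_ATOMIC allocation. A minimal usage sketch (error handling elided,
 * and assuming the caller already holds a valid port):
 *
 *	struct rmnet_frag_descriptor *desc;
 *
 *	desc = rmnet_get_frag_descriptor(port);
 *	if (!desc)
 *		return;		// pool empty and kzalloc failed
 *	// ... attach page frags, deliver ...
 *	rmnet_recycle_frag_descriptor(desc, port);
 */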
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);

void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_fragment *frag, *tmp;
	unsigned long flags;

	list_del(&frag_desc->list);

	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *page = skb_frag_page(&frag->frag);

		if (page)
			put_page(page);

		list_del(&frag->list);
		kfree(frag);
	}

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
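
/* rmnet_frag_pull() - the frag-descriptor analogue of skb_pull().
 *
 * Removes @size bytes from the front of the descriptor, dropping (and
 * unreferencing) any page fragments that become fully consumed. Pulling
 * the entire descriptor (or more) is treated as an error: the descriptor
 * is recycled and NULL is returned, so callers must check the return
 * value before touching the data again.
 */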
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;

	if (size >= frag_desc->len) {
		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
			__func__, size, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!size)
			break;

		if (size >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			size -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Pull off 'size' bytes */
		skb_frag_off_add(&frag->frag, size);
		skb_frag_size_sub(&frag->frag, size);
		frag_desc->len -= size;
		break;
	}

	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_pull);
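
/* rmnet_frag_trim() - the frag-descriptor analogue of skb_trim().
 *
 * Shortens the descriptor to exactly @size bytes by walking the fragment
 * list in reverse and chopping data off the tail. Trimming to zero is
 * treated as a drop, and growing is silently ignored since there is no
 * data to extend into.
 */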
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;
	unsigned int eat;

	if (!size) {
		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
			__func__, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	/* Growing bigger doesn't make sense */
	if (size >= frag_desc->len)
		goto out;

	/* Compute number of bytes to remove from the end */
	eat = frag_desc->len - size;
	rmnet_descriptor_for_each_frag_safe_reverse(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!eat)
			goto out;

		if (eat >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			eat -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Chop off 'eat' bytes from the end */
		skb_frag_size_sub(&frag->frag, eat);
		frag_desc->len -= eat;
		goto out;
	}

out:
	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_trim);

static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
				u32 off, u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u32 frag_size, copy_len;
	u32 buf_offset = 0;

	/* Don't make me do something we'd both regret */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return -EINVAL;

	/* Copy 'len' bytes into the buffer starting from 'off' */
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			copy_len = min_t(u32, len, frag_size - off);
			memcpy(buf + buf_offset,
			       skb_frag_address(&frag->frag) + off,
			       copy_len);
			buf_offset += copy_len;
			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
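
/* rmnet_frag_header_ptr() - get a pointer to a (possibly non-linear) header.
 *
 * Returns a direct pointer into the page fragment when the requested @len
 * bytes at @off happen to be contiguous; otherwise the bytes are copied
 * into the caller-supplied @buf and that is returned instead. Hence the
 * idiom used throughout this file, where the caller provides a stack copy
 * of the header as scratch space:
 *
 *	struct tcphdr *th, __th;
 *
 *	th = rmnet_frag_header_ptr(frag_desc, ip_len, sizeof(*th), &__th);
 *	if (!th)
 *		return;
 *	// 'th' is usable either way; it may or may not point at __th
 */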
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u8 *start;
	u32 frag_size, offset;

	/* Don't take a long pointer off a short frag */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return NULL;

	/* Find the starting fragment */
	offset = off;
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			start = skb_frag_address(&frag->frag) + off;
			/* If the header is entirely on this frag, just return
			 * a pointer to it.
			 */
			if (off + len <= frag_size)
				return start;

			/* Otherwise, we need to copy the data into a linear
			 * buffer.
			 */
			break;
		}

		off -= frag_size;
	}

	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
		return NULL;

	return buf;
}
EXPORT_SYMBOL(rmnet_frag_header_ptr);
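
/* rmnet_frag_descriptor_add_frag() - append a page region to a descriptor.
 *
 * Each fragment takes its own reference on the page via get_page(), so a
 * single receive page can safely back many descriptors at once; the
 * matching put_page() happens when the fragment is consumed by
 * rmnet_frag_pull()/rmnet_frag_trim() or when the descriptor is recycled.
 */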
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_fragment *frag;

	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
	if (!frag)
		return -ENOMEM;

	INIT_LIST_HEAD(&frag->list);
	get_page(p);
	__skb_frag_set_page(&frag->frag, p);
	skb_frag_size_set(&frag->frag, len);
	skb_frag_off_set(&frag->frag, page_offset);
	list_add_tail(&frag->list, &frag_desc->frags);
	frag_desc->len += len;
	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);

int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len)
{
	struct rmnet_fragment *frag;
	int rc;

	/* Sanity check the lengths */
	if (off > from->len || len > from->len || off + len > from->len)
		return -EINVAL;

	rmnet_descriptor_for_each_frag(frag, from) {
		u32 frag_size;

		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			struct page *p = skb_frag_page(&frag->frag);
			u32 page_off = skb_frag_off(&frag->frag);
			u32 copy_len = min_t(u32, len, frag_size - off);

			rc = rmnet_frag_descriptor_add_frag(to, p,
							    page_off + off,
							    copy_len);
			if (rc < 0)
				return rc;

			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);
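
/* rmnet_frag_ipv6_skip_exthdr() - frag-descriptor port of ipv6_skip_exthdr().
 *
 * Walks the IPv6 extension header chain starting at @start, returning the
 * offset of the upper-layer header and updating *nexthdrp to its protocol.
 * If a fragment header is found, its frag_off field is reported through
 * *fragp so callers can reject fragmented packets.
 */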
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp, __hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
					   &__hp);
		if (!hp)
			return -EINVAL;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			u32 off = offsetof(struct frag_hdr, frag_off);
			__be16 *fp, __fp;

			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
						   sizeof(*fp), &__fp);
			if (!fp)
				return -EINVAL;

			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);

static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_map_control_command *cmd,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 sequences. User space does not support creating dedicated flows
	 * for the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}

static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

static void
rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
			      struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dlhdr), &__dlhdr);
	if (!dlhdr)
		return;

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play.
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

static void
rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dltrl), &__dltrl);
	if (!dltrl)
		return;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play.
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd, __cmd;
	unsigned char rc = 0;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
				    &__cmd);
	if (!cmd)
		return;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}

int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd, __cmd;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
				    sizeof(*cmd), &__cmd);
	if (!cmd)
		return -1;

	/* Silently discard any DL markers received over the LL channel */
	if (frag_desc->priority == 0xda1a &&
	    (cmd->command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     cmd->command_name == RMNET_MAP_COMMAND_FLOW_END))
		return 0;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);
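
/* rmnet_frag_deaggregate_one() - carve one QMAP packet out of an skb.
 *
 * A single downlink skb from the hardware can carry many QMAP packets back
 * to back across its page fragments. Starting at byte offset @start, this
 * reads the MAP header (copying it to the stack if it straddles a page
 * boundary), computes the full on-the-wire length (MAP header + payload +
 * any v4 checksum trailer or v5 header), and attaches the covered page
 * regions to a fresh descriptor. Returns the number of bytes consumed, or
 * -1 on failure.
 */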
static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u32 start, u32 priority)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph, __maph;
	skb_frag_t *frag;
	u32 start_frag, offset, i;
	u32 start_frag_size, start_frag_off;
	u32 pkt_len, copy_len = 0;
	int rc;

	for (start_frag = 0, offset = 0; start_frag < shinfo->nr_frags;
	     start_frag++) {
		frag = &shinfo->frags[start_frag];
		if (start < skb_frag_size(frag) + offset)
			break;

		offset += skb_frag_size(frag);
	}

	if (start_frag == shinfo->nr_frags)
		return -1;

	/* start - offset is the additional offset into the page to account
	 * for any data on it we've already used.
	 */
	start_frag_size = skb_frag_size(frag) - (start - offset);
	start_frag_off = skb_frag_off(frag) + (start - offset);

	/* Grab the QMAP header. Careful, as there's no guarantee that it's
	 * contiguous!
	 */
	if (likely(start_frag_size >= sizeof(*maph))) {
		maph = skb_frag_address(frag) + (start - offset);
	} else {
		/* The header's split across pages. We can rebuild it.
		 * Probably not faster or stronger than before. But certainly
		 * more linear.
		 */
		if (skb_copy_bits(skb, start, &__maph, sizeof(__maph)) < 0)
			return -1;

		maph = &__maph;
	}

	pkt_len = ntohs(maph->pkt_len);
	/* Catch empty frames */
	if (!pkt_len)
		return -1;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return -1;

	frag_desc->priority = priority;
	pkt_len += sizeof(*maph);
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if ((port->data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
					 RMNET_FLAGS_INGRESS_COALESCE)) &&
		   !maph->cd_bit) {
		u32 hsize = 0;
		u8 type;

		/* Check the type. This seems like it should be overkill for
		 * less than a single byte, doesn't it?
		 */
		if (likely(start_frag_size >= sizeof(*maph) + 1)) {
			type = *((u8 *)maph + sizeof(*maph));
		} else {
			if (skb_copy_bits(skb, start + sizeof(*maph), &type,
					  sizeof(type)) < 0) {
				rmnet_recycle_frag_descriptor(frag_desc, port);
				return -1;
			}
		}

		/* Type only uses the first 7 bits */
		switch ((type & 0xFE) >> 1) {
		case RMNET_MAP_HEADER_TYPE_COALESCING:
			hsize = sizeof(struct rmnet_map_v5_coal_header);
			break;

		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
			hsize = sizeof(struct rmnet_map_v5_csum_header);
			break;
		}

		pkt_len += hsize;
	}

	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
		u32 size, off;
		u32 copy;

		frag = &shinfo->frags[i];
		size = skb_frag_size(frag);
		off = skb_frag_off(frag);
		if (i == start_frag) {
			/* These are different for the first one to account for
			 * the starting offset.
			 */
			size = start_frag_size;
			off = start_frag_off;
		}

		copy = min_t(u32, size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
						    skb_frag_page(frag), off,
						    copy);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}

		pkt_len -= copy;
		copy_len += copy;
		/* If the fragment is exhausted, we can move to the next one */
		if (!(size - copy_len)) {
			i++;
			copy_len = 0;
		}
	}

	if (pkt_len) {
		/* Packet length is larger than the amount of data we have */
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return -1;
	}

	list_add_tail(&frag_desc->list, list);
	return (int)frag_desc->len;
}

void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list, u32 priority)
{
	u32 start = 0;
	int rc;

	while (start < skb->len) {
		rc = rmnet_frag_deaggregate_one(skb, port, list, start,
						priority);
		if (rc < 0)
			return;

		start += (u32)rc;
	}
}
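
/* A minimal sketch of how the deaggregation entry point above is meant to
 * be driven (assumed caller shape; the real ingress path lives in
 * rmnet_handlers.c and also dispatches MAP commands and perf hooks):
 *
 *	LIST_HEAD(desc_list);
 *	struct rmnet_frag_descriptor *desc, *tmp;
 *
 *	rmnet_frag_deaggregate(skb, port, &desc_list, 0);
 *	list_for_each_entry_safe(desc, tmp, &desc_list, list)
 *		__rmnet_frag_ingress_handler(desc, port);
 */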
/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}

/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress;
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);

		rmnet_perf_tether_ingress =
			rcu_dereference(rmnet_perf_tether_ingress_hook);
		if (rmnet_perf_tether_ingress)
			rmnet_perf_tether_ingress(tp, skb);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}

/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_fragment *frag, *tmp;
	struct rmnet_skb_cb *cb;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
				     skb_put(head_skb, hdr_len));
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* Pull the headers off carefully */
		if (hdr_len == frag_desc->len)
			/* Fast forward "header only" packets */
			goto skip_frags;

		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
			kfree_skb(head_skb);
			return NULL;
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}

	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in the page fragments */
	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *p = skb_frag_page(&frag->frag);
		u32 frag_size = skb_frag_size(&frag->frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(p);
			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
					skb_frag_off(&frag->frag), frag_size,
					frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);
	cb = RMNET_SKB_CB(head_skb);
	cb->coal_bytes = frag_desc->coal_bytes;
	cb->coal_bufsize = frag_desc->coal_bufsize;

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	if (frag_desc->tcp_flags_set) {
		struct tcphdr *th;
		__be16 *flags;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		flags = (__be16 *)&tcp_flag_word(th);
		*flags = frag_desc->tcp_flags;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		cb->flush_shs = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	/* Propagate original priority value */
	head_skb->priority = frag_desc->priority;

	if (trace_print_tcp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_tcp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_tcp_rx(head_skb, saddr, daddr, tcp_hdr(head_skb));
	}

skip_trace_print_tcp_rx:
	if (trace_print_udp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_udp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_udp_rx(head_skb, saddr, daddr, udp_hdr(head_skb));
	}

skip_trace_print_udp_rx:
	return head_skb;
}

/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);
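
/* __rmnet_frag_segment_data() - split one segment out of a coalesced frame.
 *
 * The new descriptor shares the original header fragments plus the next
 * gso_size * gso_segs bytes of payload (tracked via coal_desc->data_offset);
 * no data is copied, only page references are taken. TCP sequence numbers,
 * IPv4 IDs, and trailing FIN/PSH flags are then patched so each segment
 * looks like it arrived on its own. For example, with a gso_size of 1400
 * and three single-packet segments, successive calls carve payload at
 * data_offset 0, 1400, and 2800, bumping the TCP sequence by data_offset
 * and the IP ID by the running pkt_id each time.
 */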
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_desc;
	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
	u32 offset = hlen + coal_desc->data_offset;
	int rc;

	new_desc = rmnet_get_frag_descriptor(port);
	if (!new_desc)
		return;

	/* Header information and most metadata is the same as the original */
	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_desc->list);
	INIT_LIST_HEAD(&new_desc->frags);
	new_desc->len = 0;

	/* Add the header fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
						  hlen);
	if (rc < 0)
		goto recycle;

	/* Add in the data fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
						  dlen);
	if (rc < 0)
		goto recycle;

	/* Update protocol-specific metadata */
	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*th), &__th);
		if (!th)
			goto recycle;

		new_desc->tcp_seq_set = 1;
		new_desc->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);

		/* Don't allow any dangerous flags to appear in any segments
		 * other than the last.
		 */
		if (th->fin || th->psh) {
			if (offset + dlen < coal_desc->len) {
				__be32 flag_word = tcp_flag_word(th);

				/* Clear the FIN and PSH flags from this
				 * segment.
				 */
				flag_word &= ~TCP_FLAG_FIN;
				flag_word &= ~TCP_FLAG_PSH;

				new_desc->tcp_flags_set = 1;
				new_desc->tcp_flags = *((__be16 *)&flag_word);
			}
		}
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*uh), &__uh);
		if (!uh)
			goto recycle;

		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			goto recycle;

		new_desc->ip_id_set = 1;
		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_desc->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += dlen;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	/* Only relevant for the first segment to avoid overcounting */
	coal_desc->coal_bytes = 0;
	coal_desc->coal_bufsize = 0;

	list_add_tail(&new_desc->list, list);
	return;

recycle:
	rmnet_recycle_frag_descriptor(new_desc, port);
}

static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	/* Keep analysis tools happy, since they will see that
	 * rmnet_frag_data_ptr() could return NULL. It can't in this case,
	 * since we can't get this far otherwise...
	 */
	if (unlikely(!data))
		return false;

	datagram_len = frag_desc->len - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}
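
/* The validation above relies on the usual Internet checksum identity:
 * folding (pseudo-header sum + transport header and payload sum) yields
 * 0xFFFF (i.e. csum_fold() == 0) exactly when the packet's transport
 * checksum is correct, so no per-field comparison is needed.
 */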
/* Converts the coalesced frame into a list of descriptors */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header coal_hdr;
	struct rmnet_fragment *frag;
	u8 *version;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Copy the coal header into our local storage before pulling it. It's
	 * possible that this header (or part of it) is the last part of a
	 * page and pulling it off would cause it to be freed. Referring back
	 * to the header would be invalid in that case.
	 */
	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
				 sizeof(coal_hdr), &coal_hdr) < 0)
		return;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
					      sizeof(coal_hdr)))
		return;

	/* By definition, this byte is linear, and the first byte on the
	 * first fragment. ;) Hence why no header_ptr() call is needed
	 * for it.
	 */
	version = rmnet_frag_data_ptr(coal_desc);
	if (unlikely(!version))
		return;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			return;

		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return;

		coal_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*th),
					   &__th);
		if (!th)
			return;

		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*uh),
					   &__uh);
		if (!uh)
			return;

		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes += coal_desc->len;
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;
	coal_desc->coal_bytes = coal_desc->len;
	rmnet_descriptor_for_each_frag(frag, coal_desc)
		coal_desc->coal_bufsize +=
			page_size(skb_frag_page(&frag->frag));

	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}
		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = rmnet_frag_header_ptr(frag_desc,
					 sizeof(struct rmnet_map_header),
					 sizeof(*coal_hdr), &__coal_hdr);
	if (!coal_hdr)
		return -EINVAL;

	veid = coal_hdr->virtual_channel_id;
	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}
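
/* Layout of the nlo_err_mask built above, assuming the usual 8-bit
 * csum_error_bitmap per NLO: bits [8*i .. 8*i+7] hold the per-packet
 * checksum error flags for NLO i, least-significant bit first. The
 * segmentation loop in rmnet_frag_segment_coal_data() then consumes the
 * mask one bit per packet, shifting it right as it walks the frame.
 */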
static int rmnet_frag_checksum_pkt(struct rmnet_frag_descriptor *frag_desc)
{
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	struct rmnet_fragment *frag;
	int offset = sizeof(struct rmnet_map_header) +
		     sizeof(struct rmnet_map_v5_csum_header);
	u8 *version, __version;
	__wsum csum;
	u16 csum_len;

	version = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*version),
					&__version);
	if (!version)
		return -EINVAL;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph;
		u8 __iph[60]; /* Max IP header size (0xF * 4) */

		/* We need to access the entire IP header including options
		 * to validate its checksum. Fortunately, the version byte
		 * also will tell us the length, so we only need to pull
		 * once ;)
		 */
		frag_desc->ip_len = (*version & 0xF) * 4;
		iph = rmnet_frag_header_ptr(frag_desc, offset,
					    frag_desc->ip_len,
					    __iph);
		if (!iph || ip_is_fragment(iph))
			return -EINVAL;

		/* Length needs to be sensible */
		csum_len = ntohs(iph->tot_len);
		if (csum_len > frag_desc->len - offset)
			return -EINVAL;

		csum_len -= frag_desc->ip_len;

		/* IPv4 checksum must be valid. Note that ip_fast_csum()
		 * takes the header length in 32-bit words (iph->ihl), not
		 * in bytes.
		 */
		if (ip_fast_csum((u8 *)iph, iph->ihl)) {
			priv->stats.csum_sw++;
			return 0;
		}

		frag_desc->ip_proto = 4;
		frag_desc->trans_proto = iph->protocol;
		csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					  csum_len,
					  frag_desc->trans_proto, 0);
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return -EINVAL;

		frag_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(frag_desc,
						     offset + sizeof(*ip6h),
						     &protocol, &frag_off);
		if (ip_len < 0 || frag_off)
			return -EINVAL;

		/* Length needs to be sensible */
		frag_desc->ip_len = (u16)ip_len;
		csum_len = ntohs(ip6h->payload_len);
		if (csum_len + frag_desc->ip_len > frag_desc->len - offset)
			return -EINVAL;

		frag_desc->trans_proto = protocol;
		csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					csum_len,
					frag_desc->trans_proto, 0);
	} else {
		/* Not checksumable */
		return -EINVAL;
	}

	/* Protocol check */
	if (frag_desc->trans_proto != IPPROTO_TCP &&
	    frag_desc->trans_proto != IPPROTO_UDP)
		return -EINVAL;

	offset += frag_desc->ip_len;

	/* Check for UDP zero csum packets */
	if (frag_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*uh),
					   &__uh);
		if (!uh)
			return -EINVAL;

		if (!uh->check) {
			if (frag_desc->ip_proto == 4) {
				/* Zero checksum is valid */
				priv->stats.csum_sw++;
				return 1;
			}

			/* Not valid in IPv6 */
			priv->stats.csum_sw++;
			return 0;
		}
	}

	/* Walk the frags and checksum each chunk */
	list_for_each_entry(frag, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!csum_len)
			break;

		if (offset < frag_size) {
			void *addr = skb_frag_address(&frag->frag) + offset;
			u32 len = min_t(u32, csum_len, frag_size - offset);

			/* Checksum 'len' bytes and add them in */
			csum = csum_partial(addr, len, csum);
			csum_len -= len;

			/* Don't 'offset' into the remaining frags */
			offset = 0;
		} else {
			/* Pass the offset through this frag */
			offset -= frag_size;
		}
	}

	priv->stats.csum_sw++;

	/* A fold of zero means the checksum verified */
	return !csum_fold(csum);
}
/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len)
{
	struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 nlo_err_mask;
	u32 offset = sizeof(struct rmnet_map_header);
	int rc = 0;

	/* Grab the header type. It's easier to grab enough for a full csum
	 * offload header here since it's only 8 bytes and then check the
	 * header type using that. This also doubles as a check to make sure
	 * there's enough data after the QMAP header to ensure that another
	 * header is present.
	 */
	csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
					 &__csum_hdr);
	if (!csum_hdr)
		return -EINVAL;

	switch (csum_hdr->header_type) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_frag_data_check_coal_header(frag_desc,
						       &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
					     list);
		if (list_first_entry(list, struct rmnet_frag_descriptor,
				     list) != frag_desc)
			rmnet_recycle_frag_descriptor(frag_desc, port);
		break;
	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (csum_hdr->csum_valid_required) {
			priv->stats.csum_ok++;
			frag_desc->csum_valid = true;
		} else {
			int valid = rmnet_frag_checksum_pkt(frag_desc);

			if (valid < 0) {
				priv->stats.csum_validation_failed++;
			} else if (valid) {
				/* All's good */
				priv->stats.csum_ok++;
				frag_desc->csum_valid = true;
			} else {
				/* Checksum is actually bad */
				priv->stats.csum_valid_unset++;
			}
		}

		if (!rmnet_frag_pull(frag_desc, port,
				     offset + sizeof(*csum_hdr))) {
			rc = -EINVAL;
			break;
		}

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		if (!rmnet_frag_trim(frag_desc, port, len)) {
			rc = -EINVAL;
			break;
		}

		list_del_init(&frag_desc->list);
		list_add_tail(&frag_desc->list, list);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);
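/* Handle a single deaggregated QMAP frame: parse the MAP header, dispatch
 * command frames and DL markers, look up the logical endpoint by mux ID,
 * strip the QMAP (and any QMAPv5) headers, then hand the resulting segments
 * to the registered perf hook, or deliver them directly if none is set.
 */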
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
			     struct rmnet_port *port)
{
	rmnet_perf_desc_hook_t rmnet_perf_ingress;
	struct rmnet_map_header *qmap, __qmap;
	struct rmnet_endpoint *ep;
	struct rmnet_frag_descriptor *frag, *tmp;
	LIST_HEAD(segs);
	u16 len, pad;
	u8 mux_id;
	bool skip_perf = (frag_desc->priority == 0xda1a);

	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
	if (!qmap)
		goto recycle;

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			rmnet_frag_flow_command(frag_desc, port, len);
			goto recycle;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			rmnet_frag_command(frag_desc, qmap, port);

		goto recycle;
	}

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto recycle;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto recycle;

	frag_desc->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
						       len))
			goto recycle;
	} else {
		/* We only have the main QMAP header to worry about.
		 * On failure, the pull/trim helpers are expected to have
		 * recycled the descriptor already, so a plain return here
		 * does not leak it.
		 */
		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
			return;

		if (!rmnet_frag_trim(frag_desc, port, len))
			return;

		list_add_tail(&frag_desc->list, &segs);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	if (skip_perf)
		goto no_perf;

	rcu_read_lock();
	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
	if (rmnet_perf_ingress) {
		list_for_each_entry_safe(frag, tmp, &segs, list) {
			list_del_init(&frag->list);
			rmnet_perf_ingress(frag, port);
		}
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

no_perf:
	list_for_each_entry_safe(frag, tmp, &segs, list) {
		list_del_init(&frag->list);
		rmnet_frag_deliver(frag, port);
	}

	return;

recycle:
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);
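/* Bucket the downlink chain length into dl_chain_stat[]: buckets 0-5 cover
 * chains of 0-9, 10-19, ..., 50-59 SKBs, and bucket 6 catches chains of 60
 * or more. Note the counters accumulate the chain count itself, not the
 * number of chains observed.
 */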
void rmnet_descriptor_classify_chain_count(u64 chain_count,
					   struct rmnet_port *port)
{
	u64 index;

	if (chain_count >= 60) {
		port->stats.dl_chain_stat[6] += chain_count;
		return;
	}

	index = chain_count;
	do_div(index, 10);
	port->stats.dl_chain_stat[index] += chain_count;
}
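/* Bucket the per-SKB fragment count: dl_frag_stat_1 accumulates SKBs with
 * at most one fragment, dl_frag_stat[4] catches counts of 16 or more, and
 * counts of 2-15 land in dl_frag_stat[frag_count / 4]. As above, the
 * counters accumulate the fragment count itself.
 */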
void rmnet_descriptor_classify_frag_count(u64 frag_count,
					  struct rmnet_port *port)
{
	u64 index;

	if (frag_count <= 1) {
		port->stats.dl_frag_stat_1 += frag_count;
		return;
	}

	if (frag_count >= 16) {
		port->stats.dl_frag_stat[4] += frag_count;
		return;
	}

	index = frag_count;
	do_div(index, 4);
	port->stats.dl_frag_stat[index] += frag_count;
}
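/* Main ingress entry point for the descriptor path. Walks the frag_list
 * chain of hardware-originated SKBs, deaggregates each SKB into frag
 * descriptors, processes each descriptor, and finally notifies the perf
 * chain-end hook, unless the SKB carries the special 0xda1a priority,
 * which bypasses the perf hooks entirely.
 */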
void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port)
{
	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
	LIST_HEAD(desc_list);
	bool skip_perf = (skb->priority == 0xda1a);
	u64 chain_count = 0;

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag;

		chain_count++;
		rmnet_descriptor_classify_frag_count(skb_shinfo(skb)->nr_frags,
						     port);
		rmnet_frag_deaggregate(skb, port, &desc_list, skb->priority);
		if (!list_empty(&desc_list)) {
			struct rmnet_frag_descriptor *frag_desc, *tmp;

			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
						 list) {
				list_del_init(&frag_desc->list);
				__rmnet_frag_ingress_handler(frag_desc, port);
			}
		}

		/* Descend the frag_list chain, freeing the head each time */
		skb_frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		consume_skb(skb);
		skb = skb_frag;
	}

	rmnet_descriptor_classify_chain_count(chain_count, port);

	if (skip_perf)
		return;

	rcu_read_lock();
	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
	if (rmnet_perf_opt_chain_end)
		rmnet_perf_opt_chain_end();
	rcu_read_unlock();
}
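/* Free every descriptor remaining on the pool's free list, then the pool
 * itself. Descriptors still in flight are not reclaimed here, so the RX
 * path presumably must be quiesced before teardown.
 */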
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	struct rmnet_frag_descriptor *frag_desc, *tmp;

	pool = port->frag_desc_pool;
	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
		kfree(frag_desc);
		pool->pool_size--;
	}

	kfree(pool);
}
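/* Allocate the descriptor pool and pre-fill its free list with
 * RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors. On a mid-loop allocation
 * failure the partially-filled pool is left attached to the port,
 * presumably for rmnet_descriptor_deinit() to reclaim.
 */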
int rmnet_descriptor_init(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	spin_lock_init(&port->desc_pool_lock);
	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}