rmnet_descriptor.c
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "rmnet_trace.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define rmnet_descriptor_for_each_frag(p, desc) \
	list_for_each_entry(p, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe(p, tmp, desc) \
	list_for_each_entry_safe(p, tmp, &desc->frags, list)
#define rmnet_descriptor_for_each_frag_safe_reverse(p, tmp, desc) \
	list_for_each_entry_safe_reverse(p, tmp, &desc->frags, list)

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
typedef void (*rmnet_perf_tether_ingress_hook_t)(struct tcphdr *tp,
						 struct sk_buff *skb);
rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_ingress_hook);

struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);

void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_fragment *frag, *tmp;
	unsigned long flags;

	list_del(&frag_desc->list);

	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *page = skb_frag_page(&frag->frag);

		if (page)
			put_page(page);

		list_del(&frag->list);
		kfree(frag);
	}

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
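/* Example (illustrative sketch, not part of the driver itself): descriptors
 * taken from the pool must always be returned to it. rmnet_frag_deliver()
 * later in this file follows the same get/use/recycle pattern:
 *
 *	struct rmnet_frag_descriptor *desc;
 *
 *	desc = rmnet_get_frag_descriptor(port);
 *	if (!desc)
 *		return;
 *	// ... attach frags, process ...
 *	rmnet_recycle_frag_descriptor(desc, port);
 */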
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;

	if (size >= frag_desc->len) {
		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
			__func__, size, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!size)
			break;

		if (size >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			size -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Pull off 'size' bytes */
		skb_frag_off_add(&frag->frag, size);
		skb_frag_size_sub(&frag->frag, size);
		frag_desc->len -= size;
		break;
	}

	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_pull);
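/* Example (illustrative sketch): callers treat a NULL return as "descriptor
 * was dropped and recycled", as rmnet_frag_segment_coal_data() below does
 * when pulling the MAP and coalescing headers off the front:
 *
 *	if (!rmnet_frag_pull(coal_desc, port,
 *			     sizeof(struct rmnet_map_header) +
 *			     sizeof(coal_hdr)))
 *		return;
 */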
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;
	unsigned int eat;

	if (!size) {
		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
			__func__, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	/* Growing bigger doesn't make sense */
	if (size >= frag_desc->len)
		goto out;

	/* Compute number of bytes to remove from the end */
	eat = frag_desc->len - size;
	rmnet_descriptor_for_each_frag_safe_reverse(frag, tmp, frag_desc) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!eat)
			goto out;

		if (eat >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			eat -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Chop off 'eat' bytes from the end */
		skb_frag_size_sub(&frag->frag, eat);
		frag_desc->len -= eat;
		goto out;
	}

out:
	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_trim);
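/* Example (illustrative sketch): trimming is how MAP padding is removed once
 * the real packet length is known, mirroring the checksum-offload path in
 * rmnet_frag_process_next_hdr_packet() below:
 *
 *	if (!rmnet_frag_trim(frag_desc, port, len))
 *		return -EINVAL;	// descriptor already recycled
 */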
static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
				u32 off, u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u32 frag_size, copy_len;
	u32 buf_offset = 0;

	/* Don't make me do something we'd both regret */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return -EINVAL;

	/* Copy 'len' bytes into the buffer starting from 'off' */
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			copy_len = min_t(u32, len, frag_size - off);
			memcpy(buf + buf_offset,
			       skb_frag_address(&frag->frag) + off,
			       copy_len);
			buf_offset += copy_len;
			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u8 *start;
	u32 frag_size, offset;

	/* Don't take a long pointer off a short frag */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return NULL;

	/* Find the starting fragment */
	offset = off;
	rmnet_descriptor_for_each_frag(frag, frag_desc) {
		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			start = skb_frag_address(&frag->frag) + off;
			/* If the header is entirely on this frag, just return
			 * a pointer to it.
			 */
			if (off + len <= frag_size)
				return start;

			/* Otherwise, we need to copy the data into a linear
			 * buffer.
			 */
			break;
		}

		off -= frag_size;
	}

	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
		return NULL;

	return buf;
}
EXPORT_SYMBOL(rmnet_frag_header_ptr);
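/* Example (illustrative sketch): every caller in this file pairs the call
 * with an on-stack backing object so that a header straddling two pages can
 * still be read linearly. The returned pointer is either into the page or
 * into the backing copy:
 *
 *	struct tcphdr *th, __th;
 *
 *	th = rmnet_frag_header_ptr(frag_desc, ip_len, sizeof(*th), &__th);
 *	if (!th)
 *		return -EINVAL;
 */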
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_fragment *frag;

	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
	if (!frag)
		return -ENOMEM;

	INIT_LIST_HEAD(&frag->list);
	get_page(p);
	__skb_frag_set_page(&frag->frag, p);
	skb_frag_size_set(&frag->frag, len);
	skb_frag_off_set(&frag->frag, page_offset);
	list_add_tail(&frag->list, &frag_desc->frags);
	frag_desc->len += len;
	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len)
{
	struct rmnet_fragment *frag;
	int rc;

	/* Sanity check the lengths */
	if (off > from->len || len > from->len || off + len > from->len)
		return -EINVAL;

	rmnet_descriptor_for_each_frag(frag, from) {
		u32 frag_size;

		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			struct page *p = skb_frag_page(&frag->frag);
			u32 page_off = skb_frag_off(&frag->frag);
			u32 copy_len = min_t(u32, len, frag_size - off);

			rc = rmnet_frag_descriptor_add_frag(to, p,
							    page_off + off,
							    copy_len);
			if (rc < 0)
				return rc;

			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);
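/* Example (illustrative sketch): segmentation clones byte ranges by
 * reference rather than by copy. __rmnet_frag_segment_data() below builds
 * each segment as "shared headers + this segment's payload slice":
 *
 *	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
 *						  hlen);
 *	if (rc < 0)
 *		goto recycle;
 *	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc,
 *						  offset, dlen);
 */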
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp, __hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
					   &__hp);
		if (!hp)
			return -EINVAL;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			u32 off = offsetof(struct frag_hdr, frag_off);
			__be16 *fp, __fp;

			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
						   sizeof(*fp), &__fp);
			if (!fp)
				return -EINVAL;

			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
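/* Example (illustrative sketch): the coalescing path uses this to find the
 * transport header behind any IPv6 extension headers, bailing out if a
 * fragment header is present:
 *
 *	u8 protocol = ip6h->nexthdr;
 *	__be16 frag_off;
 *	int ip_len;
 *
 *	ip_len = rmnet_frag_ipv6_skip_exthdr(desc, sizeof(*ip6h),
 *					     &protocol, &frag_off);
 *	if (ip_len < 0 || frag_off)
 *		return;		// malformed or fragmented
 */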
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_map_control_command *cmd,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 sequences. User space does not support creating dedicated flows
	 * for the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}
static void
rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
			      struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dlhdr), &__dlhdr);
	if (!dlhdr)
		return;

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}
static void
rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dltrl), &__dltrl);
	if (!dltrl)
		return;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking the frag path, we can assume DL marker v2 is
	 * in play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd, __cmd;
	unsigned char rc = 0;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
				    &__cmd);
	if (!cmd)
		return;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}
int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd, __cmd;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
				    sizeof(*cmd), &__cmd);
	if (!cmd)
		return -1;

	/* Silently discard any markers received over the LL channel */
	if (frag_desc->priority == 0xda1a &&
	    (cmd->command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     cmd->command_name == RMNET_MAP_COMMAND_FLOW_END))
		return 0;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);
static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u32 start, u32 priority)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph, __maph;
	skb_frag_t *frag;
	u32 start_frag, offset, i;
	u32 start_frag_size, start_frag_off;
	u32 pkt_len, copy_len = 0;
	int rc;

	for (start_frag = 0, offset = 0; start_frag < shinfo->nr_frags;
	     start_frag++) {
		frag = &shinfo->frags[start_frag];
		if (start < skb_frag_size(frag) + offset)
			break;

		offset += skb_frag_size(frag);
	}

	if (start_frag == shinfo->nr_frags)
		return -1;

	/* start - offset is the additional offset into the page to account
	 * for any data on it we've already used.
	 */
	start_frag_size = skb_frag_size(frag) - (start - offset);
	start_frag_off = skb_frag_off(frag) + (start - offset);

	/* Grab the QMAP header. Careful, as there's no guarantee that it's
	 * contiguous!
	 */
	if (likely(start_frag_size >= sizeof(*maph))) {
		maph = skb_frag_address(frag) + (start - offset);
	} else {
		/* The header's split across pages. We can rebuild it.
		 * Probably not faster or stronger than before. But certainly
		 * more linear.
		 */
		if (skb_copy_bits(skb, start, &__maph, sizeof(__maph)) < 0)
			return -1;

		maph = &__maph;
	}

	pkt_len = ntohs(maph->pkt_len);
	/* Catch empty frames */
	if (!pkt_len)
		return -1;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return -1;

	frag_desc->priority = priority;
	pkt_len += sizeof(*maph);
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if ((port->data_format & (RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5 |
					 RMNET_FLAGS_INGRESS_COALESCE)) &&
		   !maph->cd_bit) {
		u32 hsize = 0;
		u8 type;

		/* Check the type. This seems like it should be overkill for
		 * less than a single byte, doesn't it?
		 */
		if (likely(start_frag_size >= sizeof(*maph) + 1)) {
			type = *((u8 *)maph + sizeof(*maph));
		} else {
			if (skb_copy_bits(skb, start + sizeof(*maph), &type,
					  sizeof(type)) < 0) {
				/* Don't leak the descriptor we just took */
				rmnet_recycle_frag_descriptor(frag_desc, port);
				return -1;
			}
		}

		/* Type only uses the first 7 bits */
		switch ((type & 0xFE) >> 1) {
		case RMNET_MAP_HEADER_TYPE_COALESCING:
			hsize = sizeof(struct rmnet_map_v5_coal_header);
			break;
		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
			hsize = sizeof(struct rmnet_map_v5_csum_header);
			break;
		}

		pkt_len += hsize;
	}

	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
		u32 size, off;
		u32 copy;

		frag = &shinfo->frags[i];
		size = skb_frag_size(frag);
		off = skb_frag_off(frag);
		if (i == start_frag) {
			/* These are different for the first one to account for
			 * the starting offset.
			 */
			size = start_frag_size;
			off = start_frag_off;
		}

		copy = min_t(u32, size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
						    skb_frag_page(frag), off,
						    copy);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}

		pkt_len -= copy;
		copy_len += copy;
		/* If the fragment is exhausted, we can move to the next one */
		if (!(size - copy_len)) {
			i++;
			copy_len = 0;
		}
	}

	if (pkt_len) {
		/* Packet length is larger than the amount of data we have */
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return -1;
	}

	list_add_tail(&frag_desc->list, list);
	return (int)frag_desc->len;
}
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list, u32 priority)
{
	u32 start = 0;
	int rc;

	while (start < skb->len) {
		rc = rmnet_frag_deaggregate_one(skb, port, list, start,
						priority);
		if (rc < 0)
			return;

		start += (u32)rc;
	}
}
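/* Illustrative layout (a sketch, not a structure in this driver): an
 * aggregated frame is a back-to-back series of MAP packets, which the loop
 * above walks by advancing 'start' by each descriptor's total length:
 *
 *	+------------+---------+------------+---------+-----
 *	| MAP header | payload | MAP header | payload | ...
 *	+------------+---------+------------+---------+-----
 *	^start = 0             ^start += rc
 */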
/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}
/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress;
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
		rmnet_perf_tether_ingress =
			rcu_dereference(rmnet_perf_tether_ingress_hook);
		if (rmnet_perf_tether_ingress)
			rmnet_perf_tether_ingress(tp, skb);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}
/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_fragment *frag, *tmp;
	struct rmnet_skb_cb *cb;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
				     skb_put(head_skb, hdr_len));
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* Pull the headers off carefully */
		if (hdr_len == frag_desc->len)
			/* Fast forward "header only" packets */
			goto skip_frags;

		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
			kfree_skb(head_skb);
			return NULL;
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}
	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in the page fragments */
	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
		struct page *p = skb_frag_page(&frag->frag);
		u32 frag_size = skb_frag_size(&frag->frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(p);
			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
					skb_frag_off(&frag->frag), frag_size,
					frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);
	cb = RMNET_SKB_CB(head_skb);
	cb->coal_bytes = frag_desc->coal_bytes;
	cb->coal_bufsize = frag_desc->coal_bufsize;

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	if (frag_desc->tcp_flags_set) {
		struct tcphdr *th;
		__be16 *flags;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		flags = (__be16 *)&tcp_flag_word(th);
		*flags = frag_desc->tcp_flags;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		cb->flush_shs = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	/* Propagate original priority value */
	head_skb->priority = frag_desc->priority;

	if (trace_print_tcp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_tcp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_TCP)
				goto skip_trace_print_tcp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_tcp_rx(head_skb, saddr, daddr, tcp_hdr(head_skb));
	}

skip_trace_print_tcp_rx:
	if (trace_print_udp_rx_enabled()) {
		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
			goto skip_trace_print_udp_rx;

		memset(saddr, 0, INET6_ADDRSTRLEN);
		memset(daddr, 0, INET6_ADDRSTRLEN);

		if (head_skb->protocol == htons(ETH_P_IP)) {
			if (ip_hdr(head_skb)->protocol != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4",
				 &ip_hdr(head_skb)->daddr);
		}

		if (head_skb->protocol == htons(ETH_P_IPV6)) {
			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_UDP)
				goto skip_trace_print_udp_rx;

			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->saddr);
			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6",
				 &ipv6_hdr(head_skb)->daddr);
		}

		trace_print_udp_rx(head_skb, saddr, daddr, udp_hdr(head_skb));
	}

skip_trace_print_udp_rx:
	return head_skb;
}
/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_desc;
	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
	u32 offset = hlen + coal_desc->data_offset;
	int rc;

	new_desc = rmnet_get_frag_descriptor(port);
	if (!new_desc)
		return;

	/* Header information and most metadata is the same as the original */
	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_desc->list);
	INIT_LIST_HEAD(&new_desc->frags);
	new_desc->len = 0;

	/* Add the header fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
						  hlen);
	if (rc < 0)
		goto recycle;

	/* Add in the data fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
						  dlen);
	if (rc < 0)
		goto recycle;

	/* Update protocol-specific metadata */
	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*th), &__th);
		if (!th)
			goto recycle;

		new_desc->tcp_seq_set = 1;
		new_desc->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);

		/* Don't allow any dangerous flags to appear in any segments
		 * other than the last.
		 */
		if (th->fin || th->psh) {
			if (offset + dlen < coal_desc->len) {
				__be32 flag_word = tcp_flag_word(th);

				/* Clear the FIN and PSH flags from this
				 * segment.
				 */
				flag_word &= ~TCP_FLAG_FIN;
				flag_word &= ~TCP_FLAG_PSH;

				new_desc->tcp_flags_set = 1;
				new_desc->tcp_flags = *((__be16 *)&flag_word);
			}
		}
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*uh), &__uh);
		if (!uh)
			goto recycle;

		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			goto recycle;

		new_desc->ip_id_set = 1;
		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_desc->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += dlen;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	/* Only relevant for the first segment to avoid overcounting */
	coal_desc->coal_bytes = 0;
	coal_desc->coal_bufsize = 0;

	list_add_tail(&new_desc->list, list);
	return;

recycle:
	rmnet_recycle_frag_descriptor(new_desc, port);
}
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	/* Keep analysis tools happy, since they will see that
	 * rmnet_frag_data_ptr() could return NULL. It can't in this case,
	 * since we can't get this far otherwise...
	 */
	if (unlikely(!data))
		return false;

	datagram_len = frag_desc->len - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}
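/* Why this works (a sketch of the arithmetic, not driver code): a valid
 * transport checksum makes the one's-complement sum of the pseudo header
 * plus the entire datagram (which includes the checksum field itself) equal
 * all-ones. Seeding csum_partial() with the unfolded pseudo header sum
 * therefore folds to 0 exactly when the packet checksum is correct:
 *
 *	csum_fold(pseudo + sum(transport header + payload)) == 0
 */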
/* Converts the coalesced frame into a list of descriptors */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header coal_hdr;
	struct rmnet_fragment *frag;
	u8 *version;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Copy the coal header into our local storage before pulling it. It's
	 * possible that this header (or part of it) is the last part of a
	 * page, and pulling it off would cause that page to be freed.
	 * Referring back to the header would be invalid in that case.
	 */
	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
				 sizeof(coal_hdr), &coal_hdr) < 0)
		return;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
					      sizeof(coal_hdr)))
		return;

	/* By definition, this byte is linear, and the first byte on the
	 * first fragment. ;) Hence why no header_ptr() call is needed
	 * for it.
	 */
	version = rmnet_frag_data_ptr(coal_desc);
	if (unlikely(!version))
		return;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			return;

		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return;

		coal_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*th),
					   &__th);
		if (!th)
			return;

		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*uh),
					   &__uh);
		if (!uh)
			return;

		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes += coal_desc->len;
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;
	coal_desc->coal_bytes = coal_desc->len;
	rmnet_descriptor_for_each_frag(frag, coal_desc)
		coal_desc->coal_bufsize +=
			page_size(skb_frag_page(&frag->frag));

	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}
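/* Illustrative coalescing layout (field names from the v5 coal header; the
 * example values are hypothetical): each NLO ("number-length object") pair
 * describes a run of equal-length packets, and the per-packet bits of
 * nlo_err_mask flag checksum errors:
 *
 *	nl_pairs[0] = { .pkt_len = 1500, .num_packets = 4 }
 *	nl_pairs[1] = { .pkt_len = 640,  .num_packets = 1 }
 *
 * With GRO enabled and no checksum errors, the four 1500-byte packets become
 * one descriptor with gso_segs = 4, and the length change forces the
 * 640-byte packet into its own descriptor.
 */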
/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}

		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}
/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = rmnet_frag_header_ptr(frag_desc,
					 sizeof(struct rmnet_map_header),
					 sizeof(*coal_hdr), &__coal_hdr);
	if (!coal_hdr)
		return -EINVAL;

	veid = coal_hdr->virtual_channel_id;
	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}
static int rmnet_frag_checksum_pkt(struct rmnet_frag_descriptor *frag_desc)
{
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	struct rmnet_fragment *frag;
	int offset = sizeof(struct rmnet_map_header) +
		     sizeof(struct rmnet_map_v5_csum_header);
	u8 *version, __version;
	__wsum csum;
	u16 csum_len;

	version = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*version),
					&__version);
	if (!version)
		return -EINVAL;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph;
		u8 __iph[60]; /* Max IP header size (0xF * 4) */

		/* We need to access the entire IP header including options
		 * to validate its checksum. Fortunately, the version byte
		 * also will tell us the length, so we only need to pull
		 * once ;)
		 */
		frag_desc->ip_len = (*version & 0xF) * 4;
		iph = rmnet_frag_header_ptr(frag_desc, offset,
					    frag_desc->ip_len,
					    __iph);
		if (!iph || ip_is_fragment(iph))
			return -EINVAL;

		/* Length needs to be sensible */
		csum_len = ntohs(iph->tot_len);
		if (csum_len > frag_desc->len - offset)
			return -EINVAL;

		csum_len -= frag_desc->ip_len;

		/* IPv4 checksum must be valid */
		if (ip_fast_csum((u8 *)iph, iph->ihl)) {
			priv->stats.csum_sw++;
			return 0;
		}

		frag_desc->ip_proto = 4;
		frag_desc->trans_proto = iph->protocol;
		csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					  csum_len,
					  frag_desc->trans_proto, 0);
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return -EINVAL;

		frag_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(frag_desc,
						     offset + sizeof(*ip6h),
						     &protocol, &frag_off);
		if (ip_len < 0 || frag_off)
			return -EINVAL;

		/* Length needs to be sensible */
		frag_desc->ip_len = (u16)ip_len;
		csum_len = ntohs(ip6h->payload_len);
		if (csum_len + frag_desc->ip_len > frag_desc->len - offset)
			return -EINVAL;

		frag_desc->trans_proto = protocol;
		csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					csum_len,
					frag_desc->trans_proto, 0);
	} else {
		/* Not checksumable */
		return -EINVAL;
	}

	/* Protocol check */
	if (frag_desc->trans_proto != IPPROTO_TCP &&
	    frag_desc->trans_proto != IPPROTO_UDP)
		return -EINVAL;

	offset += frag_desc->ip_len;

	/* Check for UDP zero csum packets */
	if (frag_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*uh),
					   &__uh);
		if (!uh)
			return -EINVAL;

		if (!uh->check) {
			if (frag_desc->ip_proto == 4) {
				/* Zero checksum is valid */
				priv->stats.csum_sw++;
				return 1;
			}

			/* Not valid in IPv6 */
			priv->stats.csum_sw++;
			return 0;
		}
	}

	/* Walk the frags and checksum each chunk */
	list_for_each_entry(frag, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!csum_len)
			break;

		if (offset < frag_size) {
			void *addr = skb_frag_address(&frag->frag) + offset;
			u32 len = min_t(u32, csum_len, frag_size - offset);

			/* Checksum 'len' bytes and add them in */
			csum = csum_partial(addr, len, csum);
			csum_len -= len;
			offset = 0;
		} else {
			offset -= frag_size;
		}
	}

	priv->stats.csum_sw++;
	return !csum_fold(csum);
}
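/* Note on rmnet_frag_checksum_pkt() above: because csum is seeded with the
 * complement of the pseudo-header sum, folding in the transport header and
 * payload (checksum field included) of a valid packet yields a one's
 * complement sum of 0xffff, so csum_fold() returns 0 and the function
 * returns 1; any corruption leaves a nonzero fold and the function
 * returns 0.
 */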
/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len)
{
	struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 nlo_err_mask;
	u32 offset = sizeof(struct rmnet_map_header);
	int rc = 0;

	/* Grab the header type. It's easier to grab enough for a full csum
	 * offload header here since it's only 8 bytes and then check the
	 * header type using that. This also doubles as a check to make sure
	 * there's enough data after the QMAP header to ensure that another
	 * header is present.
	 */
	csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
					 &__csum_hdr);
	if (!csum_hdr)
		return -EINVAL;

	switch (csum_hdr->header_type) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_frag_data_check_coal_header(frag_desc,
						       &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
					     list);
		if (list_first_entry(list, struct rmnet_frag_descriptor,
				     list) != frag_desc)
			rmnet_recycle_frag_descriptor(frag_desc, port);
		break;
	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (csum_hdr->csum_valid_required) {
			priv->stats.csum_ok++;
			frag_desc->csum_valid = true;
		} else {
			int valid = rmnet_frag_checksum_pkt(frag_desc);

			if (valid < 0) {
				priv->stats.csum_validation_failed++;
			} else if (valid) {
				/* All's good */
				priv->stats.csum_ok++;
				frag_desc->csum_valid = true;
			} else {
				/* Checksum is actually bad */
				priv->stats.csum_valid_unset++;
			}
		}

		if (!rmnet_frag_pull(frag_desc, port,
				     offset + sizeof(*csum_hdr))) {
			rc = -EINVAL;
			break;
		}

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		if (!rmnet_frag_trim(frag_desc, port, len)) {
			rc = -EINVAL;
			break;
		}

		list_del_init(&frag_desc->list);
		list_add_tail(&frag_desc->list, list);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);
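/* Example (sketch, not part of this driver): a perf module would register a
 * handler matching rmnet_perf_desc_hook_t, e.g. a hypothetical
 * my_desc_hook(struct rmnet_frag_descriptor *frag_desc,
 * struct rmnet_port *port), with
 * rcu_assign_pointer(rmnet_perf_desc_entry, my_desc_hook), and clear it
 * with rcu_assign_pointer(rmnet_perf_desc_entry, NULL) followed by
 * synchronize_rcu() before unloading.
 */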
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
			     struct rmnet_port *port)
{
	rmnet_perf_desc_hook_t rmnet_perf_ingress;
	struct rmnet_map_header *qmap, __qmap;
	struct rmnet_endpoint *ep;
	struct rmnet_frag_descriptor *frag, *tmp;
	LIST_HEAD(segs);
	u16 len, pad;
	u8 mux_id;
	bool skip_perf = (frag_desc->priority == 0xda1a);

	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
	if (!qmap)
		goto recycle;

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			rmnet_frag_flow_command(frag_desc, port, len);
			goto recycle;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			rmnet_frag_command(frag_desc, qmap, port);

		goto recycle;
	}

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto recycle;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto recycle;

	frag_desc->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
						       len))
			goto recycle;
	} else {
		/* We only have the main QMAP header to worry about */
		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
			return;

		if (!rmnet_frag_trim(frag_desc, port, len))
			return;

		list_add_tail(&frag_desc->list, &segs);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	if (skip_perf)
		goto no_perf;

	rcu_read_lock();
	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
	if (rmnet_perf_ingress) {
		list_for_each_entry_safe(frag, tmp, &segs, list) {
			list_del_init(&frag->list);
			rmnet_perf_ingress(frag, port);
		}
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

no_perf:
	list_for_each_entry_safe(frag, tmp, &segs, list) {
		list_del_init(&frag->list);
		rmnet_frag_deliver(frag, port);
	}

	return;

recycle:
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);
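/* Example (sketch, not part of this driver): a perf module would install
 * the chain-end notifier, e.g. a hypothetical my_chain_end() matching
 * rmnet_perf_chain_hook_t, with
 * rcu_assign_pointer(rmnet_perf_chain_end, my_chain_end), and clear it
 * with rcu_assign_pointer(rmnet_perf_chain_end, NULL) followed by
 * synchronize_rcu() before unloading.
 */

/* Bucket the number of SKBs seen in one ingress chain: chains of 60 or
 * more SKBs accumulate in dl_chain_stat[6]; shorter chains accumulate in
 * dl_chain_stat[chain_count / 10], i.e. buckets of ten.
 */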
void rmnet_descriptor_classify_chain_count(u64 chain_count,
					   struct rmnet_port *port)
{
	u64 index;

	if (chain_count >= 60) {
		port->stats.dl_chain_stat[6] += chain_count;
		return;
	}

	index = chain_count;
	do_div(index, 10);
	port->stats.dl_chain_stat[index] += chain_count;
}
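/* Bucket the per-SKB page fragment count: 0 or 1 frags accumulate in
 * dl_frag_stat_1, 16 or more in dl_frag_stat[4], and anything in between
 * in dl_frag_stat[frag_count / 4] (e.g. 7 frags land in dl_frag_stat[1]).
 */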
void rmnet_descriptor_classify_frag_count(u64 frag_count,
					  struct rmnet_port *port)
{
	u64 index;

	if (frag_count <= 1) {
		port->stats.dl_frag_stat_1 += frag_count;
		return;
	}

	if (frag_count >= 16) {
		port->stats.dl_frag_stat[4] += frag_count;
		return;
	}

	index = frag_count;
	do_div(index, 4);
	port->stats.dl_frag_stat[index] += frag_count;
}
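/* Ingress entry point for the descriptor-based data path. Walks the HW SKB
 * chain, deaggregates each SKB into frag descriptors and processes them,
 * then signals the end of the chain to the perf module (skipped for chains
 * marked with priority 0xda1a).
 */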
void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port)
{
	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
	LIST_HEAD(desc_list);
	bool skip_perf = (skb->priority == 0xda1a);
	u64 chain_count = 0;

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag;

		chain_count++;
		rmnet_descriptor_classify_frag_count(skb_shinfo(skb)->nr_frags,
						     port);

		rmnet_frag_deaggregate(skb, port, &desc_list, skb->priority);
		if (!list_empty(&desc_list)) {
			struct rmnet_frag_descriptor *frag_desc, *tmp;

			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
						 list) {
				list_del_init(&frag_desc->list);
				__rmnet_frag_ingress_handler(frag_desc, port);
			}
		}

		skb_frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		consume_skb(skb);
		skb = skb_frag;
	}

	rmnet_descriptor_classify_chain_count(chain_count, port);

	if (skip_perf)
		return;

	rcu_read_lock();
	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
	if (rmnet_perf_opt_chain_end)
		rmnet_perf_opt_chain_end();
	rcu_read_unlock();
}
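/* Free every descriptor on the pool's free list along with the pool
 * itself. Only the free list is walked here, so any descriptors still in
 * flight are presumably expected to have been returned before teardown.
 */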
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	struct rmnet_frag_descriptor *frag_desc, *tmp;

	pool = port->frag_desc_pool;
	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
		kfree(frag_desc);
		pool->pool_size--;
	}

	kfree(pool);
}
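/* Allocate the descriptor pool and pre-populate its free list with
 * RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors. Returns -ENOMEM on
 * allocation failure; descriptors already on the free list are presumably
 * reclaimed by rmnet_descriptor_deinit() on the error path.
 */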
int rmnet_descriptor_init(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	spin_lock_init(&port->desc_pool_lock);
	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}