rmnet_map_data.c

  1. /* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
  2. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. *
  13. * RMNET Data MAP protocol
  14. *
  15. */
  16. #include <linux/netdevice.h>
  17. #include <linux/ip.h>
  18. #include <linux/ipv6.h>
  19. #include <net/ip6_checksum.h>
  20. #include "rmnet_config.h"
  21. #include "rmnet_map.h"
  22. #include "rmnet_private.h"
  23. #include "rmnet_handlers.h"
  24. #include "rmnet_ll.h"
  25. #include "rmnet_mem.h"
  26. #define RMNET_MAP_PKT_COPY_THRESHOLD 64
  27. #define RMNET_MAP_DEAGGR_SPACING 64
  28. #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
  29. #define RMNET_PAGE_COUNT 384
  30. struct rmnet_map_coal_metadata {
  31. void *ip_header;
  32. void *trans_header;
  33. u16 ip_len;
  34. u16 trans_len;
  35. u16 data_offset;
  36. u16 data_len;
  37. u8 ip_proto;
  38. u8 trans_proto;
  39. u8 pkt_id;
  40. u8 pkt_count;
  41. };
  42. static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
  43. const void *txporthdr)
  44. {
  45. __sum16 *check = NULL;
  46. switch (protocol) {
  47. case IPPROTO_TCP:
  48. check = &(((struct tcphdr *)txporthdr)->check);
  49. break;
  50. case IPPROTO_UDP:
  51. check = &(((struct udphdr *)txporthdr)->check);
  52. break;
  53. default:
  54. check = NULL;
  55. break;
  56. }
  57. return check;
  58. }
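/* Validates an IPv4 packet against the MAP DL checksum trailer computed by
 * the hardware. The trailer checksum covers the whole IP packet, so the IP
 * header checksum is subtracted out and the pseudo-header checksum added
 * back in before comparing against the transport checksum field. Fragmented
 * packets and transports other than TCP/UDP are rejected up front.
 */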
  59. static int
  60. rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
  61. struct rmnet_map_dl_csum_trailer *csum_trailer,
  62. struct rmnet_priv *priv)
  63. {
  64. __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
  65. u16 csum_value, csum_value_final;
  66. struct iphdr *ip4h;
  67. void *txporthdr;
  68. __be16 addend;
  69. ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
  70. if ((ntohs(ip4h->frag_off) & IP_MF) ||
  71. ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
  72. priv->stats.csum_fragmented_pkt++;
  73. return -EOPNOTSUPP;
  74. }
  75. txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
  76. csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
  77. if (!csum_field) {
  78. priv->stats.csum_err_invalid_transport++;
  79. return -EPROTONOSUPPORT;
  80. }
  81. /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
  82. if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
  83. priv->stats.csum_skipped++;
  84. return 0;
  85. }
  86. csum_value = ~ntohs(csum_trailer->csum_value);
  87. hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
  88. ip_payload_csum = csum16_sub((__force __sum16)csum_value,
  89. (__force __be16)hdr_csum);
  90. pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
  91. ntohs(ip4h->tot_len) - ip4h->ihl * 4,
  92. ip4h->protocol, 0);
  93. addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
  94. pseudo_csum = csum16_add(ip_payload_csum, addend);
  95. addend = (__force __be16)ntohs((__force __be16)*csum_field);
  96. csum_temp = ~csum16_sub(pseudo_csum, addend);
  97. csum_value_final = (__force u16)csum_temp;
  98. if (unlikely(csum_value_final == 0)) {
  99. switch (ip4h->protocol) {
  100. case IPPROTO_UDP:
  101. /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
  102. csum_value_final = ~csum_value_final;
  103. break;
  104. case IPPROTO_TCP:
  105. /* DL4 Non-RFC compliant TCP checksum found */
  106. if (*csum_field == (__force __sum16)0xFFFF)
  107. csum_value_final = ~csum_value_final;
  108. break;
  109. }
  110. }
  111. if (csum_value_final == ntohs((__force __be16)*csum_field)) {
  112. priv->stats.csum_ok++;
  113. return 0;
  114. } else {
  115. priv->stats.csum_validation_failed++;
  116. return -EINVAL;
  117. }
  118. }
  119. #if IS_ENABLED(CONFIG_IPV6)
  120. static int
  121. rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
  122. struct rmnet_map_dl_csum_trailer *csum_trailer,
  123. struct rmnet_priv *priv)
  124. {
  125. __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
  126. u16 csum_value, csum_value_final;
  127. __be16 ip6_hdr_csum, addend;
  128. struct ipv6hdr *ip6h;
  129. void *txporthdr, *data = rmnet_map_data_ptr(skb);
  130. u32 length;
  131. ip6h = data;
  132. txporthdr = data + sizeof(struct ipv6hdr);
  133. csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
  134. if (!csum_field) {
  135. priv->stats.csum_err_invalid_transport++;
  136. return -EPROTONOSUPPORT;
  137. }
  138. csum_value = ~ntohs(csum_trailer->csum_value);
  139. ip6_hdr_csum = (__force __be16)
  140. ~ntohs((__force __be16)ip_compute_csum(ip6h,
  141. (int)(txporthdr - data)));
  142. ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
  143. ip6_hdr_csum);
  144. length = (ip6h->nexthdr == IPPROTO_UDP) ?
  145. ntohs(((struct udphdr *)txporthdr)->len) :
  146. ntohs(ip6h->payload_len);
  147. pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
  148. length, ip6h->nexthdr, 0));
  149. addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
  150. pseudo_csum = csum16_add(ip6_payload_csum, addend);
  151. addend = (__force __be16)ntohs((__force __be16)*csum_field);
  152. csum_temp = ~csum16_sub(pseudo_csum, addend);
  153. csum_value_final = (__force u16)csum_temp;
  154. if (unlikely(csum_value_final == 0)) {
  155. switch (ip6h->nexthdr) {
  156. case IPPROTO_UDP:
  157. /* RFC 2460 section 8.1
  158. * DL6 One's complement rule for UDP checksum 0
  159. */
  160. csum_value_final = ~csum_value_final;
  161. break;
  162. case IPPROTO_TCP:
  163. /* DL6 Non-RFC compliant TCP checksum found */
  164. if (*csum_field == (__force __sum16)0xFFFF)
  165. csum_value_final = ~csum_value_final;
  166. break;
  167. }
  168. }
  169. if (csum_value_final == ntohs((__force __be16)*csum_field)) {
  170. priv->stats.csum_ok++;
  171. return 0;
  172. } else {
  173. priv->stats.csum_validation_failed++;
  174. return -EINVAL;
  175. }
  176. }
  177. #endif
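/* The helpers below prepare a packet for MAPv4 UL checksum offload. The UL
 * checksum header records where the transport header starts and where the
 * checksum must be inserted; the transport checksum field, which the stack
 * pre-seeded with the pseudo-header checksum (CHECKSUM_PARTIAL), is bitwise
 * complemented here, a transformation the offload hardware appears to expect
 * before it computes and inserts the final checksum.
 */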
  178. static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
  179. {
  180. struct iphdr *ip4h = (struct iphdr *)iphdr;
  181. void *txphdr;
  182. u16 *csum;
  183. txphdr = iphdr + ip4h->ihl * 4;
  184. if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
  185. csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
  186. *csum = ~(*csum);
  187. }
  188. }
  189. static void
  190. rmnet_map_ipv4_ul_csum_header(void *iphdr,
  191. struct rmnet_map_ul_csum_header *ul_header,
  192. struct sk_buff *skb)
  193. {
  194. struct iphdr *ip4h = (struct iphdr *)iphdr;
  195. __be16 *hdr = (__be16 *)ul_header, offset;
  196. offset = htons((__force u16)(skb_transport_header(skb) -
  197. (unsigned char *)iphdr));
  198. ul_header->csum_start_offset = offset;
  199. ul_header->csum_insert_offset = skb->csum_offset;
  200. ul_header->csum_enabled = 1;
  201. if (ip4h->protocol == IPPROTO_UDP)
  202. ul_header->udp_ind = 1;
  203. else
  204. ul_header->udp_ind = 0;
  205. /* Changing remaining fields to network order */
  206. hdr++;
  207. *hdr = htons((__force u16)*hdr);
  208. skb->ip_summed = CHECKSUM_NONE;
  209. rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
  210. }
  211. #if IS_ENABLED(CONFIG_IPV6)
  212. static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
  213. {
  214. struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
  215. void *txphdr;
  216. u16 *csum;
  217. txphdr = ip6hdr + sizeof(struct ipv6hdr);
  218. if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
  219. csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
  220. *csum = ~(*csum);
  221. }
  222. }
  223. static void
  224. rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
  225. struct rmnet_map_ul_csum_header *ul_header,
  226. struct sk_buff *skb)
  227. {
  228. struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
  229. __be16 *hdr = (__be16 *)ul_header, offset;
  230. offset = htons((__force u16)(skb_transport_header(skb) -
  231. (unsigned char *)ip6hdr));
  232. ul_header->csum_start_offset = offset;
  233. ul_header->csum_insert_offset = skb->csum_offset;
  234. ul_header->csum_enabled = 1;
  235. if (ip6h->nexthdr == IPPROTO_UDP)
  236. ul_header->udp_ind = 1;
  237. else
  238. ul_header->udp_ind = 0;
  239. /* Changing remaining fields to network order */
  240. hdr++;
  241. *hdr = htons((__force u16)*hdr);
  242. skb->ip_summed = CHECKSUM_NONE;
  243. rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
  244. }
  245. #endif
  246. /* Adds MAP header to front of skb->data
  247. * Padding is calculated and set appropriately in MAP header. Mux ID is
  248. * initialized to 0.
  249. */
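/* Worked example: a 61-byte MAP payload is padded to the next 4-byte
 * boundary, i.e. ALIGN(61, 4) - 61 = 3 bytes of zeroes are appended,
 * pkt_len becomes htons(64) and pad_len becomes 3. With
 * RMNET_MAP_NO_PAD_BYTES, pkt_len is simply htons(61) and nothing is padded.
 */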
  250. struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
  251. int hdrlen, int pad,
  252. struct rmnet_port *port)
  253. {
  254. struct rmnet_map_header *map_header;
  255. u32 padding, map_datalen;
  256. u8 *padbytes;
  257. map_datalen = skb->len - hdrlen;
  258. map_header = (struct rmnet_map_header *)
  259. skb_push(skb, sizeof(struct rmnet_map_header));
  260. memset(map_header, 0, sizeof(struct rmnet_map_header));
  261. /* Set next_hdr bit for csum offload packets */
  262. if (port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5)
  263. map_header->next_hdr = 1;
  264. if (pad == RMNET_MAP_NO_PAD_BYTES) {
  265. map_header->pkt_len = htons(map_datalen);
  266. return map_header;
  267. }
  268. padding = ALIGN(map_datalen, 4) - map_datalen;
  269. if (padding == 0)
  270. goto done;
  271. if (skb_tailroom(skb) < padding)
  272. return NULL;
  273. padbytes = (u8 *)skb_put(skb, padding);
  274. memset(padbytes, 0, padding);
  275. done:
  276. map_header->pkt_len = htons(map_datalen + padding);
  277. map_header->pad_len = padding & 0x3F;
  278. return map_header;
  279. }
  280. /* Deaggregates a single packet
  281. * A whole new buffer is allocated for each portion of an aggregated frame.
  282. * Caller should keep calling deaggregate() on the source skb until NULL is
  283. * returned, indicating that there are no more packets to deaggregate. Caller
  284. * is responsible for freeing the original skb.
  285. */
  286. struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
  287. struct rmnet_port *port)
  288. {
  289. struct rmnet_map_header *maph;
  290. struct sk_buff *skbn;
  291. unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
  292. u32 packet_len;
  293. if (skb->len == 0)
  294. return NULL;
  295. maph = (struct rmnet_map_header *)data;
  296. packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
  297. if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
  298. packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
  299. else if (port->data_format & RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5) {
  300. if (!maph->cd_bit) {
  301. packet_len += sizeof(struct rmnet_map_v5_csum_header);
  302. /* Coalescing headers require MAPv5 */
  303. next_hdr = data + sizeof(*maph);
  304. }
  305. }
  306. if (((int)skb->len - (int)packet_len) < 0)
  307. return NULL;
  308. /* Some hardware can send us empty frames. Catch them */
  309. if (ntohs(maph->pkt_len) == 0)
  310. return NULL;
  311. if (next_hdr &&
  312. ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
  313. RMNET_MAP_HEADER_TYPE_COALESCING)
  314. return skb;
  315. if (skb_is_nonlinear(skb)) {
  316. skb_frag_t *frag0 = skb_shinfo(skb)->frags;
  317. struct page *page = skb_frag_page(frag0);
  318. skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
  319. if (!skbn)
  320. return NULL;
  321. skb_append_pagefrags(skbn, page, frag0->bv_offset,
  322. packet_len);
  323. skbn->data_len += packet_len;
  324. skbn->len += packet_len;
  325. } else {
  326. skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
  327. GFP_ATOMIC);
  328. if (!skbn)
  329. return NULL;
  330. skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
  331. skb_put(skbn, packet_len);
  332. memcpy(skbn->data, data, packet_len);
  333. }
  334. skbn->priority = skb->priority;
  335. pskb_pull(skb, packet_len);
  336. return skbn;
  337. }
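/* Typical deaggregation loop in the ingress path (illustrative sketch only;
 * handle_one_map_packet() is a placeholder, not a function in this file):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 *		handle_one_map_packet(skbn, port);
 *	consume_skb(skb);
 */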
  338. /* Validates packet checksums. Function takes a pointer to
  339. * the beginning of a buffer which contains the IP payload +
  340. * padding + checksum trailer.
  341. * Only IPv4 and IPv6 are supported along with TCP & UDP.
  342. * Fragmented or tunneled packets are not supported.
  343. */
  344. int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
  345. {
  346. struct rmnet_priv *priv = netdev_priv(skb->dev);
  347. struct rmnet_map_dl_csum_trailer *csum_trailer;
  348. if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
  349. priv->stats.csum_sw++;
  350. return -EOPNOTSUPP;
  351. }
  352. csum_trailer = (struct rmnet_map_dl_csum_trailer *)
  353. (rmnet_map_data_ptr(skb) + len);
  354. if (!csum_trailer->valid) {
  355. priv->stats.csum_valid_unset++;
  356. return -EINVAL;
  357. }
  358. if (skb->protocol == htons(ETH_P_IP)) {
  359. return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
  360. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  361. #if IS_ENABLED(CONFIG_IPV6)
  362. return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
  363. #else
  364. priv->stats.csum_err_invalid_ip_version++;
  365. return -EPROTONOSUPPORT;
  366. #endif
  367. } else {
  368. priv->stats.csum_err_invalid_ip_version++;
  369. return -EPROTONOSUPPORT;
  370. }
  371. return 0;
  372. }
  373. EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);
  374. void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
  375. struct net_device *orig_dev)
  376. {
  377. struct rmnet_priv *priv = netdev_priv(orig_dev);
  378. struct rmnet_map_ul_csum_header *ul_header;
  379. void *iphdr;
  380. ul_header = (struct rmnet_map_ul_csum_header *)
  381. skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
  382. if (unlikely(!(orig_dev->features &
  383. (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
  384. goto sw_csum;
  385. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  386. iphdr = (char *)ul_header +
  387. sizeof(struct rmnet_map_ul_csum_header);
  388. if (skb->protocol == htons(ETH_P_IP)) {
  389. rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
  390. priv->stats.csum_hw++;
  391. return;
  392. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  393. #if IS_ENABLED(CONFIG_IPV6)
  394. rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
  395. priv->stats.csum_hw++;
  396. return;
  397. #else
  398. priv->stats.csum_err_invalid_ip_version++;
  399. goto sw_csum;
  400. #endif
  401. } else {
  402. priv->stats.csum_err_invalid_ip_version++;
  403. }
  404. }
  405. sw_csum:
  406. ul_header->csum_start_offset = 0;
  407. ul_header->csum_insert_offset = 0;
  408. ul_header->csum_enabled = 0;
  409. ul_header->udp_ind = 0;
  410. priv->stats.csum_sw++;
  411. }
  412. static void rmnet_map_v5_check_priority(struct sk_buff *skb,
  413. struct net_device *orig_dev,
  414. struct rmnet_map_v5_csum_header *hdr,
  415. bool tso)
  416. {
  417. struct rmnet_priv *priv = netdev_priv(orig_dev);
  418. if (RMNET_LLM(skb->priority)) {
  419. priv->stats.ul_prio++;
  420. hdr->priority = 1;
  421. }
  422. /* APS priority bit is only valid for csum header */
  423. if (!tso && RMNET_APS_LLB(skb->priority)) {
  424. priv->stats.aps_prio++;
  425. hdr->aps_prio = 1;
  426. }
  427. }
  428. void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
  429. struct rmnet_port *port,
  430. struct net_device *orig_dev)
  431. {
  432. struct rmnet_priv *priv = netdev_priv(orig_dev);
  433. struct rmnet_map_v5_csum_header *ul_header;
  434. ul_header = (struct rmnet_map_v5_csum_header *)
  435. skb_push(skb, sizeof(*ul_header));
  436. memset(ul_header, 0, sizeof(*ul_header));
  437. ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
  438. if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
  439. rmnet_map_v5_check_priority(skb, orig_dev, ul_header, false);
  440. /* Allow priority w/o csum offload */
  441. if (!(port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5))
  442. return;
  443. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  444. void *iph = (char *)ul_header + sizeof(*ul_header);
  445. void *trans;
  446. __sum16 *check;
  447. u8 proto;
  448. if (skb->protocol == htons(ETH_P_IP)) {
  449. u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
  450. proto = ((struct iphdr *)iph)->protocol;
  451. trans = iph + ip_len;
  452. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  453. u16 ip_len = sizeof(struct ipv6hdr);
  454. proto = ((struct ipv6hdr *)iph)->nexthdr;
  455. trans = iph + ip_len;
  456. } else {
  457. priv->stats.csum_err_invalid_ip_version++;
  458. goto sw_csum;
  459. }
  460. check = rmnet_map_get_csum_field(proto, trans);
  461. if (check) {
  462. skb->ip_summed = CHECKSUM_NONE;
  463. /* Ask for checksum offloading */
  464. ul_header->csum_valid_required = 1;
  465. priv->stats.csum_hw++;
  466. return;
  467. }
  468. }
  469. sw_csum:
  470. priv->stats.csum_sw++;
  471. }
  472. /* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
  473. * packets that are supported for UL checksum offload.
  474. */
  475. void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
  476. struct rmnet_port *port,
  477. struct net_device *orig_dev,
  478. int csum_type)
  479. {
  480. switch (csum_type) {
  481. case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
  482. rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
  483. break;
  484. case RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5:
  485. rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
  486. break;
  487. default:
  488. break;
  489. }
  490. }
  491. bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
  492. {
  493. /* Only applies to frames with a single packet */
  494. if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
  495. return false;
  496. /* TCP header has FIN or PUSH set */
  497. if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
  498. return true;
  499. /* Hit packet limit, byte limit, or time limit/EOF on DMA */
  500. if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
  501. switch (coal_hdr->close_value) {
  502. case RMNET_MAP_COAL_CLOSE_HW_PKT:
  503. case RMNET_MAP_COAL_CLOSE_HW_BYTE:
  504. case RMNET_MAP_COAL_CLOSE_HW_TIME:
  505. return true;
  506. }
  507. }
  508. return false;
  509. }
  510. static void rmnet_map_move_headers(struct sk_buff *skb)
  511. {
  512. struct iphdr *iph;
  513. u16 ip_len;
  514. u16 trans_len = 0;
  515. u8 proto;
  516. /* This only applies to non-linear SKBs */
  517. if (!skb_is_nonlinear(skb))
  518. return;
  519. iph = (struct iphdr *)rmnet_map_data_ptr(skb);
  520. if (iph->version == 4) {
  521. ip_len = iph->ihl * 4;
  522. proto = iph->protocol;
  523. if (iph->frag_off & htons(IP_OFFSET))
  524. /* No transport header information */
  525. goto pull;
  526. } else if (iph->version == 6) {
  527. struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
  528. __be16 frag_off;
  529. u8 nexthdr = ip6h->nexthdr;
  530. ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
  531. &frag_off);
  532. if (ip_len < 0)
  533. return;
  534. proto = nexthdr;
  535. } else {
  536. return;
  537. }
  538. if (proto == IPPROTO_TCP) {
  539. struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
  540. trans_len = tp->doff * 4;
  541. } else if (proto == IPPROTO_UDP) {
  542. trans_len = sizeof(struct udphdr);
  543. } else if (proto == NEXTHDR_FRAGMENT) {
  544. /* Non-first fragments don't have the fragment length added by
  545. * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
  546. * we account for the length here.
  547. */
  548. ip_len += sizeof(struct frag_hdr);
  549. }
  550. pull:
  551. __pskb_pull_tail(skb, ip_len + trans_len);
  552. skb_reset_network_header(skb);
  553. if (trans_len)
  554. skb_set_transport_header(skb, ip_len);
  555. }
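/* Copies the current run of packets (data_len * pkt_count bytes at the
 * current data_offset) from the coalesced SKB into dest: by attaching a
 * reference to the source page frag when the coalesced SKB is nonlinear, or
 * by memcpy otherwise.
 */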
  556. static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
  557. struct rmnet_map_coal_metadata *coal_meta,
  558. struct sk_buff *dest)
  559. {
  560. unsigned char *data_start = rmnet_map_data_ptr(coal_skb) +
  561. coal_meta->ip_len + coal_meta->trans_len;
  562. u32 copy_len = coal_meta->data_len * coal_meta->pkt_count;
  563. if (skb_is_nonlinear(coal_skb)) {
  564. skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
  565. struct page *page = skb_frag_page(frag0);
  566. skb_append_pagefrags(dest, page,
  567. frag0->bv_offset + coal_meta->ip_len +
  568. coal_meta->trans_len +
  569. coal_meta->data_offset,
  570. copy_len);
  571. dest->data_len += copy_len;
  572. dest->len += copy_len;
  573. } else {
  574. skb_put_data(dest, data_start + coal_meta->data_offset,
  575. copy_len);
  576. }
  577. }
  578. /* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
  579. * if needed (i.e. forwarding, UDP GRO)
  580. */
  581. static void rmnet_map_gso_stamp(struct sk_buff *skb,
  582. struct rmnet_map_coal_metadata *coal_meta)
  583. {
  584. struct skb_shared_info *shinfo = skb_shinfo(skb);
  585. if (coal_meta->trans_proto == IPPROTO_TCP)
  586. shinfo->gso_type = (coal_meta->ip_proto == 4) ?
  587. SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
  588. else
  589. shinfo->gso_type = SKB_GSO_UDP_L4;
  590. shinfo->gso_size = coal_meta->data_len;
  591. shinfo->gso_segs = coal_meta->pkt_count;
  592. }
  593. /* Handles setting up the partial checksum in the skb. Sets the transport
  594. * checksum to the pseudoheader checksum and sets the csum offload metadata
  595. */
  596. static void rmnet_map_partial_csum(struct sk_buff *skb,
  597. struct rmnet_map_coal_metadata *coal_meta)
  598. {
  599. unsigned char *data = skb->data;
  600. __sum16 pseudo;
  601. u16 pkt_len = skb->len - coal_meta->ip_len;
  602. if (coal_meta->ip_proto == 4) {
  603. struct iphdr *iph = (struct iphdr *)data;
  604. pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  605. pkt_len, coal_meta->trans_proto,
  606. 0);
  607. } else {
  608. struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
  609. pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
  610. pkt_len, coal_meta->trans_proto, 0);
  611. }
  612. if (coal_meta->trans_proto == IPPROTO_TCP) {
  613. struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);
  614. tp->check = pseudo;
  615. skb->csum_offset = offsetof(struct tcphdr, check);
  616. } else {
  617. struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);
  618. up->check = pseudo;
  619. skb->csum_offset = offsetof(struct udphdr, check);
  620. }
  621. skb->ip_summed = CHECKSUM_PARTIAL;
  622. skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
  623. }
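/* Carves one segment out of a coalesced SKB: allocates a new SKB, copies or
 * page-frag-references the payload, replays the IP and transport headers
 * with adjusted lengths, TCP sequence number and IPv4 ID, and then either
 * sets up partial checksum offload or deliberately corrupts the checksum so
 * the stack drops data the hardware flagged as bad.
 */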
  624. static void
  625. __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
  626. struct rmnet_map_coal_metadata *coal_meta,
  627. struct sk_buff_head *list, u8 pkt_id,
  628. bool csum_valid)
  629. {
  630. struct sk_buff *skbn;
  631. struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
  632. __sum16 *check = NULL;
  633. u32 alloc_len;
  634. u32 dlen = coal_meta->data_len * coal_meta->pkt_count;
  635. u32 hlen = coal_meta->ip_len + coal_meta->trans_len;
  636. bool zero_csum = false;
  637. /* We can avoid copying the data if the SKB we got from the lower-level
  638. * drivers was nonlinear.
  639. */
  640. if (skb_is_nonlinear(coal_skb))
  641. alloc_len = hlen;
  642. else
  643. alloc_len = hlen + dlen;
  644. skbn = alloc_skb(alloc_len, GFP_ATOMIC);
  645. if (!skbn)
  646. return;
  647. skb_reserve(skbn, hlen);
  648. rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn);
  649. /* Push transport header and update necessary fields */
  650. skb_push(skbn, coal_meta->trans_len);
  651. memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len);
  652. skb_reset_transport_header(skbn);
  653. if (coal_meta->trans_proto == IPPROTO_TCP) {
  654. struct tcphdr *th = tcp_hdr(skbn);
  655. th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
  656. check = &th->check;
  657. /* Don't allow dangerous flags to be set in any segment but the
  658. * last one.
  659. */
  660. if (th->fin || th->psh) {
  661. if (hlen + coal_meta->data_offset + dlen <
  662. coal_skb->len) {
  663. th->fin = 0;
  664. th->psh = 0;
  665. }
  666. }
  667. } else if (coal_meta->trans_proto == IPPROTO_UDP) {
  668. struct udphdr *uh = udp_hdr(skbn);
  669. uh->len = htons(skbn->len);
  670. check = &uh->check;
  671. if (coal_meta->ip_proto == 4 && !uh->check)
  672. zero_csum = true;
  673. }
  674. /* Push IP header and update necessary fields */
  675. skb_push(skbn, coal_meta->ip_len);
  676. memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len);
  677. skb_reset_network_header(skbn);
  678. if (coal_meta->ip_proto == 4) {
  679. struct iphdr *iph = ip_hdr(skbn);
  680. iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id);
  681. iph->tot_len = htons(skbn->len);
  682. iph->check = 0;
  683. iph->check = ip_fast_csum(iph, iph->ihl);
  684. } else {
  685. /* Payload length includes any extension headers */
  686. ipv6_hdr(skbn)->payload_len = htons(skbn->len -
  687. sizeof(struct ipv6hdr));
  688. }
  689. /* Handle checksum status */
  690. if (likely(csum_valid) || zero_csum) {
  691. /* Set the partial checksum information */
  692. rmnet_map_partial_csum(skbn, coal_meta);
  693. } else if (check) {
  694. /* Unfortunately, we have to fake a bad checksum here, since
  695. * the original bad value is lost by the hardware. The only
  696. * reliable way to do it is to calculate the actual checksum
  697. * and corrupt it.
  698. */
  699. __wsum csum;
  700. unsigned int offset = skb_transport_offset(skbn);
  701. __sum16 pseudo;
  702. /* Calculate pseudo header */
  703. if (coal_meta->ip_proto == 4) {
  704. struct iphdr *iph = ip_hdr(skbn);
  705. pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  706. skbn->len -
  707. coal_meta->ip_len,
  708. coal_meta->trans_proto, 0);
  709. } else {
  710. struct ipv6hdr *ip6h = ipv6_hdr(skbn);
  711. pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
  712. skbn->len - coal_meta->ip_len,
  713. coal_meta->trans_proto, 0);
  714. }
  715. *check = pseudo;
  716. csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
  717. /* Add 1 to corrupt. This cannot produce a final value of 0
  718. * since csum_fold() can't return a value of 0xFFFF.
  719. */
  720. *check = csum16_add(csum_fold(csum), htons(1));
  721. skbn->ip_summed = CHECKSUM_NONE;
  722. }
  723. skbn->dev = coal_skb->dev;
  724. priv->stats.coal.coal_reconstruct++;
  725. /* Stamp GSO information if necessary */
  726. if (coal_meta->pkt_count > 1)
  727. rmnet_map_gso_stamp(skbn, coal_meta);
  728. /* Propagate priority value */
  729. skbn->priority = coal_skb->priority;
  730. __skb_queue_tail(list, skbn);
  731. /* Update meta information to move past the data we just segmented */
  732. coal_meta->data_offset += dlen;
  733. coal_meta->pkt_id = pkt_id + 1;
  734. coal_meta->pkt_count = 0;
  735. }
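/* Software fallback: verifies the transport checksum of a coalesced SKB by
 * folding the pseudo-header checksum together with a checksum over the
 * transport header and payload. Returns true if the checksum is valid.
 */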
  736. static bool rmnet_map_validate_csum(struct sk_buff *skb,
  737. struct rmnet_map_coal_metadata *meta)
  738. {
  739. u8 *data = rmnet_map_data_ptr(skb);
  740. unsigned int datagram_len;
  741. __wsum csum;
  742. __sum16 pseudo;
  743. datagram_len = skb->len - meta->ip_len;
  744. if (meta->ip_proto == 4) {
  745. struct iphdr *iph = (struct iphdr *)data;
  746. pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
  747. datagram_len,
  748. meta->trans_proto, 0);
  749. } else {
  750. struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
  751. pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
  752. datagram_len, meta->trans_proto,
  753. 0);
  754. }
  755. csum = skb_checksum(skb, meta->ip_len, datagram_len,
  756. csum_unfold(pseudo));
  757. return !csum_fold(csum);
  758. }
  759. /* Converts the coalesced SKB into a list of SKBs.
  760. * NLOs containing csum errors will not be included.
  761. * The original coalesced SKB should be treated as invalid and
  762. * must be freed by the caller
  763. */
  764. static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
  765. u64 nlo_err_mask,
  766. struct sk_buff_head *list)
  767. {
  768. struct iphdr *iph;
  769. struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
  770. struct rmnet_map_v5_coal_header *coal_hdr;
  771. struct rmnet_map_coal_metadata coal_meta;
  772. u16 pkt_len;
  773. u8 pkt, total_pkt = 0;
  774. u8 nlo;
  775. bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
  776. bool zero_csum = false;
  777. memset(&coal_meta, 0, sizeof(coal_meta));
  778. /* Pull off the headers we no longer need */
  779. pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
  780. coal_hdr = (struct rmnet_map_v5_coal_header *)
  781. rmnet_map_data_ptr(coal_skb);
  782. pskb_pull(coal_skb, sizeof(*coal_hdr));
  783. iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
  784. if (iph->version == 4) {
  785. coal_meta.ip_proto = 4;
  786. coal_meta.ip_len = iph->ihl * 4;
  787. coal_meta.trans_proto = iph->protocol;
  788. coal_meta.ip_header = iph;
  789. /* Don't allow coalescing of any packets with IP options */
  790. if (iph->ihl != 5)
  791. gro = false;
  792. } else if (iph->version == 6) {
  793. struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
  794. __be16 frag_off;
  795. u8 protocol = ip6h->nexthdr;
  796. coal_meta.ip_proto = 6;
  797. coal_meta.ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h),
  798. &protocol, &frag_off);
  799. coal_meta.trans_proto = protocol;
  800. coal_meta.ip_header = ip6h;
  801. /* If we run into a problem, or this has a fragment header
  802. * (which should technically not be possible, if the HW
  803. * works as intended...), bail.
  804. */
  805. if (coal_meta.ip_len < 0 || frag_off) {
  806. priv->stats.coal.coal_ip_invalid++;
  807. return;
  808. } else if (coal_meta.ip_len > sizeof(*ip6h)) {
  809. /* Don't allow coalescing of any packets with IPv6
  810. * extension headers.
  811. */
  812. gro = false;
  813. }
  814. } else {
  815. priv->stats.coal.coal_ip_invalid++;
  816. return;
  817. }
  818. if (coal_meta.trans_proto == IPPROTO_TCP) {
  819. struct tcphdr *th;
  820. th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len);
  821. coal_meta.trans_len = th->doff * 4;
  822. coal_meta.trans_header = th;
  823. } else if (coal_meta.trans_proto == IPPROTO_UDP) {
  824. struct udphdr *uh;
  825. uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
  826. coal_meta.trans_len = sizeof(*uh);
  827. coal_meta.trans_header = uh;
  828. /* Check for v4 zero checksum */
  829. if (coal_meta.ip_proto == 4 && !uh->check)
  830. zero_csum = true;
  831. } else {
  832. priv->stats.coal.coal_trans_invalid++;
  833. return;
  834. }
  835. if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
  836. rmnet_map_move_headers(coal_skb);
  837. /* Mark as valid if it checks out */
  838. if (rmnet_map_validate_csum(coal_skb, &coal_meta))
  839. coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
  840. __skb_queue_tail(list, coal_skb);
  841. return;
  842. }
  843. /* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
  844. * no checksum errors, and are allowing GRO. We can just reuse this
  845. * SKB unchanged.
  846. */
  847. if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
  848. rmnet_map_move_headers(coal_skb);
  849. coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
  850. coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
  851. coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
  852. coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
  853. if (coal_meta.pkt_count > 1) {
  854. rmnet_map_partial_csum(coal_skb, &coal_meta);
  855. rmnet_map_gso_stamp(coal_skb, &coal_meta);
  856. }
  857. __skb_queue_tail(list, coal_skb);
  858. return;
  859. }
  860. /* Segment the coalesced SKB into new packets */
  861. for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
  862. pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
  863. pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
  864. coal_meta.data_len = pkt_len;
  865. for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
  866. pkt++, total_pkt++, nlo_err_mask >>= 1) {
  867. bool csum_err = nlo_err_mask & 1;
  868. /* Segment the packet if we're not sending the larger
  869. * packet up the stack.
  870. */
  871. if (!gro) {
  872. coal_meta.pkt_count = 1;
  873. if (csum_err)
  874. priv->stats.coal.coal_csum_err++;
  875. __rmnet_map_segment_coal_skb(coal_skb,
  876. &coal_meta, list,
  877. total_pkt,
  878. !csum_err);
  879. continue;
  880. }
  881. if (csum_err) {
  882. priv->stats.coal.coal_csum_err++;
  883. /* Segment out the good data */
  884. if (gro && coal_meta.pkt_count)
  885. __rmnet_map_segment_coal_skb(coal_skb,
  886. &coal_meta,
  887. list,
  888. total_pkt,
  889. true);
  890. /* Segment out the bad checksum */
  891. coal_meta.pkt_count = 1;
  892. __rmnet_map_segment_coal_skb(coal_skb,
  893. &coal_meta, list,
  894. total_pkt, false);
  895. } else {
  896. coal_meta.pkt_count++;
  897. }
  898. }
  899. /* If we're switching NLOs, we need to send out everything from
  900. * the previous one, if we haven't done so. NLOs only switch
  901. * when the packet length changes.
  902. */
  903. if (coal_meta.pkt_count)
  904. __rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
  905. total_pkt, true);
  906. }
  907. }
  908. /* Record reason for coalescing pipe closure */
  909. static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type,
  910. u8 code)
  911. {
  912. struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;
  913. switch (type) {
  914. case RMNET_MAP_COAL_CLOSE_NON_COAL:
  915. stats->non_coal++;
  916. break;
  917. case RMNET_MAP_COAL_CLOSE_IP_MISS:
  918. stats->ip_miss++;
  919. break;
  920. case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
  921. stats->trans_miss++;
  922. break;
  923. case RMNET_MAP_COAL_CLOSE_HW:
  924. switch (code) {
  925. case RMNET_MAP_COAL_CLOSE_HW_NL:
  926. stats->hw_nl++;
  927. break;
  928. case RMNET_MAP_COAL_CLOSE_HW_PKT:
  929. stats->hw_pkt++;
  930. break;
  931. case RMNET_MAP_COAL_CLOSE_HW_BYTE:
  932. stats->hw_byte++;
  933. break;
  934. case RMNET_MAP_COAL_CLOSE_HW_TIME:
  935. stats->hw_time++;
  936. break;
  937. case RMNET_MAP_COAL_CLOSE_HW_EVICT:
  938. stats->hw_evict++;
  939. break;
  940. default:
  941. break;
  942. }
  943. break;
  944. case RMNET_MAP_COAL_CLOSE_COAL:
  945. stats->coal++;
  946. break;
  947. default:
  948. break;
  949. }
  950. }
  951. /* Check if the coalesced header has any incorrect values, in which case, the
  952. * entire coalesced skb must be dropped. Then check if there are any
  953. * checksum issues
  954. */
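/* The returned nlo_err_mask packs each NLO's 8-bit csum_error_bitmap into a
 * u64, with nl_pairs[i] occupying bits 8*i through 8*i + 7.
 */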
  955. static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
  956. u64 *nlo_err_mask)
  957. {
  958. struct rmnet_map_v5_coal_header *coal_hdr;
  959. unsigned char *data = rmnet_map_data_ptr(skb);
  960. struct rmnet_priv *priv = netdev_priv(skb->dev);
  961. u64 mask = 0;
  962. int i;
  963. u8 veid, pkts = 0;
  964. coal_hdr = ((struct rmnet_map_v5_coal_header *)
  965. (data + sizeof(struct rmnet_map_header)));
  966. veid = coal_hdr->virtual_channel_id;
  967. if (coal_hdr->num_nlos == 0 ||
  968. coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
  969. priv->stats.coal.coal_hdr_nlo_err++;
  970. return -EINVAL;
  971. }
  972. for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
  973. /* If there is a checksum issue, we need to split
  974. * up the skb. Rebuild the full csum error field
  975. */
  976. u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
  977. u8 pkt = coal_hdr->nl_pairs[i].num_packets;
  978. mask |= ((u64)err) << (8 * i);
  979. /* Track total packets in frame */
  980. pkts += pkt;
  981. if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
  982. priv->stats.coal.coal_hdr_pkt_err++;
  983. return -EINVAL;
  984. }
  985. }
  986. /* Track number of packets we get inside of coalesced frames */
  987. priv->stats.coal.coal_pkts += pkts;
  988. /* Update ethtool stats */
  989. rmnet_map_data_log_close_stats(priv,
  990. coal_hdr->close_type,
  991. coal_hdr->close_value);
  992. if (veid < RMNET_MAX_VEID)
  993. priv->stats.coal.coal_veid[veid]++;
  994. *nlo_err_mask = mask;
  995. return 0;
  996. }
  997. /* Process a QMAPv5 packet header */
  998. int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
  999. struct sk_buff_head *list,
  1000. u16 len)
  1001. {
  1002. struct rmnet_priv *priv = netdev_priv(skb->dev);
  1003. u64 nlo_err_mask;
  1004. int rc = 0;
  1005. switch (rmnet_map_get_next_hdr_type(skb)) {
  1006. case RMNET_MAP_HEADER_TYPE_COALESCING:
  1007. priv->stats.coal.coal_rx++;
  1008. rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask);
  1009. if (rc)
  1010. return rc;
  1011. rmnet_map_segment_coal_skb(skb, nlo_err_mask, list);
  1012. if (skb_peek(list) != skb)
  1013. consume_skb(skb);
  1014. break;
  1015. case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
  1016. if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
  1017. priv->stats.csum_sw++;
  1018. } else if (rmnet_map_get_csum_valid(skb)) {
  1019. priv->stats.csum_ok++;
  1020. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1021. } else {
  1022. priv->stats.csum_valid_unset++;
  1023. }
  1024. /* Pull unnecessary headers and move the rest to the linear
  1025. * section of the skb.
  1026. */
  1027. pskb_pull(skb,
  1028. (sizeof(struct rmnet_map_header) +
  1029. sizeof(struct rmnet_map_v5_csum_header)));
  1030. rmnet_map_move_headers(skb);
  1031. /* Remove padding only for csum offload packets.
  1032. * Coalesced packets should never have padding.
  1033. */
  1034. pskb_trim(skb, len);
  1035. __skb_queue_tail(list, skb);
  1036. break;
  1037. default:
  1038. rc = -EINVAL;
  1039. break;
  1040. }
  1041. return rc;
  1042. }
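/* UL aggregation time limits, in nanoseconds: an aggregate is held for at
 * most ~1 ms, and aggregation is bypassed entirely when more than ~10 ms has
 * elapsed since the previous packet (both values are compared against
 * tv_nsec below).
 */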
  1043. long rmnet_agg_time_limit __read_mostly = 1000000L;
  1044. long rmnet_agg_bypass_time __read_mostly = 10000000L;
  1045. int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
  1046. {
  1047. u8 *packet_start = skb->data + offset;
  1048. int is_icmp = 0;
  1049. if (skb->protocol == htons(ETH_P_IP)) {
  1050. struct iphdr *ip4h = (struct iphdr *)(packet_start);
  1051. if (ip4h->protocol == IPPROTO_ICMP)
  1052. is_icmp = 1;
  1053. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  1054. struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
  1055. if (ip6h->nexthdr == IPPROTO_ICMPV6) {
  1056. is_icmp = 1;
  1057. } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
  1058. struct frag_hdr *frag;
  1059. frag = (struct frag_hdr *)(packet_start
  1060. + sizeof(struct ipv6hdr));
  1061. if (frag->nexthdr == IPPROTO_ICMPV6)
  1062. is_icmp = 1;
  1063. }
  1064. }
  1065. return is_icmp;
  1066. }
  1067. static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
  1068. {
  1069. struct sk_buff *skb = NULL;
  1070. struct rmnet_aggregation_state *state;
  1071. state = container_of(work, struct rmnet_aggregation_state, agg_wq);
  1072. spin_lock_bh(&state->agg_lock);
  1073. if (likely(state->agg_state == -EINPROGRESS)) {
  1074. /* Buffer may have already been shipped out */
  1075. if (likely(state->agg_skb)) {
  1076. skb = state->agg_skb;
  1077. state->agg_skb = NULL;
  1078. state->agg_count = 0;
  1079. memset(&state->agg_time, 0, sizeof(state->agg_time));
  1080. }
  1081. state->agg_state = 0;
  1082. }
  1083. if (skb)
  1084. state->send_agg_skb(skb);
  1085. spin_unlock_bh(&state->agg_lock);
  1086. }
  1087. enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
  1088. {
  1089. struct rmnet_aggregation_state *state;
  1090. state = container_of(t, struct rmnet_aggregation_state, hrtimer);
  1091. schedule_work(&state->agg_wq);
  1092. return HRTIMER_NORESTART;
  1093. }
  1094. static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
  1095. {
  1096. unsigned int linear = src->len - src->data_len, target = src->len;
  1097. unsigned char *src_buf;
  1098. struct sk_buff *skb;
  1099. src_buf = src->data;
  1100. skb_put_data(dst, src_buf, linear);
  1101. target -= linear;
  1102. skb = src;
  1103. while (target) {
  1104. unsigned int i = 0, non_linear = 0;
  1105. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1106. non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  1107. src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
  1108. skb_put_data(dst, src_buf, non_linear);
  1109. target -= non_linear;
  1110. }
  1111. if (skb_shinfo(skb)->frag_list) {
  1112. skb = skb_shinfo(skb)->frag_list;
  1113. continue;
  1114. }
  1115. if (skb->next)
  1116. skb = skb->next;
  1117. }
  1118. }
  1119. static void rmnet_free_agg_pages(struct rmnet_aggregation_state *state)
  1120. {
  1121. struct rmnet_agg_page *agg_page, *idx;
  1122. list_for_each_entry_safe(agg_page, idx, &state->agg_list, list) {
  1123. list_del(&agg_page->list);
  1124. rmnet_mem_put_page_entry(agg_page->page);
  1125. kfree(agg_page);
  1126. }
  1127. state->agg_head = NULL;
  1128. }
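/* Tries to recycle a page from the pre-allocated pool: a page whose
 * reference count has dropped back to 1 is no longer in use downstream and
 * can be reused. Up to six pool entries are scanned before falling back to a
 * fresh allocation from the rmnet_mem pool.
 */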
  1129. static struct page *rmnet_get_agg_pages(struct rmnet_aggregation_state *state)
  1130. {
  1131. struct rmnet_agg_page *agg_page;
  1132. struct page *page = NULL;
  1133. int i = 0;
  1134. int rc;
  1135. int pageorder = 2;
  1136. if (!(state->params.agg_features & RMNET_PAGE_RECYCLE))
  1137. goto alloc;
  1138. do {
  1139. agg_page = state->agg_head;
  1140. if (unlikely(!agg_page))
  1141. break;
  1142. if (page_ref_count(agg_page->page) == 1) {
  1143. page = agg_page->page;
  1144. page_ref_inc(agg_page->page);
  1145. state->stats->ul_agg_reuse++;
  1146. state->agg_head = list_next_entry(agg_page, list);
  1147. break;
  1148. }
  1149. state->agg_head = list_next_entry(agg_page, list);
  1150. i++;
  1151. } while (i <= 5);
  1152. alloc:
  1153. if (!page) {
  1154. page = rmnet_mem_get_pages_entry(GFP_ATOMIC, state->agg_size_order, &rc,
  1155. &pageorder, RMNET_CORE_ID);
  1156. state->stats->ul_agg_alloc++;
  1157. }
  1158. return page;
  1159. }
  1160. static struct rmnet_agg_page *
  1161. __rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
  1162. {
  1163. struct rmnet_agg_page *agg_page;
  1164. struct page *page;
  1165. int rc;
  1166. int pageorder = 2;
  1167. agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
  1168. if (!agg_page)
  1169. return NULL;
  1170. page = rmnet_mem_get_pages_entry(GFP_ATOMIC, state->agg_size_order, &rc,
  1171. &pageorder, RMNET_CORE_ID);
  1172. if (!page) {
  1173. kfree(agg_page);
  1174. return NULL;
  1175. }
  1176. agg_page->page = page;
  1177. INIT_LIST_HEAD(&agg_page->list);
  1178. return agg_page;
  1179. }
  1180. static void rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
  1181. {
  1182. struct rmnet_agg_page *agg_page = NULL;
  1183. int i = 0;
  1184. for (i = 0; i < RMNET_PAGE_COUNT; i++) {
  1185. agg_page = __rmnet_alloc_agg_pages(state);
  1186. if (agg_page)
  1187. list_add_tail(&agg_page->list, &state->agg_list);
  1188. }
  1189. state->agg_head = list_first_entry_or_null(&state->agg_list,
  1190. struct rmnet_agg_page, list);
  1191. }
  1192. static struct sk_buff *
  1193. rmnet_map_build_skb(struct rmnet_aggregation_state *state)
  1194. {
  1195. struct sk_buff *skb;
  1196. unsigned int size;
  1197. struct page *page;
  1198. void *vaddr;
  1199. page = rmnet_get_agg_pages(state);
  1200. if (!page)
  1201. return NULL;
  1202. vaddr = page_address(page);
  1203. size = PAGE_SIZE << state->agg_size_order;
  1204. skb = build_skb(vaddr, size);
  1205. if (!skb) {
  1206. put_page(page);
  1207. return NULL;
  1208. }
  1209. return skb;
  1210. }
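/* Ships the pending aggregate, if any. Must be entered with agg_lock held;
 * the lock is released before returning, on both the early-exit and the
 * normal path, so callers must not unlock it again afterwards.
 */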
  1211. void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state)
  1212. {
  1213. struct sk_buff *agg_skb;
  1214. if (!state->agg_skb) {
  1215. spin_unlock_bh(&state->agg_lock);
  1216. return;
  1217. }
  1218. agg_skb = state->agg_skb;
  1219. /* Reset the aggregation state */
  1220. state->agg_skb = NULL;
  1221. state->agg_count = 0;
  1222. memset(&state->agg_time, 0, sizeof(state->agg_time));
  1223. state->agg_state = 0;
  1224. state->send_agg_skb(agg_skb);
  1225. spin_unlock_bh(&state->agg_lock);
  1226. hrtimer_cancel(&state->hrtimer);
  1227. }
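/* Egress aggregation policy: priority (LLM/APS) traffic flushes any pending
 * aggregate and is transmitted immediately; sparse traffic (more than
 * rmnet_agg_bypass_time since the previous packet) bypasses aggregation;
 * everything else is linearized into a page-backed aggregate SKB that is
 * flushed once the size, packet-count or rmnet_agg_time_limit thresholds are
 * hit, or when the hrtimer fires.
 */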
  1228. void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
  1229. bool low_latency)
  1230. {
  1231. struct rmnet_aggregation_state *state;
  1232. struct timespec64 diff, last;
  1233. int size;
  1234. state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
  1235. RMNET_DEFAULT_AGG_STATE];
  1236. new_packet:
  1237. spin_lock_bh(&state->agg_lock);
  1238. memcpy(&last, &state->agg_last, sizeof(last));
  1239. ktime_get_real_ts64(&state->agg_last);
  1240. if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
  1241. (RMNET_LLM(skb->priority) || RMNET_APS_LLB(skb->priority))) {
  1242. /* Send out any aggregated SKBs we have */
  1243. rmnet_map_send_agg_skb(state);
  1244. /* Send out the priority SKB. Not holding agg_lock anymore */
  1245. skb->protocol = htons(ETH_P_MAP);
  1246. state->send_agg_skb(skb);
  1247. return;
  1248. }
  1249. if (!state->agg_skb) {
  1250. /* Check to see if we should agg first. If the traffic is very
  1251. * sparse, don't aggregate. We will need to tune this later
  1252. */
  1253. diff = timespec64_sub(state->agg_last, last);
  1254. size = state->params.agg_size - skb->len;
  1255. if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
  1256. size <= 0) {
  1257. skb->protocol = htons(ETH_P_MAP);
  1258. state->send_agg_skb(skb);
  1259. spin_unlock_bh(&state->agg_lock);
  1260. return;
  1261. }
  1262. state->agg_skb = rmnet_map_build_skb(state);
  1263. if (!state->agg_skb) {
  1264. state->agg_skb = NULL;
  1265. state->agg_count = 0;
  1266. memset(&state->agg_time, 0, sizeof(state->agg_time));
  1267. skb->protocol = htons(ETH_P_MAP);
  1268. state->send_agg_skb(skb);
  1269. spin_unlock_bh(&state->agg_lock);
  1270. return;
  1271. }
  1272. rmnet_map_linearize_copy(state->agg_skb, skb);
  1273. state->agg_skb->dev = skb->dev;
  1274. state->agg_skb->protocol = htons(ETH_P_MAP);
  1275. state->agg_count = 1;
  1276. ktime_get_real_ts64(&state->agg_time);
  1277. dev_consume_skb_any(skb);
  1278. goto schedule;
  1279. }
  1280. diff = timespec64_sub(state->agg_last, state->agg_time);
  1281. size = skb_tailroom(state->agg_skb);
  1282. if (skb->len > size ||
  1283. state->agg_count >= state->params.agg_count ||
  1284. diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
  1285. rmnet_map_send_agg_skb(state);
  1286. goto new_packet;
  1287. }
  1288. rmnet_map_linearize_copy(state->agg_skb, skb);
  1289. state->agg_count++;
  1290. dev_consume_skb_any(skb);
  1291. schedule:
  1292. if (state->agg_state != -EINPROGRESS) {
  1293. state->agg_state = -EINPROGRESS;
  1294. hrtimer_start(&state->hrtimer,
  1295. ns_to_ktime(state->params.agg_time),
  1296. HRTIMER_MODE_REL);
  1297. }
  1298. spin_unlock_bh(&state->agg_lock);
  1299. }
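/* Sizing example (illustrative, assuming 4 KB pages): a requested agg size
 * of 16384 gives agg_size_order = get_order(16384) = 2, so each aggregate
 * buffer spans PAGE_SIZE << 2 = 16384 bytes, minus the skb_shared_info
 * overhead reserved at the tail.
 */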
  1300. void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
  1301. u16 size, u8 count, u8 features, u32 time)
  1302. {
  1303. spin_lock_bh(&state->agg_lock);
  1304. state->params.agg_count = count;
  1305. state->params.agg_time = time;
  1306. state->params.agg_size = size;
  1307. state->params.agg_features = features;
  1308. rmnet_free_agg_pages(state);
  1309. /* This effectively disables recycling in case the UL aggregation
  1310. * size is less than PAGE_SIZE.
  1311. */
  1312. if (size < PAGE_SIZE)
  1313. goto done;
  1314. state->agg_size_order = get_order(size);
  1315. size = PAGE_SIZE << state->agg_size_order;
  1316. size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  1317. state->params.agg_size = size;
  1318. if (state->params.agg_features == RMNET_PAGE_RECYCLE)
  1319. rmnet_alloc_agg_pages(state);
  1320. done:
  1321. spin_unlock_bh(&state->agg_lock);
  1322. }
  1323. void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
  1324. {
  1325. unsigned int i;
  1326. for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
  1327. struct rmnet_aggregation_state *state = &port->agg_state[i];
  1328. spin_lock_init(&state->agg_lock);
  1329. INIT_LIST_HEAD(&state->agg_list);
  1330. hrtimer_init(&state->hrtimer, CLOCK_MONOTONIC,
  1331. HRTIMER_MODE_REL);
  1332. state->hrtimer.function = rmnet_map_flush_tx_packet_queue;
  1333. INIT_WORK(&state->agg_wq, rmnet_map_flush_tx_packet_work);
  1334. state->stats = &port->stats.agg;
  1335. /* Since PAGE_SIZE - 1 is specified here, no pages are
  1336. * pre-allocated. This is done to reduce memory usage in cases
  1337. * where UL aggregation is disabled.
  1338. * Additionally, the features flag is also set to 0.
  1339. */
  1340. rmnet_map_update_ul_agg_config(state, PAGE_SIZE - 1, 20, 0,
  1341. 3000000);
  1342. }
  1343. /* Set delivery functions for each aggregation state */
  1344. port->agg_state[RMNET_DEFAULT_AGG_STATE].send_agg_skb = dev_queue_xmit;
  1345. port->agg_state[RMNET_LL_AGG_STATE].send_agg_skb = rmnet_ll_send_skb;
  1346. }
  1347. void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
  1348. {
  1349. unsigned int i;
  1350. for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
  1351. struct rmnet_aggregation_state *state = &port->agg_state[i];
  1352. hrtimer_cancel(&state->hrtimer);
  1353. cancel_work_sync(&state->agg_wq);
  1354. }
  1355. for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
  1356. struct rmnet_aggregation_state *state = &port->agg_state[i];
  1357. spin_lock_bh(&state->agg_lock);
  1358. if (state->agg_state == -EINPROGRESS) {
  1359. if (state->agg_skb) {
  1360. kfree_skb(state->agg_skb);
  1361. state->agg_skb = NULL;
  1362. state->agg_count = 0;
  1363. memset(&state->agg_time, 0,
  1364. sizeof(state->agg_time));
  1365. }
  1366. state->agg_state = 0;
  1367. }
  1368. rmnet_free_agg_pages(state);
  1369. spin_unlock_bh(&state->agg_lock);
  1370. }
  1371. }
  1372. void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
  1373. {
  1374. struct rmnet_aggregation_state *state;
  1375. struct rmnet_port *port;
  1376. struct sk_buff *agg_skb;
  1377. if (unlikely(ch >= RMNET_MAX_AGG_STATE))
  1378. ch = RMNET_DEFAULT_AGG_STATE;
  1379. port = rmnet_get_port(qmap_skb->dev);
  1380. if (!port) {
  1381. kfree_skb(qmap_skb);
  1382. return;
  1383. }
  1384. state = &port->agg_state[ch];
  1385. if (!flush)
  1386. goto send;
  1387. if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
  1388. goto send;
  1389. spin_lock_bh(&state->agg_lock);
  1390. if (state->agg_skb) {
  1391. agg_skb = state->agg_skb;
  1392. state->agg_skb = NULL;
  1393. state->agg_count = 0;
  1394. memset(&state->agg_time, 0, sizeof(state->agg_time));
  1395. state->agg_state = 0;
  1396. state->send_agg_skb(agg_skb);
  1397. spin_unlock_bh(&state->agg_lock);
  1398. hrtimer_cancel(&state->hrtimer);
  1399. } else {
  1400. spin_unlock_bh(&state->agg_lock);
  1401. }
  1402. send:
  1403. state->send_agg_skb(qmap_skb);
  1404. }
  1405. EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
  1406. int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
  1407. struct net_device *orig_dev)
  1408. {
  1409. struct rmnet_priv *priv = netdev_priv(orig_dev);
  1410. struct rmnet_map_v5_tso_header *ul_header;
  1411. if (!(orig_dev->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
  1412. priv->stats.tso_arriv_errs++;
  1413. return -EINVAL;
  1414. }
  1415. ul_header = (struct rmnet_map_v5_tso_header *)
  1416. skb_push(skb, sizeof(*ul_header));
  1417. memset(ul_header, 0, sizeof(*ul_header));
  1418. ul_header->header_type = RMNET_MAP_HEADER_TYPE_TSO;
  1419. if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
  1420. rmnet_map_v5_check_priority(skb, orig_dev,
  1421. (struct rmnet_map_v5_csum_header *)ul_header,
  1422. true);
  1423. ul_header->segment_size = htons(skb_shinfo(skb)->gso_size);
  1424. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
  1425. ul_header->ip_id_cfg = 1;
  1426. skb->ip_summed = CHECKSUM_NONE;
  1427. skb_shinfo(skb)->gso_size = 0;
  1428. skb_shinfo(skb)->gso_segs = 0;
  1429. skb_shinfo(skb)->gso_type = 0;
  1430. priv->stats.tso_pkts++;
  1431. return 0;
  1432. }