rmnet_map_data.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#include "rmnet_ll.h"

#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_PAGE_COUNT 384

struct rmnet_map_coal_metadata {
	void *ip_header;
	void *trans_header;
	u16 ip_len;
	u16 trans_len;
	u16 data_offset;
	u16 data_len;
	u8 ip_proto;
	u8 trans_proto;
	u8 pkt_id;
	u8 pkt_count;
};

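/* Returns a pointer to the transport-layer checksum field inside a TCP or
 * UDP header, or NULL for any other protocol. Every checksum offload path
 * below uses this helper to locate the field it needs to read or rewrite.
 */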
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	__sum16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;
	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;
	default:
		check = NULL;
		break;
	}

	return check;
}

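/* Downlink checksum trailer validation (MAPv4)
 *
 * As the code below implies, the hardware trailer carries a 1's-complement
 * sum computed over the entire IP packet. The two validators reconstruct
 * what the transport checksum field should contain by:
 *
 *   1. folding the IP header checksum out of the trailer value,
 *   2. folding in the pseudo-header checksum, and
 *   3. folding out the checksum value stored in the packet itself.
 *
 * If the result matches the stored transport checksum, the packet is good.
 * This is 16-bit 1's-complement arithmetic throughout, hence the
 * csum16_add()/csum16_sub() helpers and the byte-order conversions.
 */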
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr, *data = rmnet_map_data_ptr(skb);
	u32 length;

	ip6h = data;

	txporthdr = data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
		       ~ntohs((__force __be16)ip_compute_csum(ip6h,
			      (int)(txporthdr - data)));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
#endif

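/* Uplink checksum offload (MAPv4)
 *
 * For egress, the driver does not compute any checksum itself. It records
 * where the transport header starts (csum_start_offset) and where the
 * checksum field lives within it (csum_insert_offset), sets csum_enabled,
 * and complements the transport checksum field in place. The modem hardware
 * is then expected to compute the real checksum and insert it at the
 * recorded offset; presumably the complemented seed lets it fold in the
 * pseudo-header value the stack already left there (CHECKSUM_PARTIAL
 * semantics).
 */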
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txphdr;
	u16 *csum;

	txphdr = iphdr + ip4h->ihl * 4;
	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txphdr;
	u16 *csum;

	txphdr = ip6hdr + sizeof(struct ipv6hdr);
	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip6h->nexthdr == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad,
						  struct rmnet_port *port)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->next_hdr = 1;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;
	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}

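/* Resulting MAP frame layout (sketch):
 *
 *   +------------------+-------------------------+-----------+
 *   | rmnet_map_header | payload (map_datalen)   | pad (0-3) |
 *   +------------------+-------------------------+-----------+
 *
 * pkt_len covers payload plus padding; pad_len records the pad byte count
 * so the receiver can trim it back off. With MAPv5 csum offload enabled,
 * next_hdr = 1 signals that a v5 header follows immediately after this one.
 */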
/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_header *maph;
	struct sk_buff *skbn;
	unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	else if (port->data_format & RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!maph->cd_bit) {
			packet_len += sizeof(struct rmnet_map_v5_csum_header);

			/* Coalescing headers require MAPv5 */
			next_hdr = data + sizeof(*maph);
		}
	}

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0)
		return NULL;

	if (next_hdr &&
	    ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
	     RMNET_MAP_HEADER_TYPE_COALESCING)
		return skb;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag0 = skb_shinfo(skb)->frags;
		struct page *page = skb_frag_page(frag0);

		skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_append_pagefrags(skbn, page, frag0->bv_offset,
				     packet_len);
		skbn->data_len += packet_len;
		skbn->len += packet_len;
	} else {
		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
				 GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
		skb_put(skbn, packet_len);
		memcpy(skbn->data, data, packet_len);
	}

	skbn->priority = skb->priority;
	pskb_pull(skb, packet_len);

	return skbn;
}

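/* A minimal caller sketch (the real loop lives in the rmnet RX handler in
 * rmnet_handlers.c; names here are illustrative only):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
 *		if (skbn == skb)
 *			break;	// coalesced frame handed back whole
 *		__rmnet_map_ingress_handler(skbn, port);
 *	}
 *	consume_skb(skb);
 *
 * Note the special case: for MAPv5 coalescing headers, the function returns
 * the original skb rather than a copy, so the caller must detect that and
 * stop iterating.
 */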
/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)
		       (rmnet_map_data_ptr(skb) + len);

	if (!csum_trailer->valid) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
#endif
	} else {
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);

void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
					 struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			priv->stats.csum_hw++;
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			priv->stats.csum_hw++;
			return;
#else
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
#endif
		} else {
			priv->stats.csum_err_invalid_ip_version++;
		}
	}

sw_csum:
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ind = 0;

	priv->stats.csum_sw++;
}

static void rmnet_map_v5_check_priority(struct sk_buff *skb,
					struct net_device *orig_dev,
					struct rmnet_map_v5_csum_header *hdr,
					bool tso)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);

	if (RMNET_LLM(skb->priority)) {
		priv->stats.ul_prio++;
		hdr->priority = 1;
	}

	/* APS priority bit is only valid for csum header */
	if (!tso && RMNET_APS_LLB(skb->priority)) {
		priv->stats.aps_prio++;
		hdr->aps_prio = 1;
	}
}

void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
					 struct rmnet_port *port,
					 struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = (struct rmnet_map_v5_csum_header *)
		    skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;

	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
		rmnet_map_v5_check_priority(skb, orig_dev, ul_header, false);

	/* Allow priority w/o csum offload */
	if (!(port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = (char *)ul_header + sizeof(*ul_header);
		void *trans;
		__sum16 *check;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_valid_required = 1;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	switch (csum_type) {
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
		break;
	case RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5:
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
		break;
	default:
		break;
	}
}

bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
{
	/* Only applies to frames with a single packet */
	if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
		return false;

	/* TCP header has FIN or PUSH set */
	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
		return true;

	/* Hit packet limit, byte limit, or time limit/EOF on DMA */
	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
		switch (coal_hdr->close_value) {
		case RMNET_MAP_COAL_CLOSE_HW_PKT:
		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			return true;
		}
	}

	return false;
}

static void rmnet_map_move_headers(struct sk_buff *skb)
{
	struct iphdr *iph;
	int ip_len;
	u16 trans_len = 0;
	u8 proto;

	/* This only applies to non-linear SKBs */
	if (!skb_is_nonlinear(skb))
		return;

	iph = (struct iphdr *)rmnet_map_data_ptr(skb);
	if (iph->version == 4) {
		ip_len = iph->ihl * 4;
		proto = iph->protocol;
		if (iph->frag_off & htons(IP_OFFSET))
			/* No transport header information */
			goto pull;
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
		__be16 frag_off;
		u8 nexthdr = ip6h->nexthdr;

		ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
					  &frag_off);
		if (ip_len < 0)
			return;

		proto = nexthdr;
	} else {
		return;
	}

	if (proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);

		trans_len = tp->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		trans_len = sizeof(struct udphdr);
	} else if (proto == NEXTHDR_FRAGMENT) {
		/* Non-first fragments don't have the fragment length added by
		 * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
		 * we account for the length here.
		 */
		ip_len += sizeof(struct frag_hdr);
	}

pull:
	__pskb_pull_tail(skb, ip_len + trans_len);
	skb_reset_network_header(skb);
	if (trans_len)
		skb_set_transport_header(skb, ip_len);
}

static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
				     struct rmnet_map_coal_metadata *coal_meta,
				     struct sk_buff *dest)
{
	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) +
				    coal_meta->ip_len + coal_meta->trans_len;
	u32 copy_len = coal_meta->data_len * coal_meta->pkt_count;

	if (skb_is_nonlinear(coal_skb)) {
		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
		struct page *page = skb_frag_page(frag0);

		skb_append_pagefrags(dest, page,
				     frag0->bv_offset + coal_meta->ip_len +
				     coal_meta->trans_len +
				     coal_meta->data_offset,
				     copy_len);
		dest->data_len += copy_len;
		dest->len += copy_len;
	} else {
		skb_put_data(dest, data_start + coal_meta->data_offset,
			     copy_len);
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_map_gso_stamp(struct sk_buff *skb,
				struct rmnet_map_coal_metadata *coal_meta)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (coal_meta->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (coal_meta->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = coal_meta->data_len;
	shinfo->gso_segs = coal_meta->pkt_count;
}

/* Handles setting up the partial checksum in the skb. Sets the transport
 * checksum to the pseudoheader checksum and sets the csum offload metadata
 */
static void rmnet_map_partial_csum(struct sk_buff *skb,
				   struct rmnet_map_coal_metadata *coal_meta)
{
	unsigned char *data = skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - coal_meta->ip_len;

	if (coal_meta->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, coal_meta->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, coal_meta->trans_proto, 0);
	}

	if (coal_meta->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);

		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
}

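/* With CHECKSUM_PARTIAL set as above, the stack treats the transport
 * checksum as "seeded with the pseudo-header sum, still to be completed":
 * csum_start marks where summing begins and csum_offset where the result
 * is stored. This lets segments rebuilt from a coalesced frame be
 * re-segmented (GSO) or forwarded without a full software checksum pass.
 */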
static void
__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
			     struct rmnet_map_coal_metadata *coal_meta,
			     struct sk_buff_head *list, u8 pkt_id,
			     bool csum_valid)
{
	struct sk_buff *skbn;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	__sum16 *check = NULL;
	u32 alloc_len;
	u32 dlen = coal_meta->data_len * coal_meta->pkt_count;
	u32 hlen = coal_meta->ip_len + coal_meta->trans_len;
	bool zero_csum = false;

	/* We can avoid copying the data if the SKB we got from the lower-level
	 * drivers was nonlinear.
	 */
	if (skb_is_nonlinear(coal_skb))
		alloc_len = hlen;
	else
		alloc_len = hlen + dlen;

	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skbn)
		return;

	skb_reserve(skbn, hlen);
	rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn);

	/* Push transport header and update necessary fields */
	skb_push(skbn, coal_meta->trans_len);
	memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len);
	skb_reset_transport_header(skbn);
	if (coal_meta->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th = tcp_hdr(skbn);

		th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
		check = &th->check;

		/* Don't allow dangerous flags to be set in any segment but the
		 * last one.
		 */
		if (th->fin || th->psh) {
			if (hlen + coal_meta->data_offset + dlen <
			    coal_skb->len) {
				th->fin = 0;
				th->psh = 0;
			}
		}
	} else if (coal_meta->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skbn);

		uh->len = htons(skbn->len);
		check = &uh->check;
		if (coal_meta->ip_proto == 4 && !uh->check)
			zero_csum = true;
	}

	/* Push IP header and update necessary fields */
	skb_push(skbn, coal_meta->ip_len);
	memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len);
	skb_reset_network_header(skbn);
	if (coal_meta->ip_proto == 4) {
		struct iphdr *iph = ip_hdr(skbn);

		iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id);
		iph->tot_len = htons(skbn->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	} else {
		/* Payload length includes any extension headers */
		ipv6_hdr(skbn)->payload_len = htons(skbn->len -
						    sizeof(struct ipv6hdr));
	}

	/* Handle checksum status */
	if (likely(csum_valid) || zero_csum) {
		/* Set the partial checksum information */
		rmnet_map_partial_csum(skbn, coal_meta);
	} else if (check) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__wsum csum;
		unsigned int offset = skb_transport_offset(skbn);
		__sum16 pseudo;

		/* Calculate pseudo header */
		if (coal_meta->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(skbn);

			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    skbn->len -
						    coal_meta->ip_len,
						    coal_meta->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skbn);

			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  skbn->len - coal_meta->ip_len,
						  coal_meta->trans_proto, 0);
		}

		*check = pseudo;
		csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF.
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		skbn->ip_summed = CHECKSUM_NONE;
	}

	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (coal_meta->pkt_count > 1)
		rmnet_map_gso_stamp(skbn, coal_meta);

	/* Propagate priority value */
	skbn->priority = coal_skb->priority;

	__skb_queue_tail(list, skbn);

	/* Update meta information to move past the data we just segmented */
	coal_meta->data_offset += dlen;
	coal_meta->pkt_id = pkt_id + 1;
	coal_meta->pkt_count = 0;
}

static bool rmnet_map_validate_csum(struct sk_buff *skb,
				    struct rmnet_map_coal_metadata *meta)
{
	u8 *data = rmnet_map_data_ptr(skb);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	datagram_len = skb->len - meta->ip_len;
	if (meta->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    meta->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, meta->trans_proto,
					  0);
	}

	csum = skb_checksum(skb, meta->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}

/* Converts the coalesced SKB into a list of SKBs.
 * NLOs containing csum errors will not be included.
 * The original coalesced SKB should be treated as invalid and
 * must be freed by the caller
 */
static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
				       u64 nlo_err_mask,
				       struct sk_buff_head *list)
{
	struct iphdr *iph;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct rmnet_map_v5_coal_header *coal_hdr;
	struct rmnet_map_coal_metadata coal_meta;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	memset(&coal_meta, 0, sizeof(coal_meta));

	/* Pull off the headers we no longer need */
	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
	coal_hdr = (struct rmnet_map_v5_coal_header *)
		   rmnet_map_data_ptr(coal_skb);
	pskb_pull(coal_skb, sizeof(*coal_hdr));

	iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);

	if (iph->version == 4) {
		coal_meta.ip_proto = 4;
		coal_meta.ip_len = iph->ihl * 4;
		coal_meta.trans_proto = iph->protocol;
		coal_meta.ip_header = iph;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
		int ip_len;
		__be16 frag_off;
		u8 protocol = ip6h->nexthdr;

		coal_meta.ip_proto = 6;
		ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h),
					  &protocol, &frag_off);
		coal_meta.trans_proto = protocol;
		coal_meta.ip_header = ip6h;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_meta.ip_len = (u16)ip_len;
		if (coal_meta.ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_meta.trans_proto == IPPROTO_TCP) {
		struct tcphdr *th;

		th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len);
		coal_meta.trans_len = th->doff * 4;
		coal_meta.trans_header = th;
	} else if (coal_meta.trans_proto == IPPROTO_UDP) {
		struct udphdr *uh;

		uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
		coal_meta.trans_len = sizeof(*uh);
		coal_meta.trans_header = uh;

		/* Check for v4 zero checksum */
		if (coal_meta.ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
		rmnet_map_move_headers(coal_skb);
		/* Mark as valid if it checks out */
		if (rmnet_map_validate_csum(coal_skb, &coal_meta))
			coal_skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(list, coal_skb);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * SKB unchanged.
	 */
	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
		rmnet_map_move_headers(coal_skb);
		coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
		coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
		if (coal_meta.pkt_count > 1) {
			rmnet_map_partial_csum(coal_skb, &coal_meta);
			rmnet_map_gso_stamp(coal_skb, &coal_meta);
		}

		__skb_queue_tail(list, coal_skb);
		return;
	}

	/* Segment the coalesced SKB into new packets */
	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.data_len = pkt_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_meta.pkt_count = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_map_segment_coal_skb(coal_skb,
							     &coal_meta, list,
							     total_pkt,
							     !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && coal_meta.pkt_count)
					__rmnet_map_segment_coal_skb(coal_skb,
								     &coal_meta,
								     list,
								     total_pkt,
								     true);

				/* Segment out the bad checksum */
				coal_meta.pkt_count = 1;
				__rmnet_map_segment_coal_skb(coal_skb,
							     &coal_meta, list,
							     total_pkt, false);
			} else {
				coal_meta.pkt_count++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_meta.pkt_count)
			__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
						     total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					   u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;
	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;
	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;
	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;
		default:
			break;
		}
		break;
	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;
	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced skb must be dropped. Then check if there are any
 * checksum issues
 */
static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
					    u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr;
	unsigned char *data = rmnet_map_data_ptr(skb);
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = ((struct rmnet_map_v5_coal_header *)
		    (data + sizeof(struct rmnet_map_header)));
	veid = coal_hdr->virtual_channel_id;

	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_map_data_log_close_stats(priv,
				       coal_hdr->close_type,
				       coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}

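/* nlo_err_mask layout: one bit per packet, eight bits per NLO, so bit
 * (8 * nlo + pkt) is set when the hardware flagged a checksum error for
 * packet `pkt` of NLO `nlo`. The segmentation loop above shifts the mask
 * right once per packet, so it only ever has to test bit 0.
 */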
/* Process a QMAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 nlo_err_mask;
	int rc = 0;

	switch (rmnet_map_get_next_hdr_type(skb)) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_map_segment_coal_skb(skb, nlo_err_mask, list);
		if (skb_peek(list) != skb)
			consume_skb(skb);
		break;
	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (rmnet_map_get_csum_valid(skb)) {
			priv->stats.csum_ok++;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			priv->stats.csum_valid_unset++;
		}

		/* Pull unnecessary headers and move the rest to the linear
		 * section of the skb.
		 */
		pskb_pull(skb,
			  (sizeof(struct rmnet_map_header) +
			   sizeof(struct rmnet_map_v5_csum_header)));
		rmnet_map_move_headers(skb);

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		pskb_trim(skb, len);
		__skb_queue_tail(list, skb);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;

int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
	u8 *packet_start = skb->data + offset;
	int is_icmp = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}

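/* ICMP (including ICMPv6 behind a fragment header) is deliberately kept out
 * of UL aggregation; presumably so that pings and neighbor discovery are
 * not delayed while an aggregate buffer fills or its flush timer runs.
 */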
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct rmnet_aggregation_state *state;

	state = container_of(work, struct rmnet_aggregation_state, agg_wq);

	spin_lock_bh(&state->agg_lock);
	if (likely(state->agg_state == -EINPROGRESS)) {
		/* Buffer may have already been shipped out */
		if (likely(state->agg_skb)) {
			skb = state->agg_skb;
			state->agg_skb = NULL;
			state->agg_count = 0;
			memset(&state->agg_time, 0, sizeof(state->agg_time));
		}
		state->agg_state = 0;
	}

	if (skb)
		state->send_agg_skb(skb);
	spin_unlock_bh(&state->agg_lock);
}

enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
	struct rmnet_aggregation_state *state;

	state = container_of(t, struct rmnet_aggregation_state, hrtimer);

	schedule_work(&state->agg_wq);
	return HRTIMER_NORESTART;
}

static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
	unsigned int linear = src->len - src->data_len, target = src->len;
	unsigned char *src_buf;
	struct sk_buff *skb;

	src_buf = src->data;
	skb_put_data(dst, src_buf, linear);
	target -= linear;

	skb = src;

	while (target) {
		unsigned int i = 0, non_linear = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);

			skb_put_data(dst, src_buf, non_linear);
			target -= non_linear;
		}

		if (skb_shinfo(skb)->frag_list) {
			skb = skb_shinfo(skb)->frag_list;
			continue;
		}

		if (skb->next)
			skb = skb->next;
	}
}

static void rmnet_free_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page, *idx;

	list_for_each_entry_safe(agg_page, idx, &state->agg_list, list) {
		list_del(&agg_page->list);
		put_page(agg_page->page);
		kfree(agg_page);
	}

	state->agg_head = NULL;
}

static struct page *rmnet_get_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page;
	struct page *page = NULL;
	int i = 0;

	if (!(state->params.agg_features & RMNET_PAGE_RECYCLE))
		goto alloc;

	do {
		agg_page = state->agg_head;
		if (unlikely(!agg_page))
			break;

		if (page_ref_count(agg_page->page) == 1) {
			page = agg_page->page;
			page_ref_inc(agg_page->page);

			state->stats->ul_agg_reuse++;
			state->agg_head = list_next_entry(agg_page, list);
			break;
		}

		state->agg_head = list_next_entry(agg_page, list);
		i++;
	} while (i <= 5);

alloc:
	if (!page) {
		page = __dev_alloc_pages(GFP_ATOMIC, state->agg_size_order);
		state->stats->ul_agg_alloc++;
	}

	return page;
}

static struct rmnet_agg_page *
__rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page;
	struct page *page;

	agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
	if (!agg_page)
		return NULL;

	page = __dev_alloc_pages(GFP_ATOMIC, state->agg_size_order);
	if (!page) {
		kfree(agg_page);
		return NULL;
	}

	agg_page->page = page;
	INIT_LIST_HEAD(&agg_page->list);

	return agg_page;
}

static void rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page = NULL;
	int i = 0;

	for (i = 0; i < RMNET_PAGE_COUNT; i++) {
		agg_page = __rmnet_alloc_agg_pages(state);
		if (agg_page)
			list_add_tail(&agg_page->list, &state->agg_list);
	}

	state->agg_head = list_first_entry_or_null(&state->agg_list,
						   struct rmnet_agg_page, list);
}

static struct sk_buff *
rmnet_map_build_skb(struct rmnet_aggregation_state *state)
{
	struct sk_buff *skb;
	unsigned int size;
	struct page *page;
	void *vaddr;

	page = rmnet_get_agg_pages(state);
	if (!page)
		return NULL;

	vaddr = page_address(page);
	size = PAGE_SIZE << state->agg_size_order;

	skb = build_skb(vaddr, size);
	if (!skb) {
		put_page(page);
		return NULL;
	}

	return skb;
}

void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state)
{
	struct sk_buff *agg_skb;

	if (!state->agg_skb) {
		spin_unlock_bh(&state->agg_lock);
		return;
	}

	agg_skb = state->agg_skb;
	/* Reset the aggregation state */
	state->agg_skb = NULL;
	state->agg_count = 0;
	memset(&state->agg_time, 0, sizeof(state->agg_time));
	state->agg_state = 0;
	state->send_agg_skb(agg_skb);
	spin_unlock_bh(&state->agg_lock);
	hrtimer_cancel(&state->hrtimer);
}

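/* Locking in the TX aggregation path is subtle: rmnet_map_send_agg_skb()
 * above expects to be entered with agg_lock held and drops the lock itself
 * on every path, so rmnet_map_tx_aggregate() below must not unlock again
 * after calling it.
 */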
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
			    bool low_latency)
{
	struct rmnet_aggregation_state *state;
	struct timespec64 diff, last;
	int size;

	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
						 RMNET_DEFAULT_AGG_STATE];

new_packet:
	spin_lock_bh(&state->agg_lock);
	memcpy(&last, &state->agg_last, sizeof(last));
	ktime_get_real_ts64(&state->agg_last);

	if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
	    (RMNET_LLM(skb->priority) || RMNET_APS_LLB(skb->priority))) {
		/* Send out any aggregated SKBs we have */
		rmnet_map_send_agg_skb(state);
		/* Send out the priority SKB. Not holding agg_lock anymore */
		skb->protocol = htons(ETH_P_MAP);
		state->send_agg_skb(skb);
		return;
	}

	if (!state->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec64_sub(state->agg_last, last);
		size = state->params.agg_size - skb->len;

		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
		    size <= 0) {
			skb->protocol = htons(ETH_P_MAP);
			state->send_agg_skb(skb);
			spin_unlock_bh(&state->agg_lock);
			return;
		}

		state->agg_skb = rmnet_map_build_skb(state);
		if (!state->agg_skb) {
			state->agg_skb = NULL;
			state->agg_count = 0;
			memset(&state->agg_time, 0, sizeof(state->agg_time));
			skb->protocol = htons(ETH_P_MAP);
			state->send_agg_skb(skb);
			spin_unlock_bh(&state->agg_lock);
			return;
		}

		rmnet_map_linearize_copy(state->agg_skb, skb);
		state->agg_skb->dev = skb->dev;
		state->agg_skb->protocol = htons(ETH_P_MAP);
		state->agg_count = 1;
		ktime_get_real_ts64(&state->agg_time);
		dev_kfree_skb_any(skb);
		goto schedule;
	}

	diff = timespec64_sub(state->agg_last, state->agg_time);
	size = skb_tailroom(state->agg_skb);

	if (skb->len > size ||
	    state->agg_count >= state->params.agg_count ||
	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
		rmnet_map_send_agg_skb(state);
		goto new_packet;
	}

	rmnet_map_linearize_copy(state->agg_skb, skb);
	state->agg_count++;
	dev_kfree_skb_any(skb);

schedule:
	if (state->agg_state != -EINPROGRESS) {
		state->agg_state = -EINPROGRESS;
		hrtimer_start(&state->hrtimer,
			      ns_to_ktime(state->params.agg_time),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_bh(&state->agg_lock);
}

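/* To summarize the bypass conditions above: a packet skips aggregation
 * entirely when priority delivery is requested, when traffic is sparse
 * (more than rmnet_agg_bypass_time ns since the last packet), or when it
 * would not fit (skb->len >= params.agg_size). An open aggregate is flushed
 * once it is full, reaches params.agg_count packets, or has sat longer than
 * rmnet_agg_time_limit ns; otherwise the hrtimer flush path picks it up
 * after params.agg_time ns.
 */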
void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
				    u16 size, u8 count, u8 features, u32 time)
{
	spin_lock_bh(&state->agg_lock);
	state->params.agg_count = count;
	state->params.agg_time = time;
	state->params.agg_size = size;
	state->params.agg_features = features;

	rmnet_free_agg_pages(state);

	/* This effectively disables recycling in case the UL aggregation
	 * size is less than PAGE_SIZE.
	 */
	if (size < PAGE_SIZE)
		goto done;

	state->agg_size_order = get_order(size);

	size = PAGE_SIZE << state->agg_size_order;
	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	state->params.agg_size = size;

	if (state->params.agg_features == RMNET_PAGE_RECYCLE)
		rmnet_alloc_agg_pages(state);

done:
	spin_unlock_bh(&state->agg_lock);
}

void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	unsigned int i;

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		spin_lock_init(&state->agg_lock);
		INIT_LIST_HEAD(&state->agg_list);
		hrtimer_init(&state->hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		state->hrtimer.function = rmnet_map_flush_tx_packet_queue;
		INIT_WORK(&state->agg_wq, rmnet_map_flush_tx_packet_work);
		state->stats = &port->stats.agg;

		/* Since PAGE_SIZE - 1 is specified here, no pages are
		 * pre-allocated. This is done to reduce memory usage in cases
		 * where UL aggregation is disabled.
		 * Additionally, the features flag is also set to 0.
		 */
		rmnet_map_update_ul_agg_config(state, PAGE_SIZE - 1, 20, 0,
					       3000000);
	}

	/* Set delivery functions for each aggregation state */
	port->agg_state[RMNET_DEFAULT_AGG_STATE].send_agg_skb = dev_queue_xmit;
	port->agg_state[RMNET_LL_AGG_STATE].send_agg_skb = rmnet_ll_send_skb;
}

void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
	unsigned int i;

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		hrtimer_cancel(&state->hrtimer);
		cancel_work_sync(&state->agg_wq);
	}

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		spin_lock_bh(&state->agg_lock);
		if (state->agg_state == -EINPROGRESS) {
			if (state->agg_skb) {
				kfree_skb(state->agg_skb);
				state->agg_skb = NULL;
				state->agg_count = 0;
				memset(&state->agg_time, 0,
				       sizeof(state->agg_time));
			}

			state->agg_state = 0;
		}

		rmnet_free_agg_pages(state);
		spin_unlock_bh(&state->agg_lock);
	}
}

void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
{
	struct rmnet_aggregation_state *state;
	struct rmnet_port *port;
	struct sk_buff *agg_skb;

	if (unlikely(ch >= RMNET_MAX_AGG_STATE))
		ch = RMNET_DEFAULT_AGG_STATE;

	port = rmnet_get_port(qmap_skb->dev);
	if (!port) {
		kfree_skb(qmap_skb);
		return;
	}
	state = &port->agg_state[ch];

	if (!flush)
		goto send;

	if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
		goto send;

	spin_lock_bh(&state->agg_lock);
	if (state->agg_skb) {
		agg_skb = state->agg_skb;
		state->agg_skb = NULL;
		state->agg_count = 0;
		memset(&state->agg_time, 0, sizeof(state->agg_time));
		state->agg_state = 0;
		state->send_agg_skb(agg_skb);
		spin_unlock_bh(&state->agg_lock);
		hrtimer_cancel(&state->hrtimer);
	} else {
		spin_unlock_bh(&state->agg_lock);
	}

send:
	state->send_agg_skb(qmap_skb);
}
EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);

int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
			     struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_tso_header *ul_header;

	if (!(orig_dev->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
		priv->stats.tso_arriv_errs++;
		return -EINVAL;
	}

	ul_header = (struct rmnet_map_v5_tso_header *)
		    skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_TSO;

	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
		rmnet_map_v5_check_priority(skb, orig_dev,
					    (struct rmnet_map_v5_csum_header *)ul_header,
					    true);

	ul_header->segment_size = htons(skb_shinfo(skb)->gso_size);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
		ul_header->ip_id_cfg = 1;

	skb->ip_summed = CHECKSUM_NONE;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;

	priv->stats.tso_pkts++;

	return 0;
}