/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#include "rmnet_ll.h"

#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

struct rmnet_map_coal_metadata {
	void *ip_header;
	void *trans_header;
	u16 ip_len;
	u16 trans_len;
	u16 data_offset;
	u16 data_len;
	u8 ip_proto;
	u8 trans_proto;
	u8 pkt_id;
	u8 pkt_count;
};

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	__sum16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;
	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;
	default:
		check = NULL;
		break;
	}

	return check;
}
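
/* Illustrative only (editorial addition, not part of the driver): a minimal
 * sketch of how the helper above is typically used, assuming a linear IPv4
 * packet at skb->data. The function name is hypothetical.
 */
static bool __maybe_unused rmnet_map_csum_field_example(struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *check;

	check = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	/* NULL means the protocol is neither TCP nor UDP */
	return check != NULL;
}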

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
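
/* Editorial note on the arithmetic above (informal, not from the original
 * source): the trailer carries the 1's-complement sum over the whole IP
 * packet. Working in 16-bit 1's-complement arithmetic, the function computes
 *
 *   payload_csum = trailer_csum - ip_header_csum
 *   verify       = ~(payload_csum + pseudo_hdr_csum - *csum_field)
 *
 * i.e. it peels the IP header sum off, folds the pseudo header in, and backs
 * out the packet's own checksum field; the packet checks out when verify
 * equals that field. csum16_add()/csum16_sub() supply the end-around-carry
 * that 1's-complement math requires.
 */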

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr, *data = rmnet_map_data_ptr(skb);
	u32 length;

	ip6h = data;
	txporthdr = data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
		       ~ntohs((__force __be16)ip_compute_csum(ip6h,
			      (int)(txporthdr - data)));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txphdr;
	u16 *csum;

	txphdr = iphdr + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txphdr;
	u16 *csum;

	txphdr = ip6hdr + sizeof(struct ipv6hdr);

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;

	if (ip6h->nexthdr == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad,
						  struct rmnet_port *port)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->next_hdr = 1;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}
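
/* Illustrative only (editorial addition): a minimal egress sketch of how the
 * helper above is meant to be called. Assumes sufficient skb headroom; the
 * mux_id assignment assumes the mux_id field of struct rmnet_map_header used
 * elsewhere in rmnet, and the function name and channel value are
 * hypothetical.
 */
static int __maybe_unused rmnet_map_add_header_example(struct sk_buff *skb,
						       struct rmnet_port *port)
{
	struct rmnet_map_header *map_header;

	/* No extra header bytes; pad the payload to a 4-byte multiple */
	map_header = rmnet_map_add_map_header(skb, 0, 0, port);
	if (!map_header)
		return -ENOMEM;	/* insufficient tailroom for padding */

	map_header->mux_id = 3;	/* hypothetical channel */
	skb->protocol = htons(ETH_P_MAP);

	return 0;
}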

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_header *maph;
	struct sk_buff *skbn;
	unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!maph->cd_bit) {
			packet_len += sizeof(struct rmnet_map_v5_csum_header);

			/* Coalescing headers require MAPv5 */
			next_hdr = data + sizeof(*maph);
		}
	}

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0)
		return NULL;

	if (next_hdr &&
	    ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
	     RMNET_MAP_HEADER_TYPE_COALESCING)
		return skb;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag0 = skb_shinfo(skb)->frags;
		struct page *page = skb_frag_page(frag0);

		skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_append_pagefrags(skbn, page, frag0->bv_offset,
				     packet_len);
		skbn->data_len += packet_len;
		skbn->len += packet_len;
	} else {
		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
				 GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
		skb_put(skbn, packet_len);
		memcpy(skbn->data, data, packet_len);
	}

	skbn->priority = skb->priority;
	pskb_pull(skb, packet_len);

	return skbn;
}
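
/* Illustrative only (editorial addition): the deaggregation loop a caller is
 * expected to run, per the comment above. Queueing each inner packet stands in
 * for the real ingress handling; the function name is hypothetical. Note that
 * coalesced frames are handed back whole (skbn == skb) and must terminate the
 * loop so the original skb is not freed twice.
 */
static void __maybe_unused rmnet_map_deaggregate_example(struct sk_buff *skb,
							 struct rmnet_port *port)
{
	struct sk_buff_head list;
	struct sk_buff *skbn;

	__skb_queue_head_init(&list);

	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
		if (skbn == skb)
			break;	/* coalesced frame returned as-is */

		__skb_queue_tail(&list, skbn);
	}

	if (skbn != skb)
		consume_skb(skb);

	/* A real caller would deliver each packet up the stack; purging here
	 * just keeps the sketch leak-free.
	 */
	__skb_queue_purge(&list);
}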

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)
		       (rmnet_map_data_ptr(skb) + len);

	if (!csum_trailer->valid) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
#endif
	} else {
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);

void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
					 struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			priv->stats.csum_hw++;
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			priv->stats.csum_hw++;
			return;
#else
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
#endif
		} else {
			priv->stats.csum_err_invalid_ip_version++;
		}
	}

sw_csum:
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ind = 0;

	priv->stats.csum_sw++;
}

static void rmnet_map_v5_check_priority(struct sk_buff *skb,
					struct net_device *orig_dev,
					struct rmnet_map_v5_csum_header *hdr,
					bool tso)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);

	if (RMNET_LLM(skb->priority)) {
		priv->stats.ul_prio++;
		hdr->priority = 1;
	}

	/* APS priority bit is only valid for csum header */
	if (!tso && RMNET_APS_LLB(skb->priority)) {
		priv->stats.aps_prio++;
		hdr->aps_prio = 1;
	}
}

void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
					 struct rmnet_port *port,
					 struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = (struct rmnet_map_v5_csum_header *)
		    skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;

	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
		rmnet_map_v5_check_priority(skb, orig_dev, ul_header, false);

	/* Allow priority w/o csum offload */
	if (!(port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = (char *)ul_header + sizeof(*ul_header);
		void *trans;
		__sum16 *check;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_valid_required = 1;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	switch (csum_type) {
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
		break;
	case RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5:
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
		break;
	default:
		break;
	}
}
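
/* Illustrative only (editorial addition): how an egress path might derive
 * csum_type from the port's data format before calling the dispatcher above.
 * A sketch, not the driver's actual transmit code; the function name is
 * hypothetical.
 */
static void __maybe_unused
rmnet_map_checksum_uplink_example(struct sk_buff *skb, struct rmnet_port *port,
				  struct net_device *orig_dev)
{
	int csum_type = 0;

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	else if (port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5)
		csum_type = RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5;

	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
						 csum_type);
}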

bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
{
	/* Only applies to frames with a single packet */
	if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
		return false;

	/* TCP header has FIN or PUSH set */
	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
		return true;

	/* Hit packet limit, byte limit, or time limit/EOF on DMA */
	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
		switch (coal_hdr->close_value) {
		case RMNET_MAP_COAL_CLOSE_HW_PKT:
		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			return true;
		}
	}

	return false;
}

static void rmnet_map_move_headers(struct sk_buff *skb)
{
	struct iphdr *iph;
	int ip_len;
	u16 trans_len = 0;
	u8 proto;

	/* This only applies to non-linear SKBs */
	if (!skb_is_nonlinear(skb))
		return;

	iph = (struct iphdr *)rmnet_map_data_ptr(skb);
	if (iph->version == 4) {
		ip_len = iph->ihl * 4;
		proto = iph->protocol;
		if (iph->frag_off & htons(IP_OFFSET))
			/* No transport header information */
			goto pull;
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
		__be16 frag_off;
		u8 nexthdr = ip6h->nexthdr;

		/* ip_len is an int (not the u16 in the metadata struct) so
		 * the error return from ipv6_skip_exthdr() is actually caught
		 */
		ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
					  &frag_off);
		if (ip_len < 0)
			return;

		proto = nexthdr;
	} else {
		return;
	}

	if (proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);

		trans_len = tp->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		trans_len = sizeof(struct udphdr);
	} else if (proto == NEXTHDR_FRAGMENT) {
		/* Non-first fragments don't have the fragment length added by
		 * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
		 * we account for the length here.
		 */
		ip_len += sizeof(struct frag_hdr);
	}

pull:
	__pskb_pull_tail(skb, ip_len + trans_len);
	skb_reset_network_header(skb);
	if (trans_len)
		skb_set_transport_header(skb, ip_len);
}

static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
				     struct rmnet_map_coal_metadata *coal_meta,
				     struct sk_buff *dest)
{
	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) +
				    coal_meta->ip_len + coal_meta->trans_len;
	u32 copy_len = coal_meta->data_len * coal_meta->pkt_count;

	if (skb_is_nonlinear(coal_skb)) {
		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
		struct page *page = skb_frag_page(frag0);

		skb_append_pagefrags(dest, page,
				     frag0->bv_offset + coal_meta->ip_len +
				     coal_meta->trans_len +
				     coal_meta->data_offset,
				     copy_len);
		dest->data_len += copy_len;
		dest->len += copy_len;
	} else {
		skb_put_data(dest, data_start + coal_meta->data_offset,
			     copy_len);
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_map_gso_stamp(struct sk_buff *skb,
				struct rmnet_map_coal_metadata *coal_meta)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (coal_meta->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (coal_meta->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = coal_meta->data_len;
	shinfo->gso_segs = coal_meta->pkt_count;
}

/* Handles setting up the partial checksum in the skb. Sets the transport
 * checksum to the pseudoheader checksum and sets the csum offload metadata
 */
static void rmnet_map_partial_csum(struct sk_buff *skb,
				   struct rmnet_map_coal_metadata *coal_meta)
{
	unsigned char *data = skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - coal_meta->ip_len;

	if (coal_meta->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, coal_meta->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, coal_meta->trans_proto, 0);
	}

	if (coal_meta->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);

		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
}
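
/* Editorial note (not from the original source): CHECKSUM_PARTIAL is the
 * kernel's "checksum still owed" state. Seeding the transport checksum field
 * with the pseudo-header sum and pointing csum_start/csum_offset at it lets
 * the stack or a capable device finish the 1's-complement sum over the
 * payload later, which is what GSO segmentation expects to find.
 */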

static void
__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
			     struct rmnet_map_coal_metadata *coal_meta,
			     struct sk_buff_head *list, u8 pkt_id,
			     bool csum_valid)
{
	struct sk_buff *skbn;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	__sum16 *check = NULL;
	u32 alloc_len;
	u32 dlen = coal_meta->data_len * coal_meta->pkt_count;
	u32 hlen = coal_meta->ip_len + coal_meta->trans_len;
	bool zero_csum = false;

	/* We can avoid copying the data if the SKB we got from the lower-level
	 * drivers was nonlinear.
	 */
	if (skb_is_nonlinear(coal_skb))
		alloc_len = hlen;
	else
		alloc_len = hlen + dlen;

	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skbn)
		return;

	skb_reserve(skbn, hlen);
	rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn);

	/* Push transport header and update necessary fields */
	skb_push(skbn, coal_meta->trans_len);
	memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len);
	skb_reset_transport_header(skbn);
	if (coal_meta->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th = tcp_hdr(skbn);

		th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
		check = &th->check;

		/* Don't allow dangerous flags to be set in any segment but the
		 * last one.
		 */
		if (th->fin || th->psh) {
			if (hlen + coal_meta->data_offset + dlen <
			    coal_skb->len) {
				th->fin = 0;
				th->psh = 0;
			}
		}
	} else if (coal_meta->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skbn);

		uh->len = htons(skbn->len);
		check = &uh->check;
		if (coal_meta->ip_proto == 4 && !uh->check)
			zero_csum = true;
	}

	/* Push IP header and update necessary fields */
	skb_push(skbn, coal_meta->ip_len);
	memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len);
	skb_reset_network_header(skbn);
	if (coal_meta->ip_proto == 4) {
		struct iphdr *iph = ip_hdr(skbn);

		iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id);
		iph->tot_len = htons(skbn->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	} else {
		/* Payload length includes any extension headers */
		ipv6_hdr(skbn)->payload_len = htons(skbn->len -
						    sizeof(struct ipv6hdr));
	}

	/* Handle checksum status */
	if (likely(csum_valid) || zero_csum) {
		/* Set the partial checksum information */
		rmnet_map_partial_csum(skbn, coal_meta);
	} else if (check) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__wsum csum;
		unsigned int offset = skb_transport_offset(skbn);
		__sum16 pseudo;

		/* Calculate pseudo header */
		if (coal_meta->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(skbn);

			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    skbn->len -
						    coal_meta->ip_len,
						    coal_meta->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skbn);

			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  skbn->len - coal_meta->ip_len,
						  coal_meta->trans_proto, 0);
		}

		*check = pseudo;
		csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF.
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		skbn->ip_summed = CHECKSUM_NONE;
	}

	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (coal_meta->pkt_count > 1)
		rmnet_map_gso_stamp(skbn, coal_meta);

	/* Propagate priority value */
	skbn->priority = coal_skb->priority;

	__skb_queue_tail(list, skbn);

	/* Update meta information to move past the data we just segmented */
	coal_meta->data_offset += dlen;
	coal_meta->pkt_id = pkt_id + 1;
	coal_meta->pkt_count = 0;
}

static bool rmnet_map_validate_csum(struct sk_buff *skb,
				    struct rmnet_map_coal_metadata *meta)
{
	u8 *data = rmnet_map_data_ptr(skb);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	datagram_len = skb->len - meta->ip_len;
	if (meta->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    meta->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, meta->trans_proto,
					  0);
	}

	csum = skb_checksum(skb, meta->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}

/* Converts the coalesced SKB into a list of SKBs.
 * NLOs containing csum errors will not be included.
 * The original coalesced SKB should be treated as invalid and
 * must be freed by the caller
 */
static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
				       u64 nlo_err_mask,
				       struct sk_buff_head *list)
{
	struct iphdr *iph;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct rmnet_map_v5_coal_header *coal_hdr;
	struct rmnet_map_coal_metadata coal_meta;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	memset(&coal_meta, 0, sizeof(coal_meta));

	/* Pull off the headers we no longer need */
	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
	coal_hdr = (struct rmnet_map_v5_coal_header *)
		   rmnet_map_data_ptr(coal_skb);
	pskb_pull(coal_skb, sizeof(*coal_hdr));

	iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);

	if (iph->version == 4) {
		coal_meta.ip_proto = 4;
		coal_meta.ip_len = iph->ihl * 4;
		coal_meta.trans_proto = iph->protocol;
		coal_meta.ip_header = iph;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
		__be16 frag_off;
		int ip_len;
		u8 protocol = ip6h->nexthdr;

		coal_meta.ip_proto = 6;
		/* Use a signed local so the error return from
		 * ipv6_skip_exthdr() is actually caught before it is stored
		 * in the unsigned metadata field.
		 */
		ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h),
					  &protocol, &frag_off);
		coal_meta.trans_proto = protocol;
		coal_meta.ip_header = ip6h;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		} else if (ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}

		coal_meta.ip_len = ip_len;
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_meta.trans_proto == IPPROTO_TCP) {
		struct tcphdr *th;

		th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len);
		coal_meta.trans_len = th->doff * 4;
		coal_meta.trans_header = th;
	} else if (coal_meta.trans_proto == IPPROTO_UDP) {
		struct udphdr *uh;

		uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
		coal_meta.trans_len = sizeof(*uh);
		coal_meta.trans_header = uh;

		/* Check for v4 zero checksum */
		if (coal_meta.ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
		rmnet_map_move_headers(coal_skb);

		/* Mark as valid if it checks out */
		if (rmnet_map_validate_csum(coal_skb, &coal_meta))
			coal_skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(list, coal_skb);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * SKB unchanged.
	 */
	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
		rmnet_map_move_headers(coal_skb);
		coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
		coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
		if (coal_meta.pkt_count > 1) {
			rmnet_map_partial_csum(coal_skb, &coal_meta);
			rmnet_map_gso_stamp(coal_skb, &coal_meta);
		}

		__skb_queue_tail(list, coal_skb);
		return;
	}

	/* Segment the coalesced SKB into new packets */
	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.data_len = pkt_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_meta.pkt_count = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_map_segment_coal_skb(coal_skb,
							     &coal_meta, list,
							     total_pkt,
							     !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && coal_meta.pkt_count)
					__rmnet_map_segment_coal_skb(coal_skb,
								     &coal_meta,
								     list,
								     total_pkt,
								     true);

				/* Segment out the bad checksum */
				coal_meta.pkt_count = 1;
				__rmnet_map_segment_coal_skb(coal_skb,
							     &coal_meta, list,
							     total_pkt, false);
			} else {
				coal_meta.pkt_count++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_meta.pkt_count)
			__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
						     total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					   u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;
	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;
	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;
	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;
		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;
		default:
			break;
		}
		break;
	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;
	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced skb must be dropped. Then check if there are any
 * checksum issues
 */
static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
					    u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr;
	unsigned char *data = rmnet_map_data_ptr(skb);
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = ((struct rmnet_map_v5_coal_header *)
		    (data + sizeof(struct rmnet_map_header)));
	veid = coal_hdr->virtual_channel_id;

	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_map_data_log_close_stats(priv,
				       coal_hdr->close_type,
				       coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}

/* Process a QMAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 nlo_err_mask;
	int rc = 0;

	switch (rmnet_map_get_next_hdr_type(skb)) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_map_segment_coal_skb(skb, nlo_err_mask, list);
		if (skb_peek(list) != skb)
			consume_skb(skb);
		break;
	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (rmnet_map_get_csum_valid(skb)) {
			priv->stats.csum_ok++;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			priv->stats.csum_valid_unset++;
		}

		/* Pull unnecessary headers and move the rest to the linear
		 * section of the skb.
		 */
		pskb_pull(skb,
			  (sizeof(struct rmnet_map_header) +
			   sizeof(struct rmnet_map_v5_csum_header)));
		rmnet_map_move_headers(skb);

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		pskb_trim(skb, len);
		__skb_queue_tail(list, skb);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
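
/* Aggregation tuning knobs, in nanoseconds: both are compared against
 * timespec64 deltas in rmnet_map_tx_aggregate() below. A 1 ms limit closes an
 * open aggregate; inter-packet gaps over 10 ms bypass aggregation entirely.
 */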
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;

int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
	u8 *packet_start = skb->data + offset;
	int is_icmp = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}

static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct rmnet_aggregation_state *state;

	state = container_of(work, struct rmnet_aggregation_state, agg_wq);

	spin_lock_bh(&state->agg_lock);
	if (likely(state->agg_state == -EINPROGRESS)) {
		/* Buffer may have already been shipped out */
		if (likely(state->agg_skb)) {
			skb = state->agg_skb;
			state->agg_skb = NULL;
			state->agg_count = 0;
			memset(&state->agg_time, 0, sizeof(state->agg_time));
		}

		state->agg_state = 0;
	}

	if (skb)
		state->send_agg_skb(skb);

	spin_unlock_bh(&state->agg_lock);
}

enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
	struct rmnet_aggregation_state *state;

	state = container_of(t, struct rmnet_aggregation_state, hrtimer);

	schedule_work(&state->agg_wq);
	return HRTIMER_NORESTART;
}

static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
	unsigned int linear = src->len - src->data_len, target = src->len;
	unsigned char *src_buf;
	struct sk_buff *skb;

	src_buf = src->data;
	skb_put_data(dst, src_buf, linear);
	target -= linear;

	skb = src;

	while (target) {
		unsigned int i = 0, non_linear = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);

			skb_put_data(dst, src_buf, non_linear);
			target -= non_linear;
		}

		if (skb_shinfo(skb)->frag_list) {
			skb = skb_shinfo(skb)->frag_list;
			continue;
		}

		if (skb->next)
			skb = skb->next;
	}
}

static void rmnet_free_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page, *idx;

	list_for_each_entry_safe(agg_page, idx, &state->agg_list, list) {
		list_del(&agg_page->list);
		put_page(agg_page->page);
		kfree(agg_page);
	}

	state->agg_head = NULL;
}

static struct page *rmnet_get_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page;
	struct page *page = NULL;
	int i = 0;

	if (!(state->params.agg_features & RMNET_PAGE_RECYCLE))
		goto alloc;

	do {
		agg_page = state->agg_head;
		if (unlikely(!agg_page))
			break;

		if (page_ref_count(agg_page->page) == 1) {
			page = agg_page->page;
			page_ref_inc(agg_page->page);
			state->stats->ul_agg_reuse++;
			state->agg_head = list_next_entry(agg_page, list);
			break;
		}

		state->agg_head = list_next_entry(agg_page, list);
		i++;
	} while (i <= 5);

alloc:
	if (!page) {
		page = __dev_alloc_pages(GFP_ATOMIC, state->agg_size_order);
		state->stats->ul_agg_alloc++;
	}

	return page;
}

static struct rmnet_agg_page *
__rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page;
	struct page *page;

	agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
	if (!agg_page)
		return NULL;

	page = __dev_alloc_pages(GFP_ATOMIC, state->agg_size_order);
	if (!page) {
		kfree(agg_page);
		return NULL;
	}

	agg_page->page = page;
	INIT_LIST_HEAD(&agg_page->list);

	return agg_page;
}

static void rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
{
	struct rmnet_agg_page *agg_page = NULL;
	int i = 0;

	for (i = 0; i < 512; i++) {
		agg_page = __rmnet_alloc_agg_pages(state);

		if (agg_page)
			list_add_tail(&agg_page->list, &state->agg_list);
	}

	state->agg_head = list_first_entry_or_null(&state->agg_list,
						   struct rmnet_agg_page, list);
}

static struct sk_buff *
rmnet_map_build_skb(struct rmnet_aggregation_state *state)
{
	struct sk_buff *skb;
	unsigned int size;
	struct page *page;
	void *vaddr;

	page = rmnet_get_agg_pages(state);
	if (!page)
		return NULL;

	vaddr = page_address(page);
	size = PAGE_SIZE << state->agg_size_order;

	skb = build_skb(vaddr, size);
	if (!skb) {
		put_page(page);
		return NULL;
	}

	return skb;
}

void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state)
{
	struct sk_buff *agg_skb;

	if (!state->agg_skb) {
		spin_unlock_bh(&state->agg_lock);
		return;
	}

	agg_skb = state->agg_skb;

	/* Reset the aggregation state */
	state->agg_skb = NULL;
	state->agg_count = 0;
	memset(&state->agg_time, 0, sizeof(state->agg_time));
	state->agg_state = 0;
	state->send_agg_skb(agg_skb);
	spin_unlock_bh(&state->agg_lock);
	hrtimer_cancel(&state->hrtimer);
}

void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
			    bool low_latency)
{
	struct rmnet_aggregation_state *state;
	struct timespec64 diff, last;
	int size;

	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
						 RMNET_DEFAULT_AGG_STATE];

new_packet:
	spin_lock_bh(&state->agg_lock);
	memcpy(&last, &state->agg_last, sizeof(last));
	ktime_get_real_ts64(&state->agg_last);

	if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
	    (RMNET_LLM(skb->priority) || RMNET_APS_LLB(skb->priority))) {
		/* Send out any aggregated SKBs we have */
		rmnet_map_send_agg_skb(state);

		/* Send out the priority SKB. Not holding agg_lock anymore */
		skb->protocol = htons(ETH_P_MAP);
		state->send_agg_skb(skb);
		return;
	}

	if (!state->agg_skb) {
		/* Check to see if we should aggregate first. If the traffic
		 * is very sparse, don't aggregate. We will need to tune this
		 * later
		 */
		diff = timespec64_sub(state->agg_last, last);
		size = state->params.agg_size - skb->len;

		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
		    size <= 0) {
			skb->protocol = htons(ETH_P_MAP);
			state->send_agg_skb(skb);
			spin_unlock_bh(&state->agg_lock);
			return;
		}

		state->agg_skb = rmnet_map_build_skb(state);
		if (!state->agg_skb) {
			state->agg_skb = NULL;
			state->agg_count = 0;
			memset(&state->agg_time, 0, sizeof(state->agg_time));
			skb->protocol = htons(ETH_P_MAP);
			state->send_agg_skb(skb);
			spin_unlock_bh(&state->agg_lock);
			return;
		}

		rmnet_map_linearize_copy(state->agg_skb, skb);
		state->agg_skb->dev = skb->dev;
		state->agg_skb->protocol = htons(ETH_P_MAP);
		state->agg_count = 1;
		ktime_get_real_ts64(&state->agg_time);
		dev_kfree_skb_any(skb);
		goto schedule;
	}

	diff = timespec64_sub(state->agg_last, state->agg_time);
	size = skb_tailroom(state->agg_skb);

	if (skb->len > size ||
	    state->agg_count >= state->params.agg_count ||
	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
		rmnet_map_send_agg_skb(state);
		goto new_packet;
	}

	rmnet_map_linearize_copy(state->agg_skb, skb);
	state->agg_count++;
	dev_kfree_skb_any(skb);

schedule:
	if (state->agg_state != -EINPROGRESS) {
		state->agg_state = -EINPROGRESS;
		hrtimer_start(&state->hrtimer,
			      ns_to_ktime(state->params.agg_time),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_bh(&state->agg_lock);
}

void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
				    u16 size, u8 count, u8 features, u32 time)
{
	spin_lock_bh(&state->agg_lock);
	state->params.agg_count = count;
	state->params.agg_time = time;
	state->params.agg_size = size;
	state->params.agg_features = features;

	rmnet_free_agg_pages(state);

	/* This effectively disables recycling in case the UL aggregation
	 * size is less than PAGE_SIZE.
	 */
	if (size < PAGE_SIZE)
		goto done;

	state->agg_size_order = get_order(size);

	size = PAGE_SIZE << state->agg_size_order;
	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	state->params.agg_size = size;

	if (state->params.agg_features == RMNET_PAGE_RECYCLE)
		rmnet_alloc_agg_pages(state);

done:
	spin_unlock_bh(&state->agg_lock);
}
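
/* Worked example (editorial, assuming 4 KB pages): requesting size = 9000
 * gives get_order(9000) = 2, so each aggregate buffer spans
 * PAGE_SIZE << 2 = 16384 bytes, and agg_size becomes 16384 minus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) so that build_skb() can
 * place its shared info at the tail of the same buffer.
 */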

void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	unsigned int i;

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		spin_lock_init(&state->agg_lock);
		INIT_LIST_HEAD(&state->agg_list);
		hrtimer_init(&state->hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		state->hrtimer.function = rmnet_map_flush_tx_packet_queue;
		INIT_WORK(&state->agg_wq, rmnet_map_flush_tx_packet_work);
		state->stats = &port->stats.agg;

		/* Since PAGE_SIZE - 1 is specified here, no pages are
		 * pre-allocated. This is done to reduce memory usage in cases
		 * where UL aggregation is disabled.
		 * Additionally, the features flag is also set to 0.
		 */
		rmnet_map_update_ul_agg_config(state, PAGE_SIZE - 1, 20, 0,
					       3000000);
	}

	/* Set delivery functions for each aggregation state */
	port->agg_state[RMNET_DEFAULT_AGG_STATE].send_agg_skb = dev_queue_xmit;
	port->agg_state[RMNET_LL_AGG_STATE].send_agg_skb = rmnet_ll_send_skb;
}

void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
	unsigned int i;

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		hrtimer_cancel(&state->hrtimer);
		cancel_work_sync(&state->agg_wq);
	}

	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
		struct rmnet_aggregation_state *state = &port->agg_state[i];

		spin_lock_bh(&state->agg_lock);
		if (state->agg_state == -EINPROGRESS) {
			if (state->agg_skb) {
				kfree_skb(state->agg_skb);
				state->agg_skb = NULL;
				state->agg_count = 0;
				memset(&state->agg_time, 0,
				       sizeof(state->agg_time));
			}

			state->agg_state = 0;
		}

		rmnet_free_agg_pages(state);
		spin_unlock_bh(&state->agg_lock);
	}
}

void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
{
	struct rmnet_aggregation_state *state;
	struct rmnet_port *port;
	struct sk_buff *agg_skb;

	if (unlikely(ch >= RMNET_MAX_AGG_STATE))
		ch = RMNET_DEFAULT_AGG_STATE;

	port = rmnet_get_port(qmap_skb->dev);
	if (!port) {
		kfree_skb(qmap_skb);
		return;
	}

	state = &port->agg_state[ch];

	if (!flush)
		goto send;

	if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
		goto send;

	spin_lock_bh(&state->agg_lock);
	if (state->agg_skb) {
		agg_skb = state->agg_skb;
		state->agg_skb = NULL;
		state->agg_count = 0;
		memset(&state->agg_time, 0, sizeof(state->agg_time));
		state->agg_state = 0;
		state->send_agg_skb(agg_skb);
		spin_unlock_bh(&state->agg_lock);
		hrtimer_cancel(&state->hrtimer);
	} else {
		spin_unlock_bh(&state->agg_lock);
	}

send:
	state->send_agg_skb(qmap_skb);
}
EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);

int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
			     struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_tso_header *ul_header;

	if (!(orig_dev->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
		priv->stats.tso_arriv_errs++;
		return -EINVAL;
	}

	ul_header = (struct rmnet_map_v5_tso_header *)
		    skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_TSO;

	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
		rmnet_map_v5_check_priority(skb, orig_dev,
					    (struct rmnet_map_v5_csum_header *)ul_header,
					    true);

	ul_header->segment_size = htons(skb_shinfo(skb)->gso_size);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
		ul_header->ip_id_cfg = 1;

	skb->ip_summed = CHECKSUM_NONE;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;

	priv->stats.tso_pkts++;

	return 0;
}