rmnet_descriptor.c

/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
                               sizeof(struct rmnet_map_header) + \
                               sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
                               sizeof(struct rmnet_map_header) + \
                               sizeof(struct rmnet_map_control_command_header))

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
                                       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);

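/* Grab a descriptor from the port's free pool, or atomically allocate a new
 * one (growing the pool) if the free list is empty. Returns NULL only when
 * the GFP_ATOMIC allocation fails. The pool lock is IRQ-safe, so this may be
 * called from any context that permits atomic allocation.
 */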
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
        struct rmnet_frag_descriptor *frag_desc;
        unsigned long flags;

        spin_lock_irqsave(&port->desc_pool_lock, flags);
        if (!list_empty(&pool->free_list)) {
                frag_desc = list_first_entry(&pool->free_list,
                                             struct rmnet_frag_descriptor,
                                             list);
                list_del_init(&frag_desc->list);
        } else {
                frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
                if (!frag_desc)
                        goto out;

                INIT_LIST_HEAD(&frag_desc->list);
                INIT_LIST_HEAD(&frag_desc->sub_frags);
                pool->pool_size++;
        }

out:
        spin_unlock_irqrestore(&port->desc_pool_lock, flags);
        return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);

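/* Return a descriptor to the port's free pool. Drops the reference this
 * descriptor held on its backing page (if any) and wipes all metadata before
 * relinking it on the free list.
 */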
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
                                   struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
        struct page *page = skb_frag_page(&frag_desc->frag);
        unsigned long flags;

        list_del(&frag_desc->list);
        if (page)
                put_page(page);

        memset(frag_desc, 0, sizeof(*frag_desc));
        INIT_LIST_HEAD(&frag_desc->list);
        INIT_LIST_HEAD(&frag_desc->sub_frags);

        spin_lock_irqsave(&port->desc_pool_lock, flags);
        list_add_tail(&frag_desc->list, &pool->free_list);
        spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);

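/* Allocate a descriptor covering the [page_offset, page_offset + len) window
 * of page @p and append it to @list. The fragment is silently dropped if no
 * descriptor can be obtained.
 */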
void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
                               struct page *p, u32 page_offset, u32 len)
{
        struct rmnet_frag_descriptor *frag_desc;

        frag_desc = rmnet_get_frag_descriptor(port);
        if (!frag_desc)
                return;

        rmnet_frag_fill(frag_desc, p, page_offset, len);
        list_add_tail(&frag_desc->list, list);
}
EXPORT_SYMBOL(rmnet_descriptor_add_frag);

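/* Fragment-based analogue of ipv6_skip_exthdr(): walk the IPv6 extension
 * header chain inside the descriptor starting at offset @start, reporting the
 * final protocol through @nexthdrp and any fragment header's offset/flags
 * through @fragp. Returns the offset of the upper-layer header, or -EINVAL
 * on truncated or malformed headers.
 */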
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
                                int start, u8 *nexthdrp, __be16 *fragp)
{
        u32 frag_size = skb_frag_size(&frag_desc->frag);
        u8 nexthdr = *nexthdrp;

        *fragp = 0;
        while (ipv6_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr *hp;
                int hdrlen;

                if (nexthdr == NEXTHDR_NONE)
                        return -EINVAL;

                if (start >= frag_size)
                        return -EINVAL;

                hp = rmnet_frag_data_ptr(frag_desc) + start;

                if (nexthdr == NEXTHDR_FRAGMENT) {
                        __be16 *fp;

                        if (start + offsetof(struct frag_hdr, frag_off) >=
                            frag_size)
                                return -EINVAL;

                        fp = rmnet_frag_data_ptr(frag_desc) + start +
                             offsetof(struct frag_hdr, frag_off);
                        *fragp = *fp;
                        if (ntohs(*fragp) & ~0x7)
                                break;

                        hdrlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH) {
                        hdrlen = (hp->hdrlen + 2) << 2;
                } else {
                        hdrlen = ipv6_optlen(hp);
                }

                nexthdr = hp->nexthdr;
                start += hdrlen;
        }

        *nexthdrp = nexthdr;
        return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);

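/* Apply a MAP flow control command to the virtual device selected by the
 * frame's mux ID. Returns the MAP command result to ACK with, or
 * RX_HANDLER_CONSUMED when the mux ID cannot be resolved to an endpoint.
 */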
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
                                     struct rmnet_port *port,
                                     int enable)
{
        struct rmnet_map_control_command *cmd;
        struct rmnet_endpoint *ep;
        struct net_device *vnd;
        u16 ip_family;
        u16 fc_seq;
        u32 qos_id;
        u8 mux_id;
        int r;

        mux_id = qmap->mux_id;
        cmd = (struct rmnet_map_control_command *)
              ((char *)qmap + sizeof(*qmap));

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                return RX_HANDLER_CONSUMED;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                return RX_HANDLER_CONSUMED;

        vnd = ep->egress_dev;

        ip_family = cmd->flow_control.ip_family;
        fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
        qos_id = ntohl(cmd->flow_control.qos_id);

        /* Ignore the IP family and pass the sequence number for both v4 and
         * v6 sequences. User space does not support creating dedicated flows
         * for the two protocols.
         */
        r = rmnet_vnd_do_flow_control(vnd, enable);
        if (r)
                return RMNET_MAP_COMMAND_UNSUPPORTED;
        else
                return RMNET_MAP_COMMAND_ACK;
}

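/* Allocate an skb sized to the original command frame, stamp it with the
 * requested ACK/NACK type, and transmit it directly on the underlying
 * device.
 */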
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
                                unsigned char type,
                                struct rmnet_port *port)
{
        struct rmnet_map_control_command *cmd;
        struct net_device *dev = port->dev;
        struct sk_buff *skb;
        u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

        skb = alloc_skb(alloc_len, GFP_ATOMIC);
        if (!skb)
                return;

        skb->protocol = htons(ETH_P_MAP);
        skb->dev = dev;

        cmd = rmnet_map_get_cmd_start(skb);
        cmd->cmd_type = type & 0x03;

        netif_tx_lock(dev);
        dev->netdev_ops->ndo_start_xmit(skb, dev);
        netif_tx_unlock(dev);
}

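/* Handle a downlink flow start (DL marker) indication: record the indication
 * header fields in the port statistics and, when DL marker v2 is in use,
 * notify any registered listeners.
 */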
static void
rmnet_frag_process_flow_start(struct rmnet_map_control_command_header *cmd,
                              struct rmnet_port *port,
                              u16 cmd_len)
{
        struct rmnet_map_dl_ind_hdr *dlhdr;
        u32 data_format;
        bool is_dl_mark_v2;

        if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_HDR_SIZE)
                return;

        data_format = port->data_format;
        is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
        dlhdr = (struct rmnet_map_dl_ind_hdr *)((char *)cmd + sizeof(*cmd));

        port->stats.dl_hdr_last_ep_id = cmd->source_id;
        port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
        port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
        port->stats.dl_hdr_last_seq = dlhdr->le.seq;
        port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
        port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
        port->stats.dl_hdr_last_flows = dlhdr->le.flows;
        port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
        port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
        port->stats.dl_hdr_count++;

        /* If a target is taking the frag path, we can assume DL marker v2 is
         * in play.
         */
        if (is_dl_mark_v2)
                rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

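/* Handle a downlink flow end (DL marker) indication: record the trailer
 * sequence number and, when DL marker v2 is in use, notify any registered
 * listeners.
 */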
static void
rmnet_frag_process_flow_end(struct rmnet_map_control_command_header *cmd,
                            struct rmnet_port *port, u16 cmd_len)
{
        struct rmnet_map_dl_ind_trl *dltrl;
        u32 data_format;
        bool is_dl_mark_v2;

        if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_TRL_SIZE)
                return;

        data_format = port->data_format;
        is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
        dltrl = (struct rmnet_map_dl_ind_trl *)((char *)cmd + sizeof(*cmd));

        port->stats.dl_trl_last_seq = dltrl->seq_le;
        port->stats.dl_trl_count++;

        /* If a target is taking the frag path, we can assume DL marker v2 is
         * in play.
         */
        if (is_dl_mark_v2)
                rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}

/* Process a MAP command frame and send an N/ACK message as appropriate. The
 * message command name is decoded here and the appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
{
        struct rmnet_map_control_command *cmd;
        unsigned char command_name;
        unsigned char rc = 0;

        cmd = (struct rmnet_map_control_command *)
              ((char *)qmap + sizeof(*qmap));
        command_name = cmd->command_name;

        switch (command_name) {
        case RMNET_MAP_COMMAND_FLOW_ENABLE:
                rc = rmnet_frag_do_flow_control(qmap, port, 1);
                break;

        case RMNET_MAP_COMMAND_FLOW_DISABLE:
                rc = rmnet_frag_do_flow_control(qmap, port, 0);
                break;

        default:
                rc = RMNET_MAP_COMMAND_UNSUPPORTED;
                break;
        }

        if (rc == RMNET_MAP_COMMAND_ACK)
                rmnet_frag_send_ack(qmap, rc, port);
}

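/* Dispatch DL marker commands embedded in a MAP control frame. Returns 0 if
 * the command was a flow start/end indication and was handled, nonzero
 * otherwise.
 */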
int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
                            struct rmnet_port *port, u16 pkt_len)
{
        struct rmnet_map_control_command_header *cmd;
        unsigned char command_name;

        cmd = (struct rmnet_map_control_command_header *)
              ((char *)qmap + sizeof(*qmap));
        command_name = cmd->command_name;

        switch (command_name) {
        case RMNET_MAP_COMMAND_FLOW_START:
                rmnet_frag_process_flow_start(cmd, port, pkt_len);
                break;

        case RMNET_MAP_COMMAND_FLOW_END:
                rmnet_frag_process_flow_end(cmd, port, pkt_len);
                break;

        default:
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);

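/* Split an aggregated MAP frame into one descriptor per packet. Each MAP
 * header's length field (plus any checksum trailer or QMAPv5 header) locates
 * the packet boundaries; the resulting descriptors are appended to @list.
 * Bails out on empty or truncated packets.
 */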
void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
                            struct list_head *list)
{
        struct rmnet_map_header *maph;
        u8 *data = skb_frag_address(frag);
        u32 offset = 0;
        u32 packet_len;

        while (offset < skb_frag_size(frag)) {
                maph = (struct rmnet_map_header *)data;
                packet_len = ntohs(maph->pkt_len);

                /* Some hardware can send us empty frames. Catch them */
                if (packet_len == 0)
                        return;

                packet_len += sizeof(*maph);

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
                        packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
                } else if (port->data_format &
                           (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
                            RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
                        u32 hsize = 0;
                        u8 type;

                        type = ((struct rmnet_map_v5_coal_header *)
                                (data + sizeof(*maph)))->header_type;
                        switch (type) {
                        case RMNET_MAP_HEADER_TYPE_COALESCING:
                                hsize = sizeof(struct rmnet_map_v5_coal_header);
                                break;
                        case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
                                hsize = sizeof(struct rmnet_map_v5_csum_header);
                                break;
                        }

                        packet_len += hsize;
                }

                if ((int)skb_frag_size(frag) - (int)packet_len < 0)
                        return;

                rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
                                          frag->bv_offset + offset,
                                          packet_len);

                offset += packet_len;
                data += packet_len;
        }
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
                                 struct rmnet_frag_descriptor *frag_desc)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (frag_desc->trans_proto == IPPROTO_TCP)
                shinfo->gso_type = (frag_desc->ip_proto == 4) ?
                                   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
        else
                shinfo->gso_type = SKB_GSO_UDP_L4;

        shinfo->gso_size = frag_desc->gso_size;
        shinfo->gso_segs = frag_desc->gso_segs;
}

/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
                                    struct rmnet_frag_descriptor *frag_desc)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        __sum16 pseudo;
        u16 pkt_len = skb->len - frag_desc->ip_len;

        if (frag_desc->ip_proto == 4) {
                iph->tot_len = htons(skb->len);
                iph->check = 0;
                iph->check = ip_fast_csum(iph, iph->ihl);
                pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                            pkt_len, frag_desc->trans_proto,
                                            0);
        } else {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

                /* Payload length includes any extension headers */
                ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
                pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                          pkt_len, frag_desc->trans_proto, 0);
        }

        if (frag_desc->trans_proto == IPPROTO_TCP) {
                struct tcphdr *tp = (struct tcphdr *)
                                    ((u8 *)iph + frag_desc->ip_len);

                tp->check = pseudo;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                struct udphdr *up = (struct udphdr *)
                                    ((u8 *)iph + frag_desc->ip_len);

                up->len = htons(pkt_len);
                up->check = pseudo;
                skb->csum_offset = offsetof(struct udphdr, check);
        }

        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}

/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
                                       struct rmnet_port *port)
{
        struct sk_buff *head_skb, *current_skb, *skb;
        struct skb_shared_info *shinfo;
        struct rmnet_frag_descriptor *sub_frag, *tmp;

        /* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
        if (frag_desc->hdrs_valid) {
                u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

                head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
                                     GFP_ATOMIC);
                if (!head_skb)
                        return NULL;

                skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
                skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
                skb_reset_network_header(head_skb);

                if (frag_desc->trans_len)
                        skb_set_transport_header(head_skb, frag_desc->ip_len);

                /* If the headers we added are the start of the page,
                 * we don't want to add them twice
                 */
                if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
                        /* "Header only" packets can be fast-forwarded */
                        if (hdr_len == skb_frag_size(&frag_desc->frag))
                                goto skip_frags;

                        if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
                                kfree_skb(head_skb);
                                return NULL;
                        }
                }
        } else {
                /* Allocate enough space to avoid penalties in the stack
                 * from __pskb_pull_tail()
                 */
                head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
                                     GFP_ATOMIC);
                if (!head_skb)
                        return NULL;

                skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
        }

        /* Add main fragment */
        get_page(skb_frag_page(&frag_desc->frag));
        skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
                        frag_desc->frag.bv_offset,
                        skb_frag_size(&frag_desc->frag),
                        skb_frag_size(&frag_desc->frag));

        shinfo = skb_shinfo(head_skb);
        current_skb = head_skb;

        /* Add in any frags from rmnet_perf */
        list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
                skb_frag_t *frag;
                u32 frag_size;

                frag = &sub_frag->frag;
                frag_size = skb_frag_size(frag);

add_frag:
                if (shinfo->nr_frags < MAX_SKB_FRAGS) {
                        get_page(skb_frag_page(frag));
                        skb_add_rx_frag(current_skb, shinfo->nr_frags,
                                        skb_frag_page(frag), frag->bv_offset,
                                        frag_size, frag_size);
                        if (current_skb != head_skb) {
                                head_skb->len += frag_size;
                                head_skb->data_len += frag_size;
                        }
                } else {
                        /* Alloc a new skb and try again */
                        skb = alloc_skb(0, GFP_ATOMIC);
                        if (!skb)
                                break;

                        if (current_skb == head_skb)
                                shinfo->frag_list = skb;
                        else
                                current_skb->next = skb;

                        current_skb = skb;
                        shinfo = skb_shinfo(current_skb);
                        goto add_frag;
                }

                rmnet_recycle_frag_descriptor(sub_frag, port);
        }

skip_frags:
        head_skb->dev = frag_desc->dev;
        rmnet_set_skb_proto(head_skb);

        /* Handle any header metadata that needs to be updated after RSB/RSC
         * segmentation
         */
        if (frag_desc->ip_id_set) {
                struct iphdr *iph;

                iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
                csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
                iph->id = frag_desc->ip_id;
        }

        if (frag_desc->tcp_seq_set) {
                struct tcphdr *th;

                th = (struct tcphdr *)
                     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
                th->seq = frag_desc->tcp_seq;
        }

        /* Handle csum offloading */
        if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
                /* Set the partial checksum information */
                rmnet_frag_partial_csum(head_skb, frag_desc);
        } else if (frag_desc->csum_valid) {
                /* Non-RSB/RSC/perf packet. The current checksum is fine */
                head_skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (frag_desc->hdrs_valid &&
                   (frag_desc->trans_proto == IPPROTO_TCP ||
                    frag_desc->trans_proto == IPPROTO_UDP)) {
                /* Unfortunately, we have to fake a bad checksum here, since
                 * the original bad value is lost by the hardware. The only
                 * reliable way to do it is to calculate the actual checksum
                 * and corrupt it.
                 */
                __sum16 *check;
                __wsum csum;
                unsigned int offset = skb_transport_offset(head_skb);
                __sum16 pseudo;

                /* Calculate pseudo header and update header fields */
                if (frag_desc->ip_proto == 4) {
                        struct iphdr *iph = ip_hdr(head_skb);
                        __be16 tot_len = htons(head_skb->len);

                        csum_replace2(&iph->check, iph->tot_len, tot_len);
                        iph->tot_len = tot_len;
                        pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                    head_skb->len -
                                                    frag_desc->ip_len,
                                                    frag_desc->trans_proto, 0);
                } else {
                        struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

                        ip6h->payload_len = htons(head_skb->len -
                                                  sizeof(*ip6h));
                        pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                  head_skb->len -
                                                  frag_desc->ip_len,
                                                  frag_desc->trans_proto, 0);
                }

                if (frag_desc->trans_proto == IPPROTO_TCP) {
                        check = &tcp_hdr(head_skb)->check;
                } else {
                        udp_hdr(head_skb)->len = htons(head_skb->len -
                                                       frag_desc->ip_len);
                        check = &udp_hdr(head_skb)->check;
                }

                *check = pseudo;
                csum = skb_checksum(head_skb, offset, head_skb->len - offset,
                                    0);
                /* Add 1 to corrupt. This cannot produce a final value of 0
                 * since csum_fold() can't return a value of 0xFFFF
                 */
                *check = csum16_add(csum_fold(csum), htons(1));
                head_skb->ip_summed = CHECKSUM_NONE;
        }

        /* Handle any rmnet_perf metadata */
        if (frag_desc->hash) {
                head_skb->hash = frag_desc->hash;
                head_skb->sw_hash = 1;
        }

        if (frag_desc->flush_shs)
                head_skb->cb[0] = 1;

        /* Handle coalesced packets */
        if (frag_desc->gso_segs > 1)
                rmnet_frag_gso_stamp(head_skb, frag_desc);

        return head_skb;
}

/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
                        struct rmnet_port *port)
{
        struct sk_buff *skb;

        skb = rmnet_alloc_skb(frag_desc, port);
        if (skb)
                rmnet_deliver_skb(skb, port);
        rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);

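/* Carve the next gso_size * gso_segs bytes of payload out of a coalesced
 * descriptor into a fresh descriptor appended to @list, reconstructing the
 * TCP sequence number or IPv4 ID that segment would have carried on the
 * wire. The coalesced descriptor's data_offset and pkt_id are then advanced
 * past the segmented data.
 */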
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
                                      struct rmnet_port *port,
                                      struct list_head *list, u8 pkt_id,
                                      bool csum_valid)
{
        struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
        struct rmnet_frag_descriptor *new_frag;
        u8 *hdr_start = rmnet_frag_data_ptr(coal_desc);
        u32 offset;

        new_frag = rmnet_get_frag_descriptor(port);
        if (!new_frag)
                return;

        /* Account for header lengths to access the data start */
        offset = coal_desc->frag.bv_offset + coal_desc->ip_len +
                 coal_desc->trans_len + coal_desc->data_offset;

        /* Header information and most metadata is the same as the original */
        memcpy(new_frag, coal_desc, sizeof(*coal_desc));
        INIT_LIST_HEAD(&new_frag->list);
        INIT_LIST_HEAD(&new_frag->sub_frags);
        rmnet_frag_fill(new_frag, skb_frag_page(&coal_desc->frag), offset,
                        coal_desc->gso_size * coal_desc->gso_segs);

        if (coal_desc->trans_proto == IPPROTO_TCP) {
                struct tcphdr *th;

                th = (struct tcphdr *)(hdr_start + coal_desc->ip_len);
                new_frag->tcp_seq_set = 1;
                new_frag->tcp_seq = htonl(ntohl(th->seq) +
                                          coal_desc->data_offset);
        } else if (coal_desc->trans_proto == IPPROTO_UDP) {
                struct udphdr *uh;

                uh = (struct udphdr *)(hdr_start + coal_desc->ip_len);
                if (coal_desc->ip_proto == 4 && !uh->check)
                        csum_valid = true;
        }

        if (coal_desc->ip_proto == 4) {
                struct iphdr *iph;

                iph = (struct iphdr *)hdr_start;
                new_frag->ip_id_set = 1;
                new_frag->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
        }

        new_frag->hdr_ptr = hdr_start;
        new_frag->csum_valid = csum_valid;
        priv->stats.coal.coal_reconstruct++;

        /* Update meta information to move past the data we just segmented */
        coal_desc->data_offset += coal_desc->gso_size * coal_desc->gso_segs;
        coal_desc->pkt_id = pkt_id + 1;
        coal_desc->gso_segs = 0;

        list_add_tail(&new_frag->list, list);
}

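/* Verify the transport-layer checksum of the packet held in the descriptor
 * by folding the payload sum into the pseudo header checksum. Returns true
 * if the checksum is valid.
 */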
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
        u8 *data = rmnet_frag_data_ptr(frag_desc);
        unsigned int datagram_len;
        __wsum csum;
        __sum16 pseudo;

        datagram_len = skb_frag_size(&frag_desc->frag) - frag_desc->ip_len;
        if (frag_desc->ip_proto == 4) {
                struct iphdr *iph = (struct iphdr *)data;

                pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                            datagram_len,
                                            frag_desc->trans_proto, 0);
        } else {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

                pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                          datagram_len, frag_desc->trans_proto,
                                          0);
        }

        csum = csum_partial(data + frag_desc->ip_len, datagram_len,
                            csum_unfold(pseudo));
        return !csum_fold(csum);
}

/* Converts the coalesced frame into a list of descriptors.
 * NLOs containing csum errors will not be included.
 */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
                             u64 nlo_err_mask, struct rmnet_port *port,
                             struct list_head *list)
{
        struct iphdr *iph;
        struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
        struct rmnet_map_v5_coal_header *coal_hdr;
        u16 pkt_len;
        u8 pkt, total_pkt = 0;
        u8 nlo;
        bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
        bool zero_csum = false;

        /* Pull off the headers we no longer need */
        if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
                return;

        coal_hdr = (struct rmnet_map_v5_coal_header *)
                   rmnet_frag_data_ptr(coal_desc);
        if (!rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr)))
                return;

        iph = (struct iphdr *)rmnet_frag_data_ptr(coal_desc);

        if (iph->version == 4) {
                coal_desc->ip_proto = 4;
                coal_desc->ip_len = iph->ihl * 4;
                coal_desc->trans_proto = iph->protocol;

                /* Don't allow coalescing of any packets with IP options */
                if (iph->ihl != 5)
                        gro = false;
        } else if (iph->version == 6) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
                int ip_len;
                __be16 frag_off;
                u8 protocol = ip6h->nexthdr;

                coal_desc->ip_proto = 6;
                ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
                                                     sizeof(*ip6h),
                                                     &protocol,
                                                     &frag_off);
                coal_desc->trans_proto = protocol;

                /* If we run into a problem, or this has a fragment header
                 * (which should technically not be possible, if the HW
                 * works as intended...), bail.
                 */
                if (ip_len < 0 || frag_off) {
                        priv->stats.coal.coal_ip_invalid++;
                        return;
                }

                coal_desc->ip_len = (u16)ip_len;
                if (coal_desc->ip_len > sizeof(*ip6h)) {
                        /* Don't allow coalescing of any packets with IPv6
                         * extension headers.
                         */
                        gro = false;
                }
        } else {
                priv->stats.coal.coal_ip_invalid++;
                return;
        }

        if (coal_desc->trans_proto == IPPROTO_TCP) {
                struct tcphdr *th;

                th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
                coal_desc->trans_len = th->doff * 4;
                priv->stats.coal.coal_tcp++;
                priv->stats.coal.coal_tcp_bytes +=
                        skb_frag_size(&coal_desc->frag);
        } else if (coal_desc->trans_proto == IPPROTO_UDP) {
                struct udphdr *uh;

                uh = (struct udphdr *)((u8 *)iph + coal_desc->ip_len);
                coal_desc->trans_len = sizeof(*uh);
                priv->stats.coal.coal_udp++;
                priv->stats.coal.coal_udp_bytes +=
                        skb_frag_size(&coal_desc->frag);
                if (coal_desc->ip_proto == 4 && !uh->check)
                        zero_csum = true;
        } else {
                priv->stats.coal.coal_trans_invalid++;
                return;
        }

        coal_desc->hdrs_valid = 1;

        if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
                /* Mark the checksum as valid if it checks out */
                if (rmnet_frag_validate_csum(coal_desc))
                        coal_desc->csum_valid = true;

                coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
                coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
                coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
                coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
                list_add_tail(&coal_desc->list, list);
                return;
        }

        /* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
         * no checksum errors, and are allowing GRO. We can just reuse this
         * descriptor unchanged.
         */
        if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
                coal_desc->csum_valid = true;
                coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
                coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
                coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
                coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
                list_add_tail(&coal_desc->list, list);
                return;
        }

        /* Segment the coalesced descriptor into new packets */
        for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
                pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
                pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
                coal_desc->gso_size = pkt_len;
                for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
                     pkt++, total_pkt++, nlo_err_mask >>= 1) {
                        bool csum_err = nlo_err_mask & 1;

                        /* Segment the packet if we're not sending the larger
                         * packet up the stack.
                         */
                        if (!gro) {
                                coal_desc->gso_segs = 1;
                                if (csum_err)
                                        priv->stats.coal.coal_csum_err++;

                                __rmnet_frag_segment_data(coal_desc, port,
                                                          list, total_pkt,
                                                          !csum_err);
                                continue;
                        }

                        if (csum_err) {
                                priv->stats.coal.coal_csum_err++;

                                /* Segment out the good data */
                                if (coal_desc->gso_segs)
                                        __rmnet_frag_segment_data(coal_desc,
                                                                  port,
                                                                  list,
                                                                  total_pkt,
                                                                  true);

                                /* Segment out the bad checksum */
                                coal_desc->gso_segs = 1;
                                __rmnet_frag_segment_data(coal_desc, port,
                                                          list, total_pkt,
                                                          false);
                        } else {
                                coal_desc->gso_segs++;
                        }
                }

                /* If we're switching NLOs, we need to send out everything from
                 * the previous one, if we haven't done so. NLOs only switch
                 * when the packet length changes.
                 */
                if (coal_desc->gso_segs)
                        __rmnet_frag_segment_data(coal_desc, port, list,
                                                  total_pkt, true);
        }
}

/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
                                            u8 code)
{
        struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

        switch (type) {
        case RMNET_MAP_COAL_CLOSE_NON_COAL:
                stats->non_coal++;
                break;

        case RMNET_MAP_COAL_CLOSE_IP_MISS:
                stats->ip_miss++;
                break;

        case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
                stats->trans_miss++;
                break;

        case RMNET_MAP_COAL_CLOSE_HW:
                switch (code) {
                case RMNET_MAP_COAL_CLOSE_HW_NL:
                        stats->hw_nl++;
                        break;

                case RMNET_MAP_COAL_CLOSE_HW_PKT:
                        stats->hw_pkt++;
                        break;

                case RMNET_MAP_COAL_CLOSE_HW_BYTE:
                        stats->hw_byte++;
                        break;

                case RMNET_MAP_COAL_CLOSE_HW_TIME:
                        stats->hw_time++;
                        break;

                case RMNET_MAP_COAL_CLOSE_HW_EVICT:
                        stats->hw_evict++;
                        break;

                default:
                        break;
                }
                break;

        case RMNET_MAP_COAL_CLOSE_COAL:
                stats->coal++;
                break;

        default:
                break;
        }
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
                                  u64 *nlo_err_mask)
{
        struct rmnet_map_v5_coal_header *coal_hdr;
        unsigned char *data = rmnet_frag_data_ptr(frag_desc);
        struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
        u64 mask = 0;
        int i;
        u8 veid, pkts = 0;

        coal_hdr = (struct rmnet_map_v5_coal_header *)
                   (data + sizeof(struct rmnet_map_header));
        veid = coal_hdr->virtual_channel_id;

        if (coal_hdr->num_nlos == 0 ||
            coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
                priv->stats.coal.coal_hdr_nlo_err++;
                return -EINVAL;
        }

        for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
                /* If there is a checksum issue, we need to split
                 * up the skb. Rebuild the full csum error field
                 */
                u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
                u8 pkt = coal_hdr->nl_pairs[i].num_packets;

                mask |= ((u64)err) << (8 * i);

                /* Track total packets in frame */
                pkts += pkt;
                if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
                        priv->stats.coal.coal_hdr_pkt_err++;
                        return -EINVAL;
                }
        }

        /* Track number of packets we get inside of coalesced frames */
        priv->stats.coal.coal_pkts += pkts;

        /* Update ethtool stats */
        rmnet_frag_data_log_close_stats(priv,
                                        coal_hdr->close_type,
                                        coal_hdr->close_value);
        if (veid < RMNET_MAX_VEID)
                priv->stats.coal.coal_veid[veid]++;

        *nlo_err_mask = mask;

        return 0;
}

/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
                                       struct rmnet_port *port,
                                       struct list_head *list,
                                       u16 len)
{
        struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
        u64 nlo_err_mask;
        int rc = 0;

        switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
        case RMNET_MAP_HEADER_TYPE_COALESCING:
                priv->stats.coal.coal_rx++;
                rc = rmnet_frag_data_check_coal_header(frag_desc,
                                                       &nlo_err_mask);
                if (rc)
                        return rc;

                rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
                                             list);
                if (list_first_entry(list, struct rmnet_frag_descriptor,
                                     list) != frag_desc)
                        rmnet_recycle_frag_descriptor(frag_desc, port);
                break;

        case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
                if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
                        priv->stats.csum_sw++;
                } else if (rmnet_frag_get_csum_valid(frag_desc)) {
                        priv->stats.csum_ok++;
                        frag_desc->csum_valid = true;
                } else {
                        priv->stats.csum_valid_unset++;
                }

                if (!rmnet_frag_pull(frag_desc, port,
                                     sizeof(struct rmnet_map_header) +
                                     sizeof(struct rmnet_map_v5_csum_header))) {
                        rc = -EINVAL;
                        break;
                }

                frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);

                /* Remove padding only for csum offload packets.
                 * Coalesced packets should never have padding.
                 */
                if (!rmnet_frag_trim(frag_desc, port, len)) {
                        rc = -EINVAL;
                        break;
                }

                list_del_init(&frag_desc->list);
                list_add_tail(&frag_desc->list, list);
                break;

        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);

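/* Handle a single deaggregated QMAP frame: route command frames to the flow
 * and command handlers, map the mux ID to an endpoint, strip or parse the
 * QMAP headers, and then hand the resulting descriptors to rmnet_perf (if
 * its hook is registered) or deliver them to the stack.
 */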
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
                             struct rmnet_port *port)
{
        rmnet_perf_desc_hook_t rmnet_perf_ingress;
        struct rmnet_map_header *qmap;
        struct rmnet_endpoint *ep;
        struct rmnet_frag_descriptor *frag, *tmp;
        LIST_HEAD(segs);
        u16 len, pad;
        u8 mux_id;

        qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
        mux_id = qmap->mux_id;
        pad = qmap->pad_len;
        len = ntohs(qmap->pkt_len) - pad;

        if (qmap->cd_bit) {
                qmi_rmnet_set_dl_msg_active(port);
                if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
                        rmnet_frag_flow_command(qmap, port, len);
                        goto recycle;
                }

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        rmnet_frag_command(qmap, port);

                goto recycle;
        }

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto recycle;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto recycle;

        frag_desc->dev = ep->egress_dev;

        /* Handle QMAPv5 packet */
        if (qmap->next_hdr &&
            (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
                if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
                                                       len))
                        goto recycle;
        } else {
                /* We only have the main QMAP header to worry about */
                if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
                        return;

                frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);

                if (!rmnet_frag_trim(frag_desc, port, len))
                        return;

                list_add_tail(&frag_desc->list, &segs);
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port);

        rcu_read_lock();
        rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
        if (rmnet_perf_ingress) {
                list_for_each_entry_safe(frag, tmp, &segs, list) {
                        list_del_init(&frag->list);
                        rmnet_perf_ingress(frag, port);
                }
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        list_for_each_entry_safe(frag, tmp, &segs, list) {
                list_del_init(&frag->list);
                rmnet_frag_deliver(frag, port);
        }
        return;

recycle:
        rmnet_recycle_frag_descriptor(frag_desc, port);
}

/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);

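/* Ingress entry point for the descriptor path: walk the skb chain, turning
 * each skb's first page fragment into per-packet descriptors and processing
 * them, then give the perf module a chance to flush at the end of the chain.
 */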
void rmnet_frag_ingress_handler(struct sk_buff *skb,
                                struct rmnet_port *port)
{
        rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
        LIST_HEAD(desc_list);

        /* Deaggregation and freeing of HW originating
         * buffers is done within here
         */
        while (skb) {
                struct sk_buff *skb_frag;

                rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port,
                                       &desc_list);
                if (!list_empty(&desc_list)) {
                        struct rmnet_frag_descriptor *frag_desc, *tmp;

                        list_for_each_entry_safe(frag_desc, tmp, &desc_list,
                                                 list) {
                                list_del_init(&frag_desc->list);
                                __rmnet_frag_ingress_handler(frag_desc, port);
                        }
                }

                skb_frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                consume_skb(skb);
                skb = skb_frag;
        }

        rcu_read_lock();
        rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
        if (rmnet_perf_opt_chain_end)
                rmnet_perf_opt_chain_end();
        rcu_read_unlock();
}

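/* Free every descriptor on the port's free list along with the pool itself.
 * Note that only descriptors currently sitting on the free list are freed
 * here.
 */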
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool;
        struct rmnet_frag_descriptor *frag_desc, *tmp;

        pool = port->frag_desc_pool;

        list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
                kfree(frag_desc);
                pool->pool_size--;
        }

        kfree(pool);
}

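/* Allocate the descriptor pool for a port and pre-populate its free list
 * with RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors. Returns -ENOMEM on
 * allocation failure; if a descriptor allocation fails partway through, the
 * partially built pool remains attached to the port, so it can still be
 * reclaimed via rmnet_descriptor_deinit().
 */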
int rmnet_descriptor_init(struct rmnet_port *port)
{
        struct rmnet_frag_descriptor_pool *pool;
        int i;

        spin_lock_init(&port->desc_pool_lock);
        pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
        if (!pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&pool->free_list);
        port->frag_desc_pool = pool;

        for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
                struct rmnet_frag_descriptor *frag_desc;

                frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
                if (!frag_desc)
                        return -ENOMEM;

                INIT_LIST_HEAD(&frag_desc->list);
                INIT_LIST_HEAD(&frag_desc->sub_frags);
                list_add_tail(&frag_desc->list, &pool->free_list);
                pool->pool_size++;
        }

        return 0;
}