rmnet_descriptor.c

/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);

struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->sub_frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct page *page = skb_frag_page(&frag_desc->frag);
	unsigned long flags;

	list_del(&frag_desc->list);
	if (page)
		put_page(page);

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->sub_frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
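
/* A minimal usage sketch of the descriptor pool above, compiled out and not
 * part of the driver. It assumes the caller already holds a valid
 * struct rmnet_port; the helper name (example_pool_usage) is hypothetical
 * and only illustrates the get/recycle pairing.
 */
#if 0
static void example_pool_usage(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor *frag_desc;

	/* Grab a descriptor; falls back to kzalloc() if the pool is empty */
	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return;

	/* ... fill the descriptor and process the packet it describes ... */

	/* Return it to the free list; this also drops the page reference */
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
#endif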
void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
			       struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_frag_descriptor *frag_desc;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return;

	rmnet_frag_fill(frag_desc, p, page_offset, len);
	list_add_tail(&frag_desc->list, list);
}
EXPORT_SYMBOL(rmnet_descriptor_add_frag);
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_data_ptr(frag_desc) + start;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			__be16 *fp;

			fp = rmnet_frag_data_ptr(frag_desc) + start +
			     offsetof(struct frag_hdr, frag_off);
			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
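
/* A minimal, compiled-out sketch of the caller pattern for the helper above,
 * mirroring how rmnet_frag_segment_coal_data() later in this file uses it.
 * The function name (example_ipv6_walk) is hypothetical.
 */
#if 0
static int example_ipv6_walk(struct rmnet_frag_descriptor *frag_desc)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)rmnet_frag_data_ptr(frag_desc);
	u8 protocol = ip6h->nexthdr;
	__be16 frag_off;
	int ip_len;

	/* Returns the offset of the transport header, or a negative errno */
	ip_len = rmnet_frag_ipv6_skip_exthdr(frag_desc, sizeof(*ip6h),
					     &protocol, &frag_off);
	if (ip_len < 0 || frag_off)
		return -EINVAL; /* malformed, or a fragmented packet */

	/* protocol now holds the L4 protocol (e.g. IPPROTO_TCP) */
	return ip_len;
}
#endif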
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_map_control_command *cmd;
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	cmd = (struct rmnet_map_control_command *)
	      ((char *)qmap + sizeof(*qmap));

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}
static void
rmnet_frag_process_flow_start(struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr;
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = (struct rmnet_map_dl_ind_hdr *)((char *)cmd + sizeof(*cmd));

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

static void
rmnet_frag_process_flow_end(struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl;
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = (struct rmnet_map_dl_ind_trl *)((char *)cmd + sizeof(*cmd));

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}
/* Process MAP command frame and send N/ACK message as appropriate. The
 * command name is decoded here and the appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
	unsigned char rc = 0;

	cmd = (struct rmnet_map_control_command *)
	      ((char *)qmap + sizeof(*qmap));
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}
int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd;
	unsigned char command_name;

	cmd = (struct rmnet_map_control_command_header *)
	      ((char *)qmap + sizeof(*qmap));
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);
void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
			    struct list_head *list)
{
	struct rmnet_map_header *maph;
	u8 *data = skb_frag_address(frag);
	u32 offset = 0;
	u32 packet_len;

	while (offset < skb_frag_size(frag)) {
		maph = (struct rmnet_map_header *)data;
		packet_len = ntohs(maph->pkt_len);

		/* Some hardware can send us empty frames. Catch them */
		if (packet_len == 0)
			return;

		packet_len += sizeof(*maph);

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
			packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
		} else if (port->data_format &
			   (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
			    RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
			u32 hsize = 0;
			u8 type;

			type = ((struct rmnet_map_v5_coal_header *)
				(data + sizeof(*maph)))->header_type;
			switch (type) {
			case RMNET_MAP_HEADER_TYPE_COALESCING:
				hsize = sizeof(struct rmnet_map_v5_coal_header);
				break;
			case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
				hsize = sizeof(struct rmnet_map_v5_csum_header);
				break;
			}

			packet_len += hsize;
		}

		if ((int)skb_frag_size(frag) - (int)packet_len < 0)
			return;

		rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
					  frag->bv_offset + offset,
					  packet_len);

		offset += packet_len;
		data += packet_len;
	}
}
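
/* A worked example of the length accounting above, with illustrative sizes
 * (a 4-byte MAP header and a 1400-byte payload; the real struct sizes come
 * from rmnet_map.h). With the MAPv4 checksum format enabled, the span carved
 * from the page per iteration is:
 *
 *	packet_len  = 1400	(from maph->pkt_len, includes any pad)
 *	packet_len += 4		(sizeof(struct rmnet_map_header))
 *	packet_len += sizeof(struct rmnet_map_dl_csum_trailer)
 *
 * Successive packets are then peeled off the same page by advancing both
 * 'offset' and 'data' by that total each time around the loop.
 */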
/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}
/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}
/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_frag_descriptor *sub_frag, *tmp;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
		skb_reset_network_header(head_skb);

		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* If the headers we added are the start of the page,
		 * we don't want to add them twice
		 */
		if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
			/* "Header only" packets can be fast-forwarded */
			if (hdr_len == skb_frag_size(&frag_desc->frag))
				goto skip_frags;

			if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
				kfree_skb(head_skb);
				return NULL;
			}
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}

	/* Add main fragment */
	get_page(skb_frag_page(&frag_desc->frag));
	skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
			frag_desc->frag.bv_offset,
			skb_frag_size(&frag_desc->frag),
			skb_frag_size(&frag_desc->frag));

	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in any frags from rmnet_perf */
	list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
		skb_frag_t *frag;
		u32 frag_size;

		frag = &sub_frag->frag;
		frag_size = skb_frag_size(frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(skb_frag_page(frag));
			skb_add_rx_frag(current_skb, shinfo->nr_frags,
					skb_frag_page(frag), frag->bv_offset,
					frag_size, frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}

		rmnet_recycle_frag_descriptor(sub_frag, port);
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		head_skb->cb[0] = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	return head_skb;
}
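
/* A minimal, compiled-out sketch of the checksum-corruption trick used
 * above. It assumes the transport checksum field has already been seeded
 * with the inverted pseudo-header sum, as rmnet_alloc_skb() does; the helper
 * name (example_corrupt_csum) is hypothetical.
 */
#if 0
static void example_corrupt_csum(struct sk_buff *skb, __sum16 *check,
				 unsigned int offset)
{
	__wsum csum;

	/* Checksum the datagram; *check currently holds ~pseudo, so folding
	 * this sum yields the *correct* transport checksum.
	 */
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	/* Write correct-checksum-plus-one: it always fails verification, and
	 * since csum_fold() never returns 0xFFFF, the stored value can never
	 * wrap to 0 (which UDP would treat as "no checksum present").
	 */
	*check = csum16_add(csum_fold(csum), htons(1));
	skb->ip_summed = CHECKSUM_NONE;
}
#endif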
/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_frag;
	u8 *hdr_start = rmnet_frag_data_ptr(coal_desc);
	u32 offset;

	new_frag = rmnet_get_frag_descriptor(port);
	if (!new_frag)
		return;

	/* Account for header lengths to access the data start */
	offset = coal_desc->frag.bv_offset + coal_desc->ip_len +
		 coal_desc->trans_len + coal_desc->data_offset;

	/* Header information and most metadata is the same as the original */
	memcpy(new_frag, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_frag->list);
	INIT_LIST_HEAD(&new_frag->sub_frags);
	rmnet_frag_fill(new_frag, skb_frag_page(&coal_desc->frag), offset,
			coal_desc->gso_size * coal_desc->gso_segs);

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th;

		th = (struct tcphdr *)(hdr_start + coal_desc->ip_len);
		new_frag->tcp_seq_set = 1;
		new_frag->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh;

		uh = (struct udphdr *)(hdr_start + coal_desc->ip_len);
		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph;

		iph = (struct iphdr *)hdr_start;
		new_frag->ip_id_set = 1;
		new_frag->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_frag->hdr_ptr = hdr_start;
	new_frag->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += coal_desc->gso_size * coal_desc->gso_segs;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	list_add_tail(&new_frag->list, list);
}
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	datagram_len = skb_frag_size(&frag_desc->frag) - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}
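
/* The validation above relies on the standard ones'-complement identity:
 * for a correct TCP/UDP checksum, summing the pseudo header and the whole
 * datagram (including the stored checksum field) folds to 0xFFFF, so
 * csum_fold() of the running sum is 0. A toy two-word example, assuming the
 * pseudo-header terms are omitted for brevity:
 *
 *	data words:     0x4500 + 0x003c = 0x453c
 *	stored csum:    ~0x453c         = 0xbac3	(written by sender)
 *	receiver sum:   0x453c + 0xbac3 = 0xffff  ->  csum_fold() == 0
 */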
/* Converts the coalesced frame into a list of descriptors.
 * NLOs containing csum errors will not be included.
 */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct iphdr *iph;
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header *coal_hdr;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
		return;

	coal_hdr = (struct rmnet_map_v5_coal_header *)
		   rmnet_frag_data_ptr(coal_desc);
	if (!rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr)))
		return;

	iph = (struct iphdr *)rmnet_frag_data_ptr(coal_desc);

	if (iph->version == 4) {
		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
		int ip_len;
		__be16 frag_off;
		u8 protocol = ip6h->nexthdr;

		coal_desc->ip_proto = 6;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th;

		th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes +=
			skb_frag_size(&coal_desc->frag);
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh;

		uh = (struct udphdr *)((u8 *)iph + coal_desc->ip_len);
		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes +=
			skb_frag_size(&coal_desc->frag);
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;

	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
		coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
		coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}
/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}

		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}
/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr;
	unsigned char *data = rmnet_frag_data_ptr(frag_desc);
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = (struct rmnet_map_v5_coal_header *)
		   (data + sizeof(struct rmnet_map_header));
	veid = coal_hdr->virtual_channel_id;

	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}
/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len)
{
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 nlo_err_mask;
	int rc = 0;

	switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_frag_data_check_coal_header(frag_desc,
						       &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
					     list);
		if (list_first_entry(list, struct rmnet_frag_descriptor,
				     list) != frag_desc)
			rmnet_recycle_frag_descriptor(frag_desc, port);
		break;

	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (rmnet_frag_get_csum_valid(frag_desc)) {
			priv->stats.csum_ok++;
			frag_desc->csum_valid = true;
		} else {
			priv->stats.csum_valid_unset++;
		}

		if (!rmnet_frag_pull(frag_desc, port,
				     sizeof(struct rmnet_map_header) +
				     sizeof(struct rmnet_map_v5_csum_header))) {
			rc = -EINVAL;
			break;
		}

		frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		if (!rmnet_frag_trim(frag_desc, port, len)) {
			rc = -EINVAL;
			break;
		}

		list_del_init(&frag_desc->list);
		list_add_tail(&frag_desc->list, list);
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);

static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
			     struct rmnet_port *port)
{
	rmnet_perf_desc_hook_t rmnet_perf_ingress;
	struct rmnet_map_header *qmap;
	struct rmnet_endpoint *ep;
	struct rmnet_frag_descriptor *frag, *tmp;
	LIST_HEAD(segs);
	u16 len, pad;
	u8 mux_id;

	qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			rmnet_frag_flow_command(qmap, port, len);
			goto recycle;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			rmnet_frag_command(qmap, port);

		goto recycle;
	}

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto recycle;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto recycle;

	frag_desc->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
						       len))
			goto recycle;
	} else {
		/* We only have the main QMAP header to worry about */
		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
			return;

		frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);

		if (!rmnet_frag_trim(frag_desc, port, len))
			return;

		list_add_tail(&frag_desc->list, &segs);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	rcu_read_lock();
	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
	if (rmnet_perf_ingress) {
		list_for_each_entry_safe(frag, tmp, &segs, list) {
			list_del_init(&frag->list);
			rmnet_perf_ingress(frag, port);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();
	list_for_each_entry_safe(frag, tmp, &segs, list) {
		list_del_init(&frag->list);
		rmnet_frag_deliver(frag, port);
	}

	return;

recycle:
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);

void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port)
{
	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
	LIST_HEAD(desc_list);

	/* Deaggregation and freeing of HW-originated buffers is done here */
	while (skb) {
		struct sk_buff *skb_frag;

		rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port,
				       &desc_list);
		if (!list_empty(&desc_list)) {
			struct rmnet_frag_descriptor *frag_desc, *tmp;

			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
						 list) {
				list_del_init(&frag_desc->list);
				__rmnet_frag_ingress_handler(frag_desc, port);
			}
		}

		skb_frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		consume_skb(skb);
		skb = skb_frag;
	}

	rcu_read_lock();
	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
	if (rmnet_perf_opt_chain_end)
		rmnet_perf_opt_chain_end();
	rcu_read_unlock();
}
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	struct rmnet_frag_descriptor *frag_desc, *tmp;

	pool = port->frag_desc_pool;

	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
		kfree(frag_desc);
		pool->pool_size--;
	}

	kfree(pool);
}
int rmnet_descriptor_init(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	spin_lock_init(&port->desc_pool_lock);
	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->sub_frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}
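
/* A minimal, compiled-out sketch of the pool lifecycle from a port's point
 * of view, assuming the usual setup/teardown points in rmnet_config. The
 * function names (example_port_setup/example_port_teardown) are
 * hypothetical.
 */
#if 0
static int example_port_setup(struct rmnet_port *port)
{
	/* Pre-allocates RMNET_FRAG_DESCRIPTOR_POOL_SIZE (64) descriptors */
	return rmnet_descriptor_init(port);
}

static void example_port_teardown(struct rmnet_port *port)
{
	/* Frees every descriptor on the free list, then the pool itself.
	 * Descriptors still in flight would be leaked, so all ingress
	 * processing must be quiesced first.
	 */
	rmnet_descriptor_deinit(port);
}
#endif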