rmnet_descriptor.c

/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>

#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
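
/* Grab a descriptor from the port's free pool, or allocate a new one with
 * GFP_ATOMIC if the pool is empty. Returns NULL only on allocation failure.
 */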
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);
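
/* Release every page fragment held by the descriptor, reset it, and return
 * it to the port's free pool for reuse.
 */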
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_fragment *frag, *tmp;
	unsigned long flags;

	list_del(&frag_desc->list);

	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		struct page *page = skb_frag_page(&frag->frag);

		if (page)
			put_page(page);

		list_del(&frag->list);
		kfree(frag);
	}

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
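
/* Remove 'size' bytes from the front of the descriptor, dropping whole
 * fragments where possible. Pulling the entire packet (or more) recycles the
 * descriptor and returns NULL; otherwise a pointer to the new start of the
 * data is returned.
 */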
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;

	if (size >= frag_desc->len) {
		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
			__func__, size, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!size)
			break;

		if (size >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			size -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Pull off 'size' bytes */
		skb_frag_off_add(&frag->frag, size);
		skb_frag_size_sub(&frag->frag, size);
		frag_desc->len -= size;
		break;
	}

	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_pull);
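
/* Trim the descriptor down to 'size' bytes by chopping data off the end,
 * dropping whole fragments where possible. Trimming to zero recycles the
 * descriptor and returns NULL.
 */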
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;
	unsigned int eat;

	if (!size) {
		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
			__func__, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	/* Growing bigger doesn't make sense */
	if (size >= frag_desc->len)
		goto out;

	/* Compute number of bytes to remove from the end */
	eat = frag_desc->len - size;
	list_for_each_entry_safe_reverse(frag, tmp, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!eat)
			goto out;

		if (eat >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			eat -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Chop off 'eat' bytes from the end */
		skb_frag_size_sub(&frag->frag, eat);
		frag_desc->len -= eat;
		goto out;
	}

out:
	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_trim);
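
/* Copy 'len' bytes of descriptor data starting at offset 'off' into the
 * linear buffer 'buf', walking the fragment list as needed.
 */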
static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
				u32 off, u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u32 frag_size, copy_len;
	u32 buf_offset = 0;

	/* Don't make me do something we'd both regret */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return -EINVAL;

	/* Copy 'len' bytes into the buffer starting from 'off' */
	list_for_each_entry(frag, &frag_desc->frags, list) {
		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			copy_len = min_t(u32, len, frag_size - off);
			memcpy(buf + buf_offset,
			       skb_frag_address(&frag->frag) + off,
			       copy_len);
			buf_offset += copy_len;
			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
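
/* Return a pointer to 'len' bytes of header data at offset 'off'. If the
 * header lies entirely within one fragment, a direct pointer into that
 * fragment is returned; otherwise the data is copied into the caller-supplied
 * buffer 'buf' and 'buf' is returned. NULL means the request was out of range
 * or the copy failed.
 */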
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u8 *start;
	u32 frag_size, offset;

	/* Don't take a long pointer off a short frag */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return NULL;

	/* Find the starting fragment */
	offset = off;
	list_for_each_entry(frag, &frag_desc->frags, list) {
		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			start = skb_frag_address(&frag->frag) + off;
			/* If the header is entirely on this frag, just return
			 * a pointer to it.
			 */
			if (off + len <= frag_size)
				return start;

			/* Otherwise, we need to copy the data into a linear
			 * buffer.
			 */
			break;
		}

		off -= frag_size;
	}

	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
		return NULL;

	return buf;
}
EXPORT_SYMBOL(rmnet_frag_header_ptr);
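
/* Append 'len' bytes of page 'p' starting at 'page_offset' to the descriptor
 * as a new fragment, taking a reference on the page.
 */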
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_fragment *frag;

	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
	if (!frag)
		return -ENOMEM;

	INIT_LIST_HEAD(&frag->list);
	get_page(p);
	__skb_frag_set_page(&frag->frag, p);
	skb_frag_size_set(&frag->frag, len);
	skb_frag_off_set(&frag->frag, page_offset);
	list_add_tail(&frag->list, &frag_desc->frags);
	frag_desc->len += len;
	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);
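
/* Clone 'len' bytes of fragment data starting at offset 'off' in descriptor
 * 'from' into descriptor 'to', adding one new fragment per source fragment
 * touched.
 */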
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len)
{
	struct rmnet_fragment *frag;
	int rc;

	/* Sanity check the lengths */
	if (off > from->len || len > from->len || off + len > from->len)
		return -EINVAL;

	list_for_each_entry(frag, &from->frags, list) {
		u32 frag_size;

		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			struct page *p = skb_frag_page(&frag->frag);
			u32 page_off = skb_frag_off(&frag->frag);
			u32 copy_len = min_t(u32, len, frag_size - off);

			rc = rmnet_frag_descriptor_add_frag(to, p,
							    page_off + off,
							    copy_len);
			if (rc < 0)
				return rc;

			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);
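
/* Walk the IPv6 extension header chain starting at offset 'start', returning
 * the offset of the upper-layer header and updating *nexthdrp to its
 * protocol. If a fragment header is found, its frag_off field is reported
 * via *fragp. Descriptor-based counterpart of ipv6_skip_exthdr().
 */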
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;

	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp, __hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
					   &__hp);
		if (!hp)
			return -EINVAL;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			u32 off = offsetof(struct frag_hdr, frag_off);
			__be16 *fp, __fp;

			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
						   sizeof(*fp), &__fp);
			if (!fp)
				return -EINVAL;

			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
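
/* Apply a MAP flow enable/disable command to the virtual device selected by
 * the QMAP mux ID, and report whether an ACK should be sent.
 */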
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_map_control_command *cmd,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and
	 * v6 flows. User space does not support creating dedicated flows for
	 * the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}

static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

static void
rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
			      struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dlhdr), &__dlhdr);
	if (!dlhdr)
		return;

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

static void
rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dltrl), &__dltrl);
	if (!dltrl)
		return;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd, __cmd;
	unsigned char rc = 0;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
				    &__cmd);
	if (!cmd)
		return;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}

int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd, __cmd;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
				    sizeof(*cmd), &__cmd);
	if (!cmd)
		return -1;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);
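
/* Build a descriptor for the single MAP packet starting at page fragment
 * 'start_frag' of the SKB, accounting for any checksum trailer or QMAPv5
 * header that accompanies the MAP header. Returns the number of page
 * fragments consumed, or a negative value if the frame is malformed or
 * allocation fails.
 */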
static int rmnet_frag_deaggregate_one(struct skb_shared_info *shinfo,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u32 start_frag)
{
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph;
	skb_frag_t *frag;
	u32 i;
	u32 pkt_len;
	int rc;

	frag = &shinfo->frags[start_frag];
	maph = skb_frag_address(frag);
	pkt_len = ntohs(maph->pkt_len);
	/* Catch empty frames */
	if (!pkt_len)
		return -1;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return -1;

	pkt_len += sizeof(*maph);
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if ((port->data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
					 RMNET_FLAGS_INGRESS_COALESCE)) &&
		   !maph->cd_bit) {
		u32 hsize = 0;
		u8 type;

		type = ((struct rmnet_map_v5_coal_header *)
			(maph + 1))->header_type;
		switch (type) {
		case RMNET_MAP_HEADER_TYPE_COALESCING:
			hsize = sizeof(struct rmnet_map_v5_coal_header);
			break;
		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
			hsize = sizeof(struct rmnet_map_v5_csum_header);
			break;
		}

		pkt_len += hsize;
	}

	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
		u32 frag_size;
		u32 copy_len;

		frag = &shinfo->frags[i];
		frag_size = skb_frag_size(frag);
		copy_len = min_t(u32, frag_size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
						    skb_frag_page(frag),
						    skb_frag_off(frag),
						    copy_len);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}

		pkt_len -= copy_len;
		skb_frag_off_add(frag, copy_len);
		skb_frag_size_sub(frag, copy_len);
		/* If the fragment is exhausted, we can move to the next one */
		if (!skb_frag_size(frag))
			i++;
	}

	if (pkt_len) {
		/* Packet length is larger than the amount of data we have */
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return -1;
	}

	list_add_tail(&frag_desc->list, list);
	return (int)(i - start_frag);
}
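
/* Split an aggregated MAP frame into one descriptor per packet, appending
 * the descriptors to 'list'.
 */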
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 i = 0;
	int rc;

	while (i < shinfo->nr_frags) {
		rc = rmnet_frag_deaggregate_one(shinfo, port, list, i);
		if (rc < 0)
			return;

		i += (u32)rc;
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (e.g. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}

/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}

/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_fragment *frag, *tmp;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
				     skb_put(head_skb, hdr_len));
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* Pull the headers off carefully */
		if (hdr_len == frag_desc->len)
			/* Fast forward "header only" packets */
			goto skip_frags;

		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
			kfree_skb(head_skb);
			return NULL;
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}

	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in the page fragments */
	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		struct page *p = skb_frag_page(&frag->frag);
		u32 frag_size = skb_frag_size(&frag->frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(p);
			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
					skb_frag_off(&frag->frag), frag_size,
					frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		head_skb->cb[0] = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	return head_skb;
}

/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);
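
/* Carve the next gso_size * gso_segs bytes of payload out of a coalesced
 * descriptor into a new descriptor that shares the original headers, fixing
 * up the TCP sequence number or IPv4 ID for the segment, and queue it on
 * 'list'.
 */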
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_desc;
	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
	u32 offset = hlen + coal_desc->data_offset;
	int rc;

	new_desc = rmnet_get_frag_descriptor(port);
	if (!new_desc)
		return;

	/* Header information and most metadata is the same as the original */
	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_desc->list);
	INIT_LIST_HEAD(&new_desc->frags);
	new_desc->len = 0;

	/* Add the header fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
						  hlen);
	if (rc < 0)
		goto recycle;

	/* Add in the data fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
						  dlen);
	if (rc < 0)
		goto recycle;

	/* Update protocol-specific metadata */
	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*th), &__th);
		if (!th)
			goto recycle;

		new_desc->tcp_seq_set = 1;
		new_desc->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*uh), &__uh);
		if (!uh)
			goto recycle;

		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			goto recycle;

		new_desc->ip_id_set = 1;
		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_desc->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += dlen;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	list_add_tail(&new_desc->list, list);
	return;

recycle:
	rmnet_recycle_frag_descriptor(new_desc, port);
}
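
/* Verify the transport checksum of the packet held by the descriptor by
 * computing it over the pseudo header and payload. Returns true if the
 * checksum is valid.
 */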
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	datagram_len = frag_desc->len - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}

/* Converts the coalesced frame into a list of descriptors.
 * NLOs containing csum errors will not be included.
 */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header coal_hdr;
	u8 *version;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Copy the coal header into our local storage before pulling it. It's
	 * possible that this header (or part of it) is the last part of a
	 * page and pulling it off would cause it to be freed. Referring back
	 * to the header would be invalid in that case.
	 */
	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
				 sizeof(coal_hdr), &coal_hdr) < 0)
		return;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
					      sizeof(coal_hdr)))
		return;

	/* By definition, this byte is linear, and the first byte on the
	 * first fragment. ;) Hence why no header_ptr() call is needed
	 * for it.
	 */
	version = rmnet_frag_data_ptr(coal_desc);
	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			return;

		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return;

		coal_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*th),
					   &__th);
		if (!th)
			return;

		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*uh),
					   &__uh);
		if (!uh)
			return;

		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes += coal_desc->len;
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;

	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}

		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = rmnet_frag_header_ptr(frag_desc,
					 sizeof(struct rmnet_map_header),
					 sizeof(*coal_hdr), &__coal_hdr);
	if (!coal_hdr)
		return -EINVAL;

	veid = coal_hdr->virtual_channel_id;

	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}

/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len)
{
	struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 nlo_err_mask;
	u32 offset = sizeof(struct rmnet_map_header);
	int rc = 0;

	/* Grab the header type. It's easier to grab enough for a full csum
	 * offload header here since it's only 8 bytes and then check the
	 * header type using that. This also doubles as a check to make sure
	 * there's enough data after the QMAP header to ensure that another
	 * header is present.
	 */
	csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
					 &__csum_hdr);
	if (!csum_hdr)
		return -EINVAL;

	switch (csum_hdr->header_type) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_frag_data_check_coal_header(frag_desc,
						       &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
					     list);
		if (list_first_entry(list, struct rmnet_frag_descriptor,
				     list) != frag_desc)
			rmnet_recycle_frag_descriptor(frag_desc, port);
		break;

	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (csum_hdr->csum_valid_required) {
			priv->stats.csum_ok++;
			frag_desc->csum_valid = true;
		} else {
			priv->stats.csum_valid_unset++;
		}

		if (!rmnet_frag_pull(frag_desc, port,
				     offset + sizeof(*csum_hdr))) {
			rc = -EINVAL;
			break;
		}

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		if (!rmnet_frag_trim(frag_desc, port, len)) {
			rc = -EINVAL;
			break;
		}

		list_del_init(&frag_desc->list);
		list_add_tail(&frag_desc->list, list);
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);
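
/* Handle a single deaggregated descriptor: dispatch MAP commands, strip the
 * QMAP header(s), and hand the resulting packet descriptors either to the
 * rmnet_perf hook (if registered) or to the stack via rmnet_frag_deliver().
 */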
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
			     struct rmnet_port *port)
{
	rmnet_perf_desc_hook_t rmnet_perf_ingress;
	struct rmnet_map_header *qmap, __qmap;
	struct rmnet_endpoint *ep;
	struct rmnet_frag_descriptor *frag, *tmp;
	LIST_HEAD(segs);
	u16 len, pad;
	u8 mux_id;

	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
	if (!qmap)
		goto recycle;

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			rmnet_frag_flow_command(frag_desc, port, len);
			goto recycle;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			rmnet_frag_command(frag_desc, qmap, port);

		goto recycle;
	}

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto recycle;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto recycle;

	frag_desc->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
						       len))
			goto recycle;
	} else {
		/* We only have the main QMAP header to worry about */
		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
			return;

		if (!rmnet_frag_trim(frag_desc, port, len))
			return;

		list_add_tail(&frag_desc->list, &segs);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	rcu_read_lock();
	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
	if (rmnet_perf_ingress) {
		list_for_each_entry_safe(frag, tmp, &segs, list) {
			list_del_init(&frag->list);
			rmnet_perf_ingress(frag, port);
		}
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	list_for_each_entry_safe(frag, tmp, &segs, list) {
		list_del_init(&frag->list);
		rmnet_frag_deliver(frag, port);
	}

	return;

recycle:
	rmnet_recycle_frag_descriptor(frag_desc, port);
}

/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);

void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port)
{
	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
	LIST_HEAD(desc_list);

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag;

		rmnet_frag_deaggregate(skb, port, &desc_list);
		if (!list_empty(&desc_list)) {
			struct rmnet_frag_descriptor *frag_desc, *tmp;

			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
						 list) {
				list_del_init(&frag_desc->list);
				__rmnet_frag_ingress_handler(frag_desc, port);
			}
		}

		skb_frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		consume_skb(skb);
		skb = skb_frag;
	}

	rcu_read_lock();
	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
	if (rmnet_perf_opt_chain_end)
		rmnet_perf_opt_chain_end();
	rcu_read_unlock();
}
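
/* Free every descriptor on the port's free pool and the pool structure
 * itself.
 */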
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	struct rmnet_frag_descriptor *frag_desc, *tmp;

	pool = port->frag_desc_pool;

	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
		kfree(frag_desc);
		pool->pool_size--;
	}

	kfree(pool);
}
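
/* Allocate the descriptor pool for a port and pre-populate it with
 * RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors.
 */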
int rmnet_descriptor_init(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	spin_lock_init(&port->desc_pool_lock);
	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}