/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);

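/* Hand out a free descriptor from the port's pool, growing the pool with an
 * atomic allocation when the free list is empty. Runs in the RX path, so the
 * pool lock is taken with IRQs disabled.
 */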
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_frag_descriptor *frag_desc;
	unsigned long flags;

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	if (!list_empty(&pool->free_list)) {
		frag_desc = list_first_entry(&pool->free_list,
					     struct rmnet_frag_descriptor,
					     list);
		list_del_init(&frag_desc->list);
	} else {
		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			goto out;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		pool->pool_size++;
	}

out:
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
	return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);

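/* Return a descriptor to the pool's free list. Any page fragments still
 * attached drop their page references here, so the data must not be touched
 * afterwards.
 */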
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
	struct rmnet_fragment *frag, *tmp;
	unsigned long flags;

	list_del(&frag_desc->list);
	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		struct page *page = skb_frag_page(&frag->frag);

		if (page)
			put_page(page);

		list_del(&frag->list);
		kfree(frag);
	}

	memset(frag_desc, 0, sizeof(*frag_desc));
	INIT_LIST_HEAD(&frag_desc->list);
	INIT_LIST_HEAD(&frag_desc->frags);

	spin_lock_irqsave(&port->desc_pool_lock, flags);
	list_add_tail(&frag_desc->list, &pool->free_list);
	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);

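/* Remove 'size' bytes from the front of the descriptor, releasing any page
 * fragments that are consumed entirely. Pulling the whole packet (or more)
 * recycles the descriptor and returns NULL.
 */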
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;

	if (size >= frag_desc->len) {
		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
			__func__, size, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!size)
			break;

		if (size >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			size -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Pull off 'size' bytes */
		skb_frag_off_add(&frag->frag, size);
		skb_frag_size_sub(&frag->frag, size);
		frag_desc->len -= size;
		break;
	}

	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_pull);

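/* Trim the descriptor down to 'size' bytes by chopping fragments off the
 * tail, the mirror image of rmnet_frag_pull(). Trimming to 0 recycles the
 * descriptor and returns NULL; "trimming" to a larger size is a no-op.
 */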
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size)
{
	struct rmnet_fragment *frag, *tmp;
	unsigned int eat;

	if (!size) {
		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
			__func__, frag_desc->len);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return NULL;
	}

	/* Growing bigger doesn't make sense */
	if (size >= frag_desc->len)
		goto out;

	/* Compute number of bytes to remove from the end */
	eat = frag_desc->len - size;
	list_for_each_entry_safe_reverse(frag, tmp, &frag_desc->frags, list) {
		u32 frag_size = skb_frag_size(&frag->frag);

		if (!eat)
			goto out;

		if (eat >= frag_size) {
			/* Remove the whole frag */
			struct page *page = skb_frag_page(&frag->frag);

			if (page)
				put_page(page);

			list_del(&frag->list);
			eat -= frag_size;
			frag_desc->len -= frag_size;
			kfree(frag);
			continue;
		}

		/* Chop off 'eat' bytes from the end */
		skb_frag_size_sub(&frag->frag, eat);
		frag_desc->len -= eat;
		goto out;
	}

out:
	return rmnet_frag_data_ptr(frag_desc);
}
EXPORT_SYMBOL(rmnet_frag_trim);

static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
				u32 off, u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u32 frag_size, copy_len;
	u32 buf_offset = 0;

	/* Don't make me do something we'd both regret */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return -EINVAL;

	/* Copy 'len' bytes into the buffer starting from 'off' */
	list_for_each_entry(frag, &frag_desc->frags, list) {
		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			copy_len = min_t(u32, len, frag_size - off);
			memcpy(buf + buf_offset,
			       skb_frag_address(&frag->frag) + off,
			       copy_len);
			buf_offset += copy_len;
			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}

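/* Return a pointer to 'len' bytes of data at offset 'off'. If the requested
 * region sits entirely within one fragment, a direct pointer into the page is
 * returned; otherwise the bytes are copied into the caller-provided 'buf',
 * which must therefore be at least 'len' bytes.
 */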
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf)
{
	struct rmnet_fragment *frag;
	u8 *start;
	u32 frag_size, offset;

	/* Don't take a long pointer off a short frag */
	if (off > frag_desc->len || len > frag_desc->len ||
	    off + len > frag_desc->len)
		return NULL;

	/* Find the starting fragment */
	offset = off;
	list_for_each_entry(frag, &frag_desc->frags, list) {
		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			start = skb_frag_address(&frag->frag) + off;
			/* If the header is entirely on this frag, just return
			 * a pointer to it.
			 */
			if (off + len <= frag_size)
				return start;

			/* Otherwise, we need to copy the data into a linear
			 * buffer.
			 */
			break;
		}

		off -= frag_size;
	}

	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
		return NULL;

	return buf;
}
EXPORT_SYMBOL(rmnet_frag_header_ptr);

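/* Append a page fragment to the descriptor, taking an additional reference
 * on the page and growing the descriptor's total length by 'len'.
 */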
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len)
{
	struct rmnet_fragment *frag;

	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
	if (!frag)
		return -ENOMEM;

	INIT_LIST_HEAD(&frag->list);
	get_page(p);
	__skb_frag_set_page(&frag->frag, p);
	skb_frag_size_set(&frag->frag, len);
	skb_frag_off_set(&frag->frag, page_offset);
	list_add_tail(&frag->list, &frag_desc->frags);
	frag_desc->len += len;
	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);

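/* Share 'len' bytes starting at 'off' from one descriptor with another by
 * adding overlapping page fragments. No data is copied; only page references
 * are taken.
 */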
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len)
{
	struct rmnet_fragment *frag;
	int rc;

	/* Sanity check the lengths */
	if (off > from->len || len > from->len || off + len > from->len)
		return -EINVAL;

	list_for_each_entry(frag, &from->frags, list) {
		u32 frag_size;

		if (!len)
			break;

		frag_size = skb_frag_size(&frag->frag);
		if (off < frag_size) {
			struct page *p = skb_frag_page(&frag->frag);
			u32 page_off = skb_frag_off(&frag->frag);
			u32 copy_len = min_t(u32, len, frag_size - off);

			rc = rmnet_frag_descriptor_add_frag(to, p,
							    page_off + off,
							    copy_len);
			if (rc < 0)
				return rc;

			len -= copy_len;
			off = 0;
		} else {
			off -= frag_size;
		}
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);

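/* Walk the IPv6 extension header chain starting at offset 'start', the
 * fragment-list analogue of ipv6_skip_exthdr(). Returns the offset of the
 * upper-layer header, and reports the final nexthdr value and the fragment
 * header's frag_off field (if any) through the out parameters.
 */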
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp)
{
	u8 nexthdr = *nexthdrp;

	*fragp = 0;
	while (ipv6_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr *hp, __hp;
		int hdrlen;

		if (nexthdr == NEXTHDR_NONE)
			return -EINVAL;

		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
					   &__hp);
		if (!hp)
			return -EINVAL;

		if (nexthdr == NEXTHDR_FRAGMENT) {
			u32 off = offsetof(struct frag_hdr, frag_off);
			__be16 *fp, __fp;

			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
						   sizeof(*fp), &__fp);
			if (!fp)
				return -EINVAL;

			*fragp = *fp;
			if (ntohs(*fragp) & ~0x7)
				break;

			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			hdrlen = (hp->hdrlen + 2) << 2;
		} else {
			hdrlen = ipv6_optlen(hp);
		}

		nexthdr = hp->nexthdr;
		start += hdrlen;
	}

	*nexthdrp = nexthdr;
	return start;
}
EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);

static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
				     struct rmnet_map_control_command *cmd,
				     struct rmnet_port *port,
				     int enable)
{
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = qmap->mux_id;
	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		return RX_HANDLER_CONSUMED;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		return RX_HANDLER_CONSUMED;

	vnd = ep->egress_dev;
	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}

static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
				unsigned char type,
				struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = port->dev;
	struct sk_buff *skb;
	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);

	skb = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = dev;

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

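/* Handle a DL marker "flow start" indication: record the header fields in
 * the port statistics and, when DL marker v2 is in use, notify registered
 * listeners.
 */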
static void
rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
			      struct rmnet_map_control_command_header *cmd,
			      struct rmnet_port *port,
			      u16 cmd_len)
{
	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dlhdr), &__dlhdr);
	if (!dlhdr)
		return;

	port->stats.dl_hdr_last_ep_id = cmd->source_id;
	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
}

static void
rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_map_control_command_header *cmd,
			    struct rmnet_port *port, u16 cmd_len)
{
	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
	u32 offset = sizeof(struct rmnet_map_header);
	u32 data_format;
	bool is_dl_mark_v2;

	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
				      sizeof(*dltrl), &__dltrl);
	if (!dltrl)
		return;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	/* If a target is taking frag path, we can assume DL marker v2 is in
	 * play
	 */
	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd, __cmd;
	unsigned char rc = 0;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
				    &__cmd);
	if (!cmd)
		return;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_frag_send_ack(qmap, rc, port);
}

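/* Dispatch DL marker commands (flow start/end) carried in a MAP control
 * frame. Returns 0 if the command was handled, 1 for unknown commands, and
 * -1 if the command header cannot be read.
 */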
int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len)
{
	struct rmnet_map_control_command_header *cmd, __cmd;

	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
				    sizeof(*cmd), &__cmd);
	if (!cmd)
		return -1;

	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
		break;

	default:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(rmnet_frag_flow_command);

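/* Carve a single MAP packet out of the skb's page fragments, starting at
 * fragment 'start_frag'. On success the new descriptor is added to 'list'
 * and the number of fully consumed skb fragments is returned, so the caller
 * can resume deaggregation from the right spot.
 */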
static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u32 start_frag)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph, __maph;
	skb_frag_t *frag;
	u32 i;
	u32 pkt_len;
	int rc;

	frag = &shinfo->frags[start_frag];
	/* Grab the QMAP header. Careful, as there's no guarantee that it's
	 * contiguous!
	 */
	if (likely(skb_frag_size(frag) >= sizeof(*maph))) {
		maph = skb_frag_address(frag);
	} else {
		/* The header's split across pages. We can rebuild it.
		 * Probably not faster or stronger than before. But certainly
		 * more linear.
		 */
		if (skb_copy_bits(skb, 0, &__maph, sizeof(__maph)) < 0)
			return -1;

		maph = &__maph;
	}

	pkt_len = ntohs(maph->pkt_len);
	/* Catch empty frames */
	if (!pkt_len)
		return -1;

	frag_desc = rmnet_get_frag_descriptor(port);
	if (!frag_desc)
		return -1;

	pkt_len += sizeof(*maph);
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if ((port->data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
					 RMNET_FLAGS_INGRESS_COALESCE)) &&
		   !maph->cd_bit) {
		u32 hsize = 0;
		u8 type;

		/* Check the type. This seems like overkill for less than a
		 * single byte, doesn't it?
		 */
		if (likely(skb_frag_size(frag) >= sizeof(*maph) + 1)) {
			type = *((u8 *)maph + sizeof(*maph));
		} else {
			if (skb_copy_bits(skb, sizeof(*maph), &type,
					  sizeof(type)) < 0)
				return -1;
		}

		/* Type only uses the first 7 bits */
		switch ((type & 0xFE) >> 1) {
		case RMNET_MAP_HEADER_TYPE_COALESCING:
			hsize = sizeof(struct rmnet_map_v5_coal_header);
			break;
		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
			hsize = sizeof(struct rmnet_map_v5_csum_header);
			break;
		}

		pkt_len += hsize;
	}

	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
		u32 frag_size;
		u32 copy_len;

		frag = &shinfo->frags[i];
		frag_size = skb_frag_size(frag);
		copy_len = min_t(u32, frag_size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
						    skb_frag_page(frag),
						    skb_frag_off(frag),
						    copy_len);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}

		pkt_len -= copy_len;
		skb_frag_off_add(frag, copy_len);
		skb_frag_size_sub(frag, copy_len);
		/* If the fragment is exhausted, we can move to the next one */
		if (!skb_frag_size(frag))
			i++;
	}

	if (pkt_len) {
		/* Packet length is larger than the amount of data we have */
		rmnet_recycle_frag_descriptor(frag_desc, port);
		return -1;
	}

	list_add_tail(&frag_desc->list, list);
	return (int)(i - start_frag);
}

void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 i = 0;
	int rc;

	while (i < shinfo->nr_frags) {
		rc = rmnet_frag_deaggregate_one(skb, port, list, i);
		if (rc < 0)
			return;

		i += (u32)rc;
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 */
static void rmnet_frag_gso_stamp(struct sk_buff *skb,
				 struct rmnet_frag_descriptor *frag_desc)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (frag_desc->trans_proto == IPPROTO_TCP)
		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	else
		shinfo->gso_type = SKB_GSO_UDP_L4;

	shinfo->gso_size = frag_desc->gso_size;
	shinfo->gso_segs = frag_desc->gso_segs;
}

/* Set the partial checksum information. Sets the transport checksum to the
 * pseudoheader checksum and sets the offload metadata.
 */
static void rmnet_frag_partial_csum(struct sk_buff *skb,
				    struct rmnet_frag_descriptor *frag_desc)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	__sum16 pseudo;
	u16 pkt_len = skb->len - frag_desc->ip_len;

	if (frag_desc->ip_proto == 4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    pkt_len, frag_desc->trans_proto,
					    0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}

	if (frag_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		tp->check = pseudo;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
}

/* Allocate and populate an skb to contain the packet represented by the
 * frag descriptor.
 */
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port)
{
	struct sk_buff *head_skb, *current_skb, *skb;
	struct skb_shared_info *shinfo;
	struct rmnet_fragment *frag, *tmp;

	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
				     skb_put(head_skb, hdr_len));
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)
			skb_set_transport_header(head_skb, frag_desc->ip_len);

		/* Pull the headers off carefully */
		if (hdr_len == frag_desc->len)
			/* Fast forward "header only" packets */
			goto skip_frags;

		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
			kfree_skb(head_skb);
			return NULL;
		}
	} else {
		/* Allocate enough space to avoid penalties in the stack
		 * from __pskb_pull_tail()
		 */
		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
		if (!head_skb)
			return NULL;

		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
	}

	shinfo = skb_shinfo(head_skb);
	current_skb = head_skb;

	/* Add in the page fragments */
	list_for_each_entry_safe(frag, tmp, &frag_desc->frags, list) {
		struct page *p = skb_frag_page(&frag->frag);
		u32 frag_size = skb_frag_size(&frag->frag);

add_frag:
		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
			get_page(p);
			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
					skb_frag_off(&frag->frag), frag_size,
					frag_size);
			if (current_skb != head_skb) {
				head_skb->len += frag_size;
				head_skb->data_len += frag_size;
			}
		} else {
			/* Alloc a new skb and try again */
			skb = alloc_skb(0, GFP_ATOMIC);
			if (!skb)
				break;

			if (current_skb == head_skb)
				shinfo->frag_list = skb;
			else
				current_skb->next = skb;

			current_skb = skb;
			shinfo = skb_shinfo(current_skb);
			goto add_frag;
		}
	}

skip_frags:
	head_skb->dev = frag_desc->dev;
	rmnet_set_skb_proto(head_skb);

	/* Handle any header metadata that needs to be updated after RSB/RSC
	 * segmentation
	 */
	if (frag_desc->ip_id_set) {
		struct iphdr *iph;

		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
		iph->id = frag_desc->ip_id;
	}

	if (frag_desc->tcp_seq_set) {
		struct tcphdr *th;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		th->seq = frag_desc->tcp_seq;
	}

	if (frag_desc->tcp_flags_set) {
		struct tcphdr *th;
		__be16 *flags;

		th = (struct tcphdr *)
		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
		flags = (__be16 *)&tcp_flag_word(th);
		*flags = frag_desc->tcp_flags;
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
		/* Set the partial checksum information */
		rmnet_frag_partial_csum(head_skb, frag_desc);
	} else if (frag_desc->csum_valid) {
		/* Non-RSB/RSC/perf packet. The current checksum is fine */
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header and update header fields */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(head_skb->len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(head_skb->len -
						  sizeof(*ip6h));
			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP) {
			check = &tcp_hdr(head_skb)->check;
		} else {
			udp_hdr(head_skb)->len = htons(head_skb->len -
						       frag_desc->ip_len);
			check = &udp_hdr(head_skb)->check;
		}

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
		head_skb->hash = frag_desc->hash;
		head_skb->sw_hash = 1;
	}

	if (frag_desc->flush_shs)
		head_skb->cb[0] = 1;

	/* Handle coalesced packets */
	if (frag_desc->gso_segs > 1)
		rmnet_frag_gso_stamp(head_skb, frag_desc);

	return head_skb;
}

/* Deliver the packets contained within a frag descriptor */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port)
{
	struct sk_buff *skb;

	skb = rmnet_alloc_skb(frag_desc, port);
	if (skb)
		rmnet_deliver_skb(skb, port);
	rmnet_recycle_frag_descriptor(frag_desc, port);
}
EXPORT_SYMBOL(rmnet_frag_deliver);

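/* Split one segment out of a coalesced descriptor: the IP/transport headers
 * plus the next 'gso_size * gso_segs' bytes of payload are cloned into a new
 * descriptor, and the TCP sequence number/flags or IPv4 ID are adjusted so
 * the segment looks like an independently received packet.
 */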
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_desc;
	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
	u32 offset = hlen + coal_desc->data_offset;
	int rc;

	new_desc = rmnet_get_frag_descriptor(port);
	if (!new_desc)
		return;

	/* Header information and most metadata is the same as the original */
	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
	INIT_LIST_HEAD(&new_desc->list);
	INIT_LIST_HEAD(&new_desc->frags);
	new_desc->len = 0;

	/* Add the header fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
						  hlen);
	if (rc < 0)
		goto recycle;

	/* Add in the data fragments */
	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
						  dlen);
	if (rc < 0)
		goto recycle;

	/* Update protocol-specific metadata */
	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*th), &__th);
		if (!th)
			goto recycle;

		new_desc->tcp_seq_set = 1;
		new_desc->tcp_seq = htonl(ntohl(th->seq) +
					  coal_desc->data_offset);

		/* Don't allow any dangerous flags to appear in any segments
		 * other than the last.
		 */
		if (th->fin || th->psh) {
			if (offset + dlen < coal_desc->len) {
				__be32 flag_word = tcp_flag_word(th);

				/* Clear the FIN and PSH flags from this
				 * segment.
				 */
				flag_word &= ~TCP_FLAG_FIN;
				flag_word &= ~TCP_FLAG_PSH;
				new_desc->tcp_flags_set = 1;
				new_desc->tcp_flags = *((__be16 *)&flag_word);
			}
		}
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
					   sizeof(*uh), &__uh);
		if (!uh)
			goto recycle;

		if (coal_desc->ip_proto == 4 && !uh->check)
			csum_valid = true;
	}

	if (coal_desc->ip_proto == 4) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			goto recycle;

		new_desc->ip_id_set = 1;
		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
	}

	new_desc->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
	coal_desc->data_offset += dlen;
	coal_desc->pkt_id = pkt_id + 1;
	coal_desc->gso_segs = 0;

	list_add_tail(&new_desc->list, list);
	return;

recycle:
	rmnet_recycle_frag_descriptor(new_desc, port);
}

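/* Check the transport checksum of the packet held in the descriptor by
 * computing it over the pseudo header and the full datagram. Returns true
 * if the checksum verifies.
 */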
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
	u8 *data = rmnet_frag_data_ptr(frag_desc);
	unsigned int datagram_len;
	__wsum csum;
	__sum16 pseudo;

	/* Keep analysis tools happy, since they will see that
	 * rmnet_frag_data_ptr() could return NULL. It can't in this case,
	 * since we can't get this far otherwise...
	 */
	if (unlikely(!data))
		return false;

	datagram_len = frag_desc->len - frag_desc->ip_len;
	if (frag_desc->ip_proto == 4) {
		struct iphdr *iph = (struct iphdr *)data;

		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					    datagram_len,
					    frag_desc->trans_proto, 0);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;

		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  datagram_len, frag_desc->trans_proto,
					  0);
	}

	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
			    csum_unfold(pseudo));
	return !csum_fold(csum);
}

/* Converts the coalesced frame into a list of descriptors.
 * NLOs containing csum errors will not be included.
 */
static void
rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
			     u64 nlo_err_mask, struct rmnet_port *port,
			     struct list_head *list)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_map_v5_coal_header coal_hdr;
	u8 *version;
	u16 pkt_len;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
	bool zero_csum = false;

	/* Copy the coal header into our local storage before pulling it. It's
	 * possible that this header (or part of it) is the last part of a page
	 * and pulling it off would cause it to be freed. Referring back to the
	 * header would be invalid in that case.
	 */
	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
				 sizeof(coal_hdr), &coal_hdr) < 0)
		return;

	/* Pull off the headers we no longer need */
	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
					      sizeof(coal_hdr)))
		return;

	/* By definition, this byte is linear, and the first byte on the
	 * first fragment. ;) Hence why no header_ptr() call is needed
	 * for it.
	 */
	version = rmnet_frag_data_ptr(coal_desc);
	if (unlikely(!version))
		return;

	if ((*version & 0xF0) == 0x40) {
		struct iphdr *iph, __iph;

		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
					    &__iph);
		if (!iph)
			return;

		coal_desc->ip_proto = 4;
		coal_desc->ip_len = iph->ihl * 4;
		coal_desc->trans_proto = iph->protocol;

		/* Don't allow coalescing of any packets with IP options */
		if (iph->ihl != 5)
			gro = false;
	} else if ((*version & 0xF0) == 0x60) {
		struct ipv6hdr *ip6h, __ip6h;
		int ip_len;
		__be16 frag_off;
		u8 protocol;

		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
					     &__ip6h);
		if (!ip6h)
			return;

		coal_desc->ip_proto = 6;
		protocol = ip6h->nexthdr;
		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
						     sizeof(*ip6h),
						     &protocol,
						     &frag_off);
		coal_desc->trans_proto = protocol;

		/* If we run into a problem, or this has a fragment header
		 * (which should technically not be possible, if the HW
		 * works as intended...), bail.
		 */
		if (ip_len < 0 || frag_off) {
			priv->stats.coal.coal_ip_invalid++;
			return;
		}

		coal_desc->ip_len = (u16)ip_len;
		if (coal_desc->ip_len > sizeof(*ip6h)) {
			/* Don't allow coalescing of any packets with IPv6
			 * extension headers.
			 */
			gro = false;
		}
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	if (coal_desc->trans_proto == IPPROTO_TCP) {
		struct tcphdr *th, __th;

		th = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*th),
					   &__th);
		if (!th)
			return;

		coal_desc->trans_len = th->doff * 4;
		priv->stats.coal.coal_tcp++;
		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
		struct udphdr *uh, __uh;

		uh = rmnet_frag_header_ptr(coal_desc,
					   coal_desc->ip_len, sizeof(*uh),
					   &__uh);
		if (!uh)
			return;

		coal_desc->trans_len = sizeof(*uh);
		priv->stats.coal.coal_udp++;
		priv->stats.coal.coal_udp_bytes += coal_desc->len;
		if (coal_desc->ip_proto == 4 && !uh->check)
			zero_csum = true;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	coal_desc->hdrs_valid = 1;

	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
		/* Mark the checksum as valid if it checks out */
		if (rmnet_frag_validate_csum(coal_desc))
			coal_desc->csum_valid = true;

		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len +
				       coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len +
				       coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt, true);
	}
}

/* Record reason for coalescing pipe closure */
static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					    u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	switch (type) {
	case RMNET_MAP_COAL_CLOSE_NON_COAL:
		stats->non_coal++;
		break;

	case RMNET_MAP_COAL_CLOSE_IP_MISS:
		stats->ip_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
		stats->trans_miss++;
		break;

	case RMNET_MAP_COAL_CLOSE_HW:
		switch (code) {
		case RMNET_MAP_COAL_CLOSE_HW_NL:
			stats->hw_nl++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_PKT:
			stats->hw_pkt++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
			stats->hw_byte++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_TIME:
			stats->hw_time++;
			break;

		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
			stats->hw_evict++;
			break;

		default:
			break;
		}
		break;

	case RMNET_MAP_COAL_CLOSE_COAL:
		stats->coal++;
		break;

	default:
		break;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced frame must be dropped. Then check if there are any
 * checksum issues
 */
static int
rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
				  u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 mask = 0;
	int i;
	u8 veid, pkts = 0;

	coal_hdr = rmnet_frag_header_ptr(frag_desc,
					 sizeof(struct rmnet_map_header),
					 sizeof(*coal_hdr), &__coal_hdr);
	if (!coal_hdr)
		return -EINVAL;

	veid = coal_hdr->virtual_channel_id;
	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_frag_data_log_close_stats(priv,
					coal_hdr->close_type,
					coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;
	return 0;
}

/* Process a QMAPv5 packet header */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len)
{
	struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
	u64 nlo_err_mask;
	u32 offset = sizeof(struct rmnet_map_header);
	int rc = 0;

	/* Grab the header type. It's easier to grab enough for a full csum
	 * offload header here since it's only 8 bytes and then check the
	 * header type using that. This also doubles as a check to make sure
	 * there's enough data after the QMAP header to ensure that another
	 * header is present.
	 */
	csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
					 &__csum_hdr);
	if (!csum_hdr)
		return -EINVAL;

	switch (csum_hdr->header_type) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_frag_data_check_coal_header(frag_desc,
						       &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
					     list);
		if (list_first_entry(list, struct rmnet_frag_descriptor,
				     list) != frag_desc)
			rmnet_recycle_frag_descriptor(frag_desc, port);
		break;

	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
			priv->stats.csum_sw++;
		} else if (csum_hdr->csum_valid_required) {
			priv->stats.csum_ok++;
			frag_desc->csum_valid = true;
		} else {
			priv->stats.csum_valid_unset++;
		}

		if (!rmnet_frag_pull(frag_desc, port,
				     offset + sizeof(*csum_hdr))) {
			rc = -EINVAL;
			break;
		}

		/* Remove padding only for csum offload packets.
		 * Coalesced packets should never have padding.
		 */
		if (!rmnet_frag_trim(frag_desc, port, len)) {
			rc = -EINVAL;
			break;
		}

		list_del_init(&frag_desc->list);
		list_add_tail(&frag_desc->list, list);
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

/* Perf hook handler */
rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_desc_entry);

static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
			     struct rmnet_port *port)
{
	rmnet_perf_desc_hook_t rmnet_perf_ingress;
	struct rmnet_map_header *qmap, __qmap;
	struct rmnet_endpoint *ep;
	struct rmnet_frag_descriptor *frag, *tmp;
	LIST_HEAD(segs);
	u16 len, pad;
	u8 mux_id;

	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
	if (!qmap)
		goto recycle;

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			rmnet_frag_flow_command(frag_desc, port, len);
			goto recycle;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			rmnet_frag_command(frag_desc, qmap, port);

		goto recycle;
	}

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto recycle;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto recycle;

	frag_desc->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
						       len))
			goto recycle;
	} else {
		/* We only have the main QMAP header to worry about */
		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
			return;

		if (!rmnet_frag_trim(frag_desc, port, len))
			return;

		list_add_tail(&frag_desc->list, &segs);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	rcu_read_lock();
	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
	if (rmnet_perf_ingress) {
		list_for_each_entry_safe(frag, tmp, &segs, list) {
			list_del_init(&frag->list);
			rmnet_perf_ingress(frag, port);
		}
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();
	list_for_each_entry_safe(frag, tmp, &segs, list) {
		list_del_init(&frag->list);
		rmnet_frag_deliver(frag, port);
	}

	return;

recycle:
	rmnet_recycle_frag_descriptor(frag_desc, port);
}

/* Notify perf at the end of SKB chain */
rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_chain_end);

void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port)
{
	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
	LIST_HEAD(desc_list);

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag;

		rmnet_frag_deaggregate(skb, port, &desc_list);
		if (!list_empty(&desc_list)) {
			struct rmnet_frag_descriptor *frag_desc, *tmp;

			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
						 list) {
				list_del_init(&frag_desc->list);
				__rmnet_frag_ingress_handler(frag_desc, port);
			}
		}

		skb_frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		consume_skb(skb);
		skb = skb_frag;
	}

	rcu_read_lock();
	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
	if (rmnet_perf_opt_chain_end)
		rmnet_perf_opt_chain_end();
	rcu_read_unlock();
}

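/* Free every descriptor sitting on the port's free list along with the pool
 * itself. Descriptors still in flight are not on the free list and are not
 * freed here.
 */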
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	struct rmnet_frag_descriptor *frag_desc, *tmp;

	pool = port->frag_desc_pool;

	list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
		kfree(frag_desc);
		pool->pool_size--;
	}

	kfree(pool);
}

int rmnet_descriptor_init(struct rmnet_port *port)
{
	struct rmnet_frag_descriptor_pool *pool;
	int i;

	spin_lock_init(&port->desc_pool_lock);
	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&pool->free_list);
	port->frag_desc_pool = pool;

	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
		struct rmnet_frag_descriptor *frag_desc;

		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
		if (!frag_desc)
			return -ENOMEM;

		INIT_LIST_HEAD(&frag_desc->list);
		INIT_LIST_HEAD(&frag_desc->frags);
		list_add_tail(&frag_desc->list, &pool->free_list);
		pool->pool_size++;
	}

	return 0;
}