  1. /* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #ifndef _RMNET_MAP_H_
  13. #define _RMNET_MAP_H_
  14. #include <linux/skbuff.h>
  15. #include "rmnet_config.h"
/* MAP control command payload; follows the MAP header when cd_bit is set.
 * This mirrors the over-the-wire layout — do not reorder or resize fields.
 */
struct rmnet_map_control_command {
	u8  command_name;	/* enum rmnet_map_commands */
	u8  cmd_type:2;		/* RMNET_MAP_COMMAND_REQUEST/ACK/... */
	u8  reserved:6;
	u16 reserved2;
	u32 transaction_id;	/* echoed back in command ACKs */
	union {
		struct {
			u16 ip_family:2;
			u16 reserved:14;
			__be16 flow_control_seq_num;
			__be32 qos_id;
		} flow_control;
		u8 data[0];	/* raw payload for other command types */
	};
} __aligned(1);
/* Command codes carried in rmnet_map_control_command.command_name */
enum rmnet_map_commands {
	RMNET_MAP_COMMAND_NONE,
	RMNET_MAP_COMMAND_FLOW_DISABLE,
	RMNET_MAP_COMMAND_FLOW_ENABLE,
	RMNET_MAP_COMMAND_FLOW_START = 7,
	RMNET_MAP_COMMAND_FLOW_END = 8,
	/* These should always be the last 2 elements */
	RMNET_MAP_COMMAND_UNKNOWN,
	RMNET_MAP_COMMAND_ENUM_LENGTH
};
/* MAP v5 next-header types, carried in the v5 headers' header_type field */
enum rmnet_map_v5_header_type {
	RMNET_MAP_HEADER_TYPE_UNKNOWN,
	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
	RMNET_MAP_HEADER_TYPE_TSO = 0x3,
	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
/* Why a coalescing frame was closed (coal header close_type field) */
enum rmnet_map_v5_close_type {
	RMNET_MAP_COAL_CLOSE_NON_COAL,
	RMNET_MAP_COAL_CLOSE_IP_MISS,
	RMNET_MAP_COAL_CLOSE_TRANS_MISS,
	RMNET_MAP_COAL_CLOSE_HW,
	RMNET_MAP_COAL_CLOSE_COAL,
};
/* HW close reason detail, valid when close_type is RMNET_MAP_COAL_CLOSE_HW */
enum rmnet_map_v5_close_value {
	RMNET_MAP_COAL_CLOSE_HW_NL,
	RMNET_MAP_COAL_CLOSE_HW_PKT,
	RMNET_MAP_COAL_CLOSE_HW_BYTE,
	RMNET_MAP_COAL_CLOSE_HW_TIME,
	RMNET_MAP_COAL_CLOSE_HW_EVICT,
};
/* Main QMAP header, prepended to every MAP packet (wire format) */
struct rmnet_map_header {
	u8  pad_len:6;		/* trailing pad bytes counted in pkt_len */
	u8  next_hdr:1;		/* a MAP v5 header follows this one */
	u8  cd_bit:1;		/* 1 = control command, 0 = data */
	u8  mux_id;		/* logical channel identifier */
	__be16 pkt_len;		/* payload length including padding */
} __aligned(1);
/* QMAP v5 headers */

/* MAP v5 checksum-offload header (wire format) */
struct rmnet_map_v5_csum_header {
	u8  next_hdr:1;
	u8  header_type:7;	/* enum rmnet_map_v5_header_type */
	u8  hw_reserved:5;
	u8  priority:1;
	u8  hw_reserved_bit:1;
	u8  csum_valid_required:1;	/* DL: csum valid; UL: csum requested */
	__be16 reserved;
} __aligned(1);
/* One number-length (NL) pair within a coalescing header (wire format) */
struct rmnet_map_v5_nl_pair {
	__be16 pkt_len;		/* length of each packet in this NLO */
	u8  csum_error_bitmap;	/* per-packet checksum error flags */
	u8  num_packets;	/* packets described by this pair */
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)

/* MAP v5 coalescing header (wire format) */
struct rmnet_map_v5_coal_header {
	u8  next_hdr:1;
	u8  header_type:7;	/* enum rmnet_map_v5_header_type */
	u8  reserved1:4;
	u8  num_nlos:3;		/* valid entries in nl_pairs[] */
	u8  csum_valid:1;
	u8  close_type:4;	/* enum rmnet_map_v5_close_type */
	u8  close_value:4;	/* enum rmnet_map_v5_close_value */
	u8  reserved2:4;
	u8  virtual_channel_id:4;
	struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* MAP v5 TSO (uplink segmentation-offload) header (wire format) */
struct rmnet_map_v5_tso_header {
	u8  next_hdr:1;
	u8  header_type:7;	/* enum rmnet_map_v5_header_type */
	u8  hw_reserved:5;
	u8  priority:1;
	u8  zero_csum:1;	/* presumably asks HW to zero the csum — confirm */
	u8  ip_id_cfg:1;
	__be16 segment_size;	/* MSS for segmentation */
} __aligned(1);
/* QMAP v4 headers */

/* MAP v4 downlink checksum trailer appended by hardware (wire format).
 * NOTE(review): csum_start_offset/csum_length are plain u16 while
 * csum_value is __be16; upstream rmnet declares all three big-endian —
 * confirm against the downlink checksum validation code.
 */
struct rmnet_map_dl_csum_trailer {
	u8  reserved1;
	u8  valid:1;		/* trailer contents are valid */
	u8  reserved2:7;
	u16 csum_start_offset;
	u16 csum_length;
	__be16 csum_value;
} __aligned(1);
/* MAP v4 uplink checksum-request header (wire format) */
struct rmnet_map_ul_csum_header {
	__be16 csum_start_offset;	/* offset where csum coverage starts */
	u16 csum_insert_offset:14;	/* where HW writes the result */
	u16 udp_ind:1;			/* transport is UDP */
	u16 csum_enabled:1;		/* request HW checksum */
} __aligned(1);
/* Extended MAP command header used by flow/DL-marker commands (wire format) */
struct rmnet_map_control_command_header {
	u8  command_name;	/* enum rmnet_map_commands */
	u8  cmd_type:2;		/* RMNET_MAP_COMMAND_REQUEST/ACK/... */
	u8  reserved:5;
	u8  e:1;
	u16 source_id:15;
	u16 ext:1;
	u32 transaction_id;
} __aligned(1);
/* Per-flow stats record, little-endian variant of the DL indication.
 * NOTE(review): fields here are declared __be32 while the _be variant
 * uses u32 — the type annotations look swapped relative to the names;
 * confirm against the DL-marker command handlers before changing.
 */
struct rmnet_map_flow_info_le {
	__be32 mux_id;
	__be32 flow_id;
	__be32 bytes;
	__be32 pkts;
} __aligned(1);
/* Per-flow stats record, big-endian variant of the DL indication.
 * NOTE(review): declared with plain u32 while the _le variant uses
 * __be32 — annotations look swapped relative to the names; verify
 * against the DL-marker command handlers.
 */
struct rmnet_map_flow_info_be {
	u32 mux_id;
	u32 flow_id;
	u32 bytes;
	u32 pkts;
} __aligned(1);
/* DL marker start indication: one payload, viewable in either byte order.
 * Both union members overlay the same wire bytes; flow[] is a
 * variable-length tail with one entry per reported flow.
 */
struct rmnet_map_dl_ind_hdr {
	union {
		struct {
			u32 seq;
			u32 bytes;
			u32 pkts;
			u32 flows;
			struct rmnet_map_flow_info_le flow[0];
		} le __aligned(1);
		struct {
			__be32 seq;
			__be32 bytes;
			__be32 pkts;
			__be32 flows;
			struct rmnet_map_flow_info_be flow[0];
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);
/* DL marker end indication: sequence number, either byte order */
struct rmnet_map_dl_ind_trl {
	union {
		__be32 seq_be;
		u32 seq_le;
	} __aligned(1);
} __aligned(1);
/* In-kernel registration for DL marker callbacks (not a wire format).
 * Registered/deregistered via rmnet_map_dl_ind_(de)register(); handlers
 * are invoked on DL start/end indications.
 */
struct rmnet_map_dl_ind {
	u8 priority;		/* ordering among registered listeners */
	void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
				  struct rmnet_map_control_command_header *qcmd);
	void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
				  struct rmnet_map_control_command_header *qcmd);
	struct list_head list;	/* linked into the port's listener list */
};
/* Accessors for the MAP header at the start of an skb.
 * NOTE(review): these dereference (Y)->data directly and do not handle
 * the nonlinear frag-0 case that rmnet_map_data_ptr() covers — use the
 * inline helpers below for packets that may be nonlinear.
 */
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
				(Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
				(Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
				    ((Y)->data + \
				      sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
					(Y)->data)->pkt_len))

/* Headroom reserved when splitting deaggregated packets */
#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* Values for rmnet_map_control_command.cmd_type */
#define RMNET_MAP_COMMAND_REQUEST     0
#define RMNET_MAP_COMMAND_ACK         1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID     3

/* Padding modes for rmnet_map_add_map_header() */
#define RMNET_MAP_NO_PAD_BYTES  0
#define RMNET_MAP_ADD_PAD_BYTES 1
  197. static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
  198. {
  199. /* Nonlinear packets we receive are entirely within frag 0 */
  200. if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
  201. return skb_frag_address(skb_shinfo(skb)->frags);
  202. return skb->data;
  203. }
  204. static inline struct rmnet_map_control_command *
  205. rmnet_map_get_cmd_start(struct sk_buff *skb)
  206. {
  207. unsigned char *data = rmnet_map_data_ptr(skb);
  208. data += sizeof(struct rmnet_map_header);
  209. return (struct rmnet_map_control_command *)data;
  210. }
  211. static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
  212. {
  213. unsigned char *data = rmnet_map_data_ptr(skb);
  214. data += sizeof(struct rmnet_map_header);
  215. return ((struct rmnet_map_v5_coal_header *)data)->header_type;
  216. }
  217. static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
  218. {
  219. unsigned char *data = rmnet_map_data_ptr(skb);
  220. data += sizeof(struct rmnet_map_header);
  221. return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
  222. }
/* MAP data path (rmnet_map_data.c) */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad,
						  struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type);
bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list,
				      u16 len);

/* Uplink aggregation */
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
				    u8 count, u8 features, u32 time);
void rmnet_map_send_agg_skb(struct rmnet_port *port, unsigned long flags);

/* MAP commands and DL marker indications (rmnet_map_command.c) */
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_hdr *dl_hdr,
				struct rmnet_map_control_command_header *qcmd);
void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_trl *dltrl,
				struct rmnet_map_control_command_header *qcmd);
int rmnet_map_flow_command(struct sk_buff *skb,
			   struct rmnet_port *port,
			   bool rmnet_perf);
void rmnet_map_cmd_init(struct rmnet_port *port);
int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);

/* TSO */
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
			     struct net_device *orig_dev);
  262. #endif /* _RMNET_MAP_H_ */