/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  13. #ifndef _RMNET_MAP_H_
  14. #define _RMNET_MAP_H_
  15. #include <linux/skbuff.h>
  16. #include "rmnet_config.h"
/* MAP control command payload; follows the MAP header on command packets
 * (cd_bit set). Wire format -- field order and widths must not change.
 */
struct rmnet_map_control_command {
	u8 command_name;		/* enum rmnet_map_commands */
	u8 cmd_type:2;			/* RMNET_MAP_COMMAND_{REQUEST,ACK,...} */
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;		/* correlates request and response */
	union {
		struct {
			u16 ip_family:2;
			u16 reserved:14;
			__be16 flow_control_seq_num;	/* network order */
			__be32 qos_id;			/* network order */
		} flow_control;
		u8 data[0];	/* raw command payload (GNU zero-length array) */
	};
} __aligned(1);
  33. enum rmnet_map_commands {
  34. RMNET_MAP_COMMAND_NONE,
  35. RMNET_MAP_COMMAND_FLOW_DISABLE,
  36. RMNET_MAP_COMMAND_FLOW_ENABLE,
  37. RMNET_MAP_COMMAND_FLOW_START = 7,
  38. RMNET_MAP_COMMAND_FLOW_END = 8,
  39. RMNET_MAP_COMMAND_PB_BYTES = 35,
  40. /* These should always be the last 2 elements */
  41. RMNET_MAP_COMMAND_UNKNOWN,
  42. RMNET_MAP_COMMAND_ENUM_LENGTH
  43. };
  44. enum rmnet_map_v5_header_type {
  45. RMNET_MAP_HEADER_TYPE_UNKNOWN,
  46. RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
  47. RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
  48. RMNET_MAP_HEADER_TYPE_TSO = 0x3,
  49. RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
  50. };
  51. enum rmnet_map_v5_close_type {
  52. RMNET_MAP_COAL_CLOSE_NON_COAL,
  53. RMNET_MAP_COAL_CLOSE_IP_MISS,
  54. RMNET_MAP_COAL_CLOSE_TRANS_MISS,
  55. RMNET_MAP_COAL_CLOSE_HW,
  56. RMNET_MAP_COAL_CLOSE_COAL,
  57. };
  58. enum rmnet_map_v5_close_value {
  59. RMNET_MAP_COAL_CLOSE_HW_NL,
  60. RMNET_MAP_COAL_CLOSE_HW_PKT,
  61. RMNET_MAP_COAL_CLOSE_HW_BYTE,
  62. RMNET_MAP_COAL_CLOSE_HW_TIME,
  63. RMNET_MAP_COAL_CLOSE_HW_EVICT,
  64. };
/* Main QMAP header prepended to every MAP packet (wire format) */
struct rmnet_map_header {
	u8 pad_len:6;		/* trailing pad byte count (see RMNET_MAP_GET_PAD) */
	u8 next_hdr:1;		/* 1: a QMAP v5 header follows this one */
	u8 cd_bit:1;		/* 1: command packet, 0: data packet */
	u8 mux_id;		/* logical channel id (see RMNET_MAP_GET_MUX_ID) */
	__be16 pkt_len;		/* payload length, network order */
} __aligned(1);
/* QMAP v5 headers */

/* v5 checksum-offload header (header_type == ..._CSUM_OFFLOAD) */
struct rmnet_map_v5_csum_header {
	u8 next_hdr:1;
	u8 header_type:7;	/* enum rmnet_map_v5_header_type */
	u8 hw_reserved:4;
	u8 aps_prio:1;
	u8 priority:1;
	u8 hw_reserved_bit:1;
	u8 csum_valid_required:1;	/* read by rmnet_map_get_csum_valid() */
	__be16 reserved;
} __aligned(1);
/* One number/length object (NLO) inside a coalescing header */
struct rmnet_map_v5_nl_pair {
	__be16 pkt_len;		/* network order */
	u8 csum_error_bitmap;	/* presumably one error bit per packet -- confirm */
	u8 num_packets;		/* packets described by this NLO */
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)	/* NLO slots per coalescing header */
#define RMNET_MAP_V5_MAX_PACKETS (48)	/* max packets in one coal frame */

/* v5 coalescing header (header_type == ..._COALESCING) */
struct rmnet_map_v5_coal_header {
	u8 next_hdr:1;
	u8 header_type:7;	/* enum rmnet_map_v5_header_type */
	u8 reserved1:4;
	u8 num_nlos:3;		/* valid entries in nl_pairs[] */
	u8 csum_valid:1;
	u8 close_type:4;	/* enum rmnet_map_v5_close_type */
	u8 close_value:4;	/* enum rmnet_map_v5_close_value for HW closes */
	u8 reserved2:4;
	u8 virtual_channel_id:4;
	struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* v5 TSO header (header_type == RMNET_MAP_HEADER_TYPE_TSO) */
struct rmnet_map_v5_tso_header {
	u8 next_hdr:1;
	u8 header_type:7;	/* enum rmnet_map_v5_header_type */
	u8 hw_reserved:5;
	u8 priority:1;
	u8 zero_csum:1;
	u8 ip_id_cfg:1;
	__be16 segment_size;	/* segment size, network order */
} __aligned(1);
/* QMAP v4 headers */

/* v4 downlink checksum trailer appended to received packets */
struct rmnet_map_dl_csum_trailer {
	u8 reserved1;
	u8 valid:1;		/* 1: checksum fields below are valid */
	u8 reserved2:7;
	u16 csum_start_offset;
	u16 csum_length;
	__be16 csum_value;	/* network order */
} __aligned(1);
/* v4 uplink checksum-offload request header */
struct rmnet_map_ul_csum_header {
	__be16 csum_start_offset;	/* network order */
	u16 csum_insert_offset:14;	/* where HW writes the checksum -- from name; confirm */
	u16 udp_ind:1;			/* payload is UDP */
	u16 csum_enabled:1;		/* request HW checksumming */
} __aligned(1);
/* Extended MAP command header used by the v2 DL/PB indication path */
struct rmnet_map_control_command_header {
	u8 command_name;	/* enum rmnet_map_commands */
	u8 cmd_type:2;		/* RMNET_MAP_COMMAND_{REQUEST,ACK,...} */
	u8 reserved:5;
	u8 e:1;
	u16 source_id:15;
	u16 ext:1;
	u32 transaction_id;
} __aligned(1);
/* Per-flow stats record in a DL header indication, "le" view.
 * NOTE(review): these fields are __be32 while rmnet_map_flow_info_be
 * uses plain u32 -- the le/be naming looks inverted; confirm against
 * the code that parses the indication.
 */
struct rmnet_map_flow_info_le {
	__be32 mux_id;
	__be32 flow_id;
	__be32 bytes;
	__be32 pkts;
} __aligned(1);
/* Per-flow stats record in a DL header indication, "be" view.
 * NOTE(review): fields are u32 here but __be32 in the "le" struct --
 * naming looks inverted; verify before relying on it.
 */
struct rmnet_map_flow_info_be {
	u32 mux_id;
	u32 flow_id;
	u32 bytes;
	u32 pkts;
} __aligned(1);
/* Power-boost (PB) indication payload. The union offers le/be views of
 * the same four words; note both arms are currently plain u32
 * (NOTE(review): the be arm presumably intended __be32, as in
 * rmnet_map_dl_ind_hdr -- confirm).
 */
struct rmnet_map_pb_ind_hdr {
	union {
		struct {
			u32 seq_num;
			u32 start_end_seq_num;
			u32 row_bytes_pending;
			u32 fc_bytes_pending;
		} le __aligned(1);
		struct {
			u32 seq_num;
			u32 start_end_seq_num;
			u32 row_bytes_pending;
			u32 fc_bytes_pending;
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);
/* Registration node for a PB indication handler; linked on the port's
 * handler list via rmnet_map_pb_ind_register().
 */
struct rmnet_map_pb_ind {
	u8 priority;	/* handler ordering -- TODO confirm ordering rule */
	void (*pb_ind_handler)(struct rmnet_map_pb_ind_hdr *pbhdr);
	struct list_head list;
};
/* DL marker header indication: counters plus a variable-length
 * per-flow array. The le/be arms give host- vs network-order views of
 * the same layout.
 */
struct rmnet_map_dl_ind_hdr {
	union {
		struct {
			u32 seq;
			u32 bytes;
			u32 pkts;
			u32 flows;
			struct rmnet_map_flow_info_le flow[0];	/* 'flows' entries */
		} le __aligned(1);
		struct {
			__be32 seq;
			__be32 bytes;
			__be32 pkts;
			__be32 flows;
			struct rmnet_map_flow_info_be flow[0];	/* 'flows' entries */
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);
/* DL marker trailer indication: the closing sequence number, viewable
 * in either byte order.
 */
struct rmnet_map_dl_ind_trl {
	union {
		__be32 seq_be;	/* network-order view */
		u32 seq_le;	/* host-order view */
	} __aligned(1);
} __aligned(1);
/* Registration node for DL marker header/trailer handlers; linked on
 * the port via rmnet_map_dl_ind_register().
 */
struct rmnet_map_dl_ind {
	u8 priority;	/* handler ordering -- TODO confirm ordering rule */
	void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
				  struct rmnet_map_control_command_header *qcmd);
	void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
				  struct rmnet_map_control_command_header *qcmd);
	struct list_head list;
};
/* Accessors for the MAP header at the start of an skb.
 * NOTE(review): these dereference (Y)->data directly and so assume a
 * linear skb; the inline helpers below use rmnet_map_data_ptr()
 * instead, which also handles the frag-0-only nonlinear case.
 */
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
			      (Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
				    ((Y)->data + \
				     sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
					(Y)->data)->pkt_len))

/* Headroom reserved per packet during deaggregation */
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* cmd_type values in the MAP command headers */
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3

/* 'pad' argument to rmnet_map_add_map_header() */
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
  221. static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
  222. {
  223. /* Nonlinear packets we receive are entirely within frag 0 */
  224. if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
  225. return skb_frag_address(skb_shinfo(skb)->frags);
  226. return skb->data;
  227. }
  228. static inline struct rmnet_map_control_command *
  229. rmnet_map_get_cmd_start(struct sk_buff *skb)
  230. {
  231. unsigned char *data = rmnet_map_data_ptr(skb);
  232. data += sizeof(struct rmnet_map_header);
  233. return (struct rmnet_map_control_command *)data;
  234. }
  235. static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
  236. {
  237. unsigned char *data = rmnet_map_data_ptr(skb);
  238. data += sizeof(struct rmnet_map_header);
  239. return ((struct rmnet_map_v5_coal_header *)data)->header_type;
  240. }
  241. static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
  242. {
  243. unsigned char *data = rmnet_map_data_ptr(skb);
  244. data += sizeof(struct rmnet_map_header);
  245. return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
  246. }
/* MAP data path: deaggregation, header add, checksum offload */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad,
						  struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type);
bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list,
				      u16 len);

/* Uplink aggregation */
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
			    bool low_latency);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
				    u16 size, u8 count, u8 features, u32 time);

/* DL marker and power-boost indication dispatch / registration */
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_hdr *dl_hdr,
				struct rmnet_map_control_command_header *qcmd);
void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
				struct rmnet_map_dl_ind_trl *dltrl,
				struct rmnet_map_control_command_header *qcmd);
void rmnet_map_pb_ind_notify(struct rmnet_port *port,
			     struct rmnet_map_pb_ind_hdr *pbhdr);
int rmnet_map_flow_command(struct sk_buff *skb,
			   struct rmnet_port *port,
			   bool rmnet_perf);
void rmnet_map_cmd_init(struct rmnet_port *port);
int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_pb_ind_register(struct rmnet_port *port,
			      struct rmnet_map_pb_ind *pb_ind);
int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_pb_ind *pb_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);

/* Command transmit and TSO helpers */
void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state);
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
			     struct net_device *orig_dev);
  294. #endif /* _RMNET_MAP_H_ */