Packets are now sent over a dedicated MHI channel when indicated by the DFC driver. New dedicated channel is controlled by rmnet driver. Buffers are allocated and supplied to it as needed from a recyclable pool for RX on the channel, and packets will be sent to it and freed manually once the channel indicates that they have been sent. Low latency packets can be aggregated like standard QMAP packets, but have their own aggregation state to prevent mixing default and low latency flows, and to allow each type of flow to use their own send functions (i.e. dev_queue_xmit() versus rmnet_ll_send_skb()). Low latency packets also have their own load-balancing scheme, and do not need to use the SHS module for balancing. To facilitate this, we mark the low latency packets with a non-zero priority value upon receipt from the MHI channel and avoid sending any such marked packets to the SHS ingress hook. DFC has been updated with a new netlink message type to handle swapping a list of bearers from one channel to another. The actual swap is performed asynchronously, and separate netlink ACKs will be sent to the userspace socket when the switch has been completed. Change-Id: I93861d4b004f399ba203d76a71b2f01fa5c0d5d2 Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
116 lines
3.4 KiB
C
116 lines
3.4 KiB
C
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Packet Descriptor Framework
 *
 */

#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_

#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"

/* struct rmnet_frag_descriptor_pool - recycling pool of frag descriptors
 * @free_list: list of descriptors available for reuse
 * @pool_size: total number of descriptors owned by the pool
 */
struct rmnet_frag_descriptor_pool {
	struct list_head free_list;
	u32 pool_size;
};

/* struct rmnet_fragment - one page fragment of packet data
 * @list: link into the owning descriptor's @frags list
 * @frag: page/offset/length triple describing the data
 */
struct rmnet_fragment {
	struct list_head list;
	skb_frag_t frag;
};

/* struct rmnet_frag_descriptor - descriptor for a deaggregated QMAP packet
 * @list: link into a processing or free list
 * @frags: list of struct rmnet_fragment holding the packet data
 * @dev: net device the packet is delivered on
 * @len: total data length across all fragments
 * @hash: flow hash for the packet
 * @priority: non-zero marks a low latency (LL channel) packet so it is
 *            kept away from the SHS ingress hook (see commit description)
 * @tcp_seq: cached TCP sequence number; valid only when @tcp_seq_set
 * @ip_id: cached IP identification field; valid only when @ip_id_set
 * @tcp_flags: cached TCP flags; valid only when @tcp_flags_set
 * @data_offset: offset from the start of the data to the payload
 * @gso_size: segment size for GSO handling
 * @gso_segs: number of GSO segments
 * @ip_len: IP header length
 * @trans_len: transport header length
 * @ip_proto: IP version/protocol — presumably 4 or 6; confirm with users
 * @trans_proto: transport protocol number
 * @pkt_id: packet identifier — NOTE(review): exact semantics not visible
 *          in this header; verify against coalescing code
 * @csum_valid: checksum already validated
 * @hdrs_valid: header fields above have been parsed and are trustworthy
 * @ip_id_set: @ip_id holds a valid value
 * @tcp_seq_set: @tcp_seq holds a valid value
 * @flush_shs: flag consumed by the SHS module — TODO confirm semantics
 * @tcp_flags_set: @tcp_flags holds a valid value
 * @reserved: unused bits
 */
struct rmnet_frag_descriptor {
	struct list_head list;
	struct list_head frags;
	struct net_device *dev;
	u32 len;
	u32 hash;
	u32 priority;
	__be32 tcp_seq;
	__be16 ip_id;
	__be16 tcp_flags;
	u16 data_offset;
	u16 gso_size;
	u16 gso_segs;
	u16 ip_len;
	u16 trans_len;
	u8 ip_proto;
	u8 trans_proto;
	u8 pkt_id;
	u8 csum_valid:1,
	   hdrs_valid:1,
	   ip_id_set:1,
	   tcp_seq_set:1,
	   flush_shs:1,
	   tcp_flags_set:1,
	   reserved:2;
};

/* Descriptor management */

/* Obtain a descriptor from @port's pool (or a fresh allocation — see the
 * implementation in rmnet_descriptor.c).
 */
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);

/* Return @frag_desc to @port's recycling pool. */
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
				   struct rmnet_port *port);

/* Remove @size bytes from the front of the descriptor's data. */
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size);

/* Trim the descriptor's data down to @size bytes from the front. */
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
		      struct rmnet_port *port, unsigned int size);

/* Return a pointer to @len bytes of header data at offset @off, using @buf
 * as a bounce buffer when the span crosses fragments — TODO confirm against
 * the implementation.
 */
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
			    u32 len, void *buf);

/* Append @len bytes of page @p starting at @page_offset to the descriptor. */
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
				   struct page *p, u32 page_offset, u32 len);

/* Append @len bytes starting at offset @off of @from's data to @to. */
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
					 struct rmnet_frag_descriptor *from,
					 u32 off, u32 len);

/* Walk IPv6 extension headers starting at @start; analogous to the kernel's
 * ipv6_skip_exthdr(), additionally reporting the fragment header via @fragp.
 */
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
				int start, u8 *nexthdrp, __be16 *fragp);

/* QMAP command packets */

/* Handle a QMAP command packet described by @qmap. */
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_map_header *qmap, struct rmnet_port *port);

/* Handle a QMAP flow-control command of length @pkt_len. */
int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
			    struct rmnet_port *port, u16 pkt_len);

/* Ingress data handlers */

/* Split an aggregated skb into per-packet descriptors appended to @list.
 * @priority propagates to each descriptor (non-zero for low latency
 * traffic — see file history).
 */
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			    struct list_head *list, u32 priority);

/* Deliver a fully-processed descriptor up the stack. */
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
			struct rmnet_port *port);

/* Process a next-header (coalescing) QMAP packet of length @len, appending
 * resulting descriptors to @list.
 */
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
				       struct rmnet_port *port,
				       struct list_head *list,
				       u16 len);

/* Top-level descriptor-based ingress entry point for @port. */
void rmnet_frag_ingress_handler(struct sk_buff *skb,
				struct rmnet_port *port);

/* Set up / tear down the per-port descriptor pool. */
int rmnet_descriptor_init(struct rmnet_port *port);
void rmnet_descriptor_deinit(struct rmnet_port *port);

static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
|
|
{
|
|
struct rmnet_fragment *frag;
|
|
|
|
frag = list_first_entry_or_null(&frag_desc->frags,
|
|
struct rmnet_fragment, list);
|
|
|
|
if (!frag)
|
|
return NULL;
|
|
|
|
return skb_frag_address(&frag->frag);
|
|
}
|
|
|
|
#endif /* _RMNET_DESCRIPTOR_H_ */
|