rmnet_core: Allow scatter-gather on the DL path
This patch adds the necessary handling for the physical device driver to use multiple pages to hold data in the SKBs. Essentially, the following changes are implemented: - rmnet_frag_descriptor struct now holds a list of frags, instead of a single one. Pushing, pulling, and trimming APIs are updated to use this new format. - QMAP deaggregation now loops over each element in skb_shinfo->frags looking for data. Packets are allowed to be split across multiple pages. All pages containing data for a particular packet will be added to the frag_descriptor struct representing it. - a new API, rmnet_frag_header_ptr() has been added for safely accessing packet headers. This API, modeled after skb_header_pointer(), handles the fact that headers could potentially be split across 2 pages. A pointer to the location of the header is returned in the usual case where the header is physically contiguous. If not, the header is linearized into the user-provided buffer to allow normal header struct read access. - this new header access API is used in all places on the DL path when headers are needed, including QMAP command processing, QMAPv1 handling, QMAPv5 checksum offload, and QMAPv5 coalescing. - RSB/RSC segmentation handling is updated to add all necessary pages containing packet data to the newly created descriptor. Additionally, the pages containing L3 and L4 headers are added as well, as this allows easier downstream processing, and guarantees that the header data will not be freed until all packets that need them have been converted into SKBs. - as all frag_descriptors are now guaranteed to contain the L3 and L4 header data (and because they are no longer guaranteed to be on the same page), the hdr_ptr member has been removed as it no longer serves a purpose. Change-Id: Iebb677a6ae7e442fa55e0d131af59cde1b5ce18a Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
Dieser Commit ist enthalten in:
Datei-Diff unterdrückt, da er zu groß ist
Diff laden
@@ -27,12 +27,16 @@ struct rmnet_frag_descriptor_pool {
|
||||
u32 pool_size;
|
||||
};
|
||||
|
||||
struct rmnet_fragment {
|
||||
struct list_head list;
|
||||
skb_frag_t frag;
|
||||
};
|
||||
|
||||
struct rmnet_frag_descriptor {
|
||||
struct list_head list;
|
||||
struct list_head sub_frags;
|
||||
skb_frag_t frag;
|
||||
u8 *hdr_ptr;
|
||||
struct list_head frags;
|
||||
struct net_device *dev;
|
||||
u32 len;
|
||||
u32 hash;
|
||||
__be32 tcp_seq;
|
||||
__be16 ip_id;
|
||||
@@ -57,18 +61,28 @@ struct rmnet_frag_descriptor *
|
||||
rmnet_get_frag_descriptor(struct rmnet_port *port);
|
||||
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port);
|
||||
void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
|
||||
struct page *p, u32 page_offset, u32 len);
|
||||
void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port, unsigned int size);
|
||||
void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port, unsigned int size);
|
||||
void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
|
||||
u32 len, void *buf);
|
||||
int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct page *p, u32 page_offset, u32 len);
|
||||
int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
|
||||
struct rmnet_frag_descriptor *from,
|
||||
u32 off, u32 len);
|
||||
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
|
||||
int start, u8 *nexthdrp, __be16 *fragp);
|
||||
|
||||
/* QMAP command packets */
|
||||
void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port);
|
||||
int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
|
||||
void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_map_header *qmap, struct rmnet_port *port);
|
||||
int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port, u16 pkt_len);
|
||||
|
||||
/* Ingress data handlers */
|
||||
void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
|
||||
void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
|
||||
struct list_head *list);
|
||||
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port);
|
||||
@@ -84,68 +98,15 @@ void rmnet_descriptor_deinit(struct rmnet_port *port);
|
||||
|
||||
static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
|
||||
{
|
||||
return skb_frag_address(&frag_desc->frag);
|
||||
}
|
||||
struct rmnet_fragment *frag;
|
||||
|
||||
static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port,
|
||||
unsigned int size)
|
||||
{
|
||||
if (size >= skb_frag_size(&frag_desc->frag)) {
|
||||
pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
|
||||
__func__, size, skb_frag_size(&frag_desc->frag));
|
||||
rmnet_recycle_frag_descriptor(frag_desc, port);
|
||||
frag = list_first_entry_or_null(&frag_desc->frags,
|
||||
struct rmnet_fragment, list);
|
||||
|
||||
if (!frag)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
frag_desc->frag.bv_offset += size;
|
||||
skb_frag_size_sub(&frag_desc->frag, size);
|
||||
|
||||
return rmnet_frag_data_ptr(frag_desc);
|
||||
}
|
||||
|
||||
static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct rmnet_port *port,
|
||||
unsigned int size)
|
||||
{
|
||||
if (!size) {
|
||||
pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
|
||||
__func__, skb_frag_size(&frag_desc->frag));
|
||||
rmnet_recycle_frag_descriptor(frag_desc, port);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (size < skb_frag_size(&frag_desc->frag))
|
||||
skb_frag_size_set(&frag_desc->frag, size);
|
||||
|
||||
return rmnet_frag_data_ptr(frag_desc);
|
||||
}
|
||||
|
||||
static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
|
||||
struct page *p, u32 page_offset, u32 len)
|
||||
{
|
||||
get_page(p);
|
||||
__skb_frag_set_page(&frag_desc->frag, p);
|
||||
skb_frag_size_set(&frag_desc->frag, len);
|
||||
frag_desc->frag.bv_offset = page_offset;
|
||||
}
|
||||
|
||||
static inline u8
|
||||
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
|
||||
{
|
||||
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
|
||||
|
||||
data += sizeof(struct rmnet_map_header);
|
||||
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
|
||||
{
|
||||
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
|
||||
|
||||
data += sizeof(struct rmnet_map_header);
|
||||
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
|
||||
return skb_frag_address(&frag->frag);
|
||||
}
|
||||
|
||||
#endif /* _RMNET_DESCRIPTOR_H_ */
|
||||
|
In neuem Issue referenzieren
Einen Benutzer sperren