@@ -561,28 +561,46 @@ EXPORT_SYMBOL(rmnet_frag_flow_command);
 static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
				       struct rmnet_port *port,
				       struct list_head *list,
-				      u32 start_frag)
+				      u32 start)
 {
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct rmnet_frag_descriptor *frag_desc;
	struct rmnet_map_header *maph, __maph;
	skb_frag_t *frag;
-	u32 i;
-	u32 pkt_len;
+	u32 start_frag, offset, i;
+	u32 start_frag_size, start_frag_off;
+	u32 pkt_len, copy_len = 0;
	int rc;
 
-	frag = &shinfo->frags[start_frag];
+	for (start_frag = 0, offset = 0; start_frag < shinfo->nr_frags;
+	     start_frag++) {
+		frag = &shinfo->frags[start_frag];
+		if (start < skb_frag_size(frag) + offset)
+			break;
+
+		offset += skb_frag_size(frag);
+	}
+
+	if (start_frag == shinfo->nr_frags)
+		return -1;
+
+	/* start - offset is the additional offset into the page to account
+	 * for any data on it we've already used.
+	 */
+	start_frag_size = skb_frag_size(frag) - (start - offset);
+	start_frag_off = skb_frag_off(frag) + (start - offset);
+
	/* Grab the QMAP header. Careful, as there's no guarantee that it's
	 * contiguous!
	 */
-	if (likely(skb_frag_size(frag) >= sizeof(*maph))) {
-		maph = skb_frag_address(frag);
+	if (likely(start_frag_size >= sizeof(*maph))) {
+		maph = skb_frag_address(frag) + (start - offset);
	} else {
		/* The header's split across pages. We can rebuild it.
		 * Probably not faster or stronger than before. But certainly
		 * more linear.
		 */
-		if (skb_copy_bits(skb, 0, &__maph, sizeof(__maph)) < 0)
+		if (skb_copy_bits(skb, start, &__maph, sizeof(__maph)) < 0)
			return -1;
 
		maph = &__maph;
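The hunk above changes the entry point from a frag index to a byte offset: before reading the QMAP header, the function now walks the frag array to find which page holds byte `start`. A minimal standalone sketch of that mapping, with a simplified `struct frag` standing in for the kernel's `skb_frag_t` (the names here are illustrative, not from the driver):

```c
#include <stdint.h>

/* Simplified stand-in for an skb page fragment: only its length
 * matters for the offset walk.
 */
struct frag {
	uint32_t size;
};

/* Map a linear byte offset to (frag index, offset within that frag),
 * mirroring the for-loop added above: accumulate frag sizes until the
 * frag containing 'start' is found. Returns -1 if 'start' is past the
 * end of all frags, as the kernel code does.
 */
static int find_start_frag(const struct frag *frags, uint32_t nr_frags,
			   uint32_t start, uint32_t *intra_off)
{
	uint32_t i, offset = 0;

	for (i = 0; i < nr_frags; i++) {
		if (start < frags[i].size + offset) {
			/* start - offset is how far into this frag
			 * the packet begins
			 */
			*intra_off = start - offset;
			return (int)i;
		}

		offset += frags[i].size;
	}

	return -1;
}
```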
@@ -609,10 +627,10 @@ static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
	/* Check the type. This seems like it should be overkill for less
	 * than a single byte, doesn't it?
	 */
-	if (likely(skb_frag_size(frag) >= sizeof(*maph) + 1)) {
+	if (likely(start_frag_size >= sizeof(*maph) + 1)) {
		type = *((u8 *)maph + sizeof(*maph));
	} else {
-		if (skb_copy_bits(skb, sizeof(*maph), &type,
+		if (skb_copy_bits(skb, start + sizeof(*maph), &type,
				  sizeof(type)) < 0)
			return -1;
	}
@@ -632,27 +650,36 @@ static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
 
	/* Add all frags containing the packet data to the descriptor */
	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
-		u32 frag_size;
-		u32 copy_len;
+		u32 size, off;
+		u32 copy;
 
		frag = &shinfo->frags[i];
-		frag_size = skb_frag_size(frag);
-		copy_len = min_t(u32, frag_size, pkt_len);
+		size = skb_frag_size(frag);
+		off = skb_frag_off(frag);
+		if (i == start_frag) {
+			/* These are different for the first one to account for
+			 * the starting offset.
+			 */
+			size = start_frag_size;
+			off = start_frag_off;
+		}
+
+		copy = min_t(u32, size, pkt_len);
		rc = rmnet_frag_descriptor_add_frag(frag_desc,
-						    skb_frag_page(frag),
-						    skb_frag_off(frag),
-						    copy_len);
+						    skb_frag_page(frag), off,
+						    copy);
		if (rc < 0) {
			rmnet_recycle_frag_descriptor(frag_desc, port);
			return -1;
		}
 
-		pkt_len -= copy_len;
-		skb_frag_off_add(frag, copy_len);
-		skb_frag_size_sub(frag, copy_len);
+		pkt_len -= copy;
+		copy_len += copy;
		/* If the fragment is exhausted, we can move to the next one */
-		if (!skb_frag_size(frag))
+		if (!(size - copy_len)) {
			i++;
+			copy_len = 0;
+		}
	}
 
	if (pkt_len) {
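Note what the copy loop above no longer does: the removed `skb_frag_off_add()`/`skb_frag_size_sub()` calls shrank the source skb's frags in place, while the new loop tracks consumption in the local `copy_len` and leaves the skb untouched. A hedged sketch of the chunking logic, reusing the hypothetical `struct frag` from the previous sketch and eliding the descriptor bookkeeping:

```c
/* Walk frags from start_frag, taking min(bytes left in frag, bytes
 * left in packet) from each. 'start_frag_size' is the first frag's
 * size minus the intra-frag offset, as computed earlier.
 */
static int walk_packet(const struct frag *frags, uint32_t nr_frags,
		       uint32_t start_frag, uint32_t start_frag_size,
		       uint32_t pkt_len)
{
	uint32_t i, copy_len = 0;

	for (i = start_frag; pkt_len > 0 && i < nr_frags; ) {
		uint32_t size = (i == start_frag) ? start_frag_size
						  : frags[i].size;
		uint32_t copy = size < pkt_len ? size : pkt_len;

		/* ... record (page, offset, copy) in the descriptor ... */

		pkt_len -= copy;
		copy_len += copy;
		/* Frag exhausted: move to the next one */
		if (copy_len == size) {
			i++;
			copy_len = 0;
		}
	}

	return pkt_len ? -1 : 0;	/* leftover bytes: truncated packet */
}
```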
@@ -662,22 +689,21 @@ static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
	}
 
	list_add_tail(&frag_desc->list, list);
-	return (int)(i - start_frag);
+	return (int)frag_desc->len;
 }
 
 void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
			     struct list_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	u32 i = 0;
+	u32 start = 0;
	int rc;
 
-	while (i < shinfo->nr_frags) {
-		rc = rmnet_frag_deaggregate_one(skb, port, list, i);
+	while (start < skb->len) {
+		rc = rmnet_frag_deaggregate_one(skb, port, list, start);
		if (rc < 0)
			return;
 
-		i += (u32)rc;
+		start += (u32)rc;
	}
 }
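With `rmnet_frag_deaggregate_one()` now returning the byte length of the descriptor it built (`frag_desc->len`) rather than a frag count, the caller reduces to a plain byte cursor over the skb, which also covers the case where several aggregated packets share a single frag. A worked sketch of the new contract, with a hypothetical stub in place of the real function and invented 700-byte packet lengths purely for illustration:

```c
#include <stdint.h>

/* Hypothetical stand-in for rmnet_frag_deaggregate_one(): pretend every
 * QMAP packet is 700 bytes. Returns bytes consumed, or -1 on error.
 */
static int deaggregate_one(uint32_t start)
{
	(void)start;
	return 700;
}

/* Byte-cursor loop matching the new rmnet_frag_deaggregate(): for a
 * 1400-byte skb holding two 700-byte packets, 'start' steps
 * 0 -> 700 -> 1400 and the loop terminates at skb->len.
 */
static void deaggregate_all(uint32_t skb_len)
{
	uint32_t start = 0;

	while (start < skb_len) {
		int rc = deaggregate_one(start);

		if (rc < 0)
			return;

		start += (uint32_t)rc;
	}
}
```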