net: add skb frag size accessors
To ease skb->truesize sanitization, it's better to be able to localize all references to skb frag sizes. Define accessors: skb_frag_size() to fetch the frag size, and skb_frag_size_{set|add|sub}() to manipulate it. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

committed by
David S. Miller

parent
dd767856a3
commit
9e903e0852
@@ -1135,8 +1135,8 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
|
||||
len -= SGE_TX_DESC_MAX_PLEN;
|
||||
}
|
||||
for (i = 0; nfrags--; i++) {
|
||||
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
len = frag->size;
|
||||
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
len = skb_frag_size(frag);
|
||||
while (len > SGE_TX_DESC_MAX_PLEN) {
|
||||
count++;
|
||||
len -= SGE_TX_DESC_MAX_PLEN;
|
||||
@@ -1278,9 +1278,9 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
|
||||
frag->size, DMA_TO_DEVICE);
|
||||
skb_frag_size(frag), DMA_TO_DEVICE);
|
||||
desc_mapping = mapping;
|
||||
desc_len = frag->size;
|
||||
desc_len = skb_frag_size(frag);
|
||||
|
||||
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
|
||||
&desc_mapping, &desc_len,
|
||||
@@ -1290,7 +1290,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
|
||||
nfrags == 0);
|
||||
ce->skb = NULL;
|
||||
dma_unmap_addr_set(ce, dma_addr, mapping);
|
||||
dma_unmap_len_set(ce, dma_len, frag->size);
|
||||
dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
|
||||
}
|
||||
ce->skb = skb;
|
||||
wmb();
|
||||
|
@@ -254,7 +254,7 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
|
||||
|
||||
while (frag_idx < nfrags && curflit < WR_FLITS) {
|
||||
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
|
||||
skb_shinfo(skb)->frags[frag_idx].size,
|
||||
skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
|
||||
PCI_DMA_TODEVICE);
|
||||
j ^= 1;
|
||||
if (j == 0) {
|
||||
@@ -977,11 +977,11 @@ static inline unsigned int make_sgl(const struct sk_buff *skb,
|
||||
|
||||
nfrags = skb_shinfo(skb)->nr_frags;
|
||||
for (i = 0; i < nfrags; i++) {
|
||||
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
|
||||
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
|
||||
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
|
||||
DMA_TO_DEVICE);
|
||||
sgp->len[j] = cpu_to_be32(frag->size);
|
||||
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
|
||||
sgp->addr[j] = cpu_to_be64(mapping);
|
||||
j ^= 1;
|
||||
if (j == 0)
|
||||
@@ -1544,7 +1544,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
|
||||
|
||||
si = skb_shinfo(skb);
|
||||
for (i = 0; i < si->nr_frags; i++)
|
||||
pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
|
||||
pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
|
||||
PCI_DMA_TODEVICE);
|
||||
}
|
||||
|
||||
@@ -2118,7 +2118,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
|
||||
rx_frag += nr_frags;
|
||||
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
|
||||
rx_frag->page_offset = sd->pg_chunk.offset + offset;
|
||||
rx_frag->size = len;
|
||||
skb_frag_size_set(rx_frag, len);
|
||||
|
||||
skb->len += len;
|
||||
skb->data_len += len;
|
||||
|
@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
|
||||
end = &si->frags[si->nr_frags];
|
||||
|
||||
for (fp = si->frags; fp < end; fp++) {
|
||||
*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
|
||||
DMA_TO_DEVICE);
|
||||
*++addr = dma_map_page(dev, fp->page, fp->page_offset,
|
||||
skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, *addr))
|
||||
goto unwind;
|
||||
}
|
||||
@@ -224,7 +224,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
|
||||
|
||||
unwind:
|
||||
while (fp-- > si->frags)
|
||||
dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
|
||||
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
|
||||
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
|
||||
out_err:
|
||||
@@ -243,7 +243,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
|
||||
si = skb_shinfo(skb);
|
||||
end = &si->frags[si->nr_frags];
|
||||
for (fp = si->frags; fp < end; fp++)
|
||||
dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
|
||||
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -717,7 +717,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
|
||||
sgl->addr0 = cpu_to_be64(addr[0] + start);
|
||||
nfrags++;
|
||||
} else {
|
||||
sgl->len0 = htonl(si->frags[0].size);
|
||||
sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
|
||||
sgl->addr0 = cpu_to_be64(addr[1]);
|
||||
}
|
||||
|
||||
@@ -732,13 +732,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
|
||||
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
|
||||
|
||||
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
|
||||
to->len[0] = cpu_to_be32(si->frags[i].size);
|
||||
to->len[1] = cpu_to_be32(si->frags[++i].size);
|
||||
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
|
||||
to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
|
||||
to->addr[0] = cpu_to_be64(addr[i]);
|
||||
to->addr[1] = cpu_to_be64(addr[++i]);
|
||||
}
|
||||
if (nfrags) {
|
||||
to->len[0] = cpu_to_be32(si->frags[i].size);
|
||||
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
|
||||
to->len[1] = cpu_to_be32(0);
|
||||
to->addr[0] = cpu_to_be64(addr[i + 1]);
|
||||
}
|
||||
@@ -1417,7 +1417,7 @@ static inline void copy_frags(struct skb_shared_info *ssi,
|
||||
/* usually there's just one frag */
|
||||
ssi->frags[0].page = gl->frags[0].page;
|
||||
ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
|
||||
ssi->frags[0].size = gl->frags[0].size - offset;
|
||||
skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset);
|
||||
ssi->nr_frags = gl->nfrags;
|
||||
n = gl->nfrags - 1;
|
||||
if (n)
|
||||
@@ -1718,8 +1718,8 @@ static int process_responses(struct sge_rspq *q, int budget)
|
||||
bufsz = get_buf_size(rsd);
|
||||
fp->page = rsd->page;
|
||||
fp->page_offset = q->offset;
|
||||
fp->size = min(bufsz, len);
|
||||
len -= fp->size;
|
||||
skb_frag_size_set(fp, min(bufsz, len));
|
||||
len -= skb_frag_size(fp);
|
||||
if (!len)
|
||||
break;
|
||||
unmap_rx_buf(q->adap, &rxq->fl);
|
||||
@@ -1731,7 +1731,7 @@ static int process_responses(struct sge_rspq *q, int budget)
|
||||
*/
|
||||
dma_sync_single_for_cpu(q->adap->pdev_dev,
|
||||
get_buf_addr(rsd),
|
||||
fp->size, DMA_FROM_DEVICE);
|
||||
skb_frag_size(fp), DMA_FROM_DEVICE);
|
||||
|
||||
si.va = page_address(si.frags[0].page) +
|
||||
si.frags[0].page_offset;
|
||||
@@ -1740,7 +1740,7 @@ static int process_responses(struct sge_rspq *q, int budget)
|
||||
si.nfrags = frags + 1;
|
||||
ret = q->handler(q, q->cur_desc, &si);
|
||||
if (likely(ret == 0))
|
||||
q->offset += ALIGN(fp->size, FL_ALIGN);
|
||||
q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
|
||||
else
|
||||
restore_rx_bufs(&si, &rxq->fl, frags);
|
||||
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
|
||||
|
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
|
||||
si = skb_shinfo(skb);
|
||||
end = &si->frags[si->nr_frags];
|
||||
for (fp = si->frags; fp < end; fp++) {
|
||||
*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
|
||||
DMA_TO_DEVICE);
|
||||
*++addr = dma_map_page(dev, fp->page, fp->page_offset,
|
||||
skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, *addr))
|
||||
goto unwind;
|
||||
}
|
||||
@@ -305,7 +305,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
|
||||
|
||||
unwind:
|
||||
while (fp-- > si->frags)
|
||||
dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
|
||||
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
|
||||
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
|
||||
|
||||
out_err:
|
||||
@@ -899,7 +899,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
|
||||
sgl->addr0 = cpu_to_be64(addr[0] + start);
|
||||
nfrags++;
|
||||
} else {
|
||||
sgl->len0 = htonl(si->frags[0].size);
|
||||
sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
|
||||
sgl->addr0 = cpu_to_be64(addr[1]);
|
||||
}
|
||||
|
||||
@@ -915,13 +915,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
|
||||
to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
|
||||
|
||||
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
|
||||
to->len[0] = cpu_to_be32(si->frags[i].size);
|
||||
to->len[1] = cpu_to_be32(si->frags[++i].size);
|
||||
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
|
||||
to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
|
||||
to->addr[0] = cpu_to_be64(addr[i]);
|
||||
to->addr[1] = cpu_to_be64(addr[++i]);
|
||||
}
|
||||
if (nfrags) {
|
||||
to->len[0] = cpu_to_be32(si->frags[i].size);
|
||||
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
|
||||
to->len[1] = cpu_to_be32(0);
|
||||
to->addr[0] = cpu_to_be64(addr[i + 1]);
|
||||
}
|
||||
@@ -1399,7 +1399,7 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
|
||||
ssi = skb_shinfo(skb);
|
||||
ssi->frags[0].page = gl->frags[0].page;
|
||||
ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
|
||||
ssi->frags[0].size = gl->frags[0].size - pull_len;
|
||||
skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len);
|
||||
if (gl->nfrags > 1)
|
||||
memcpy(&ssi->frags[1], &gl->frags[1],
|
||||
(gl->nfrags-1) * sizeof(skb_frag_t));
|
||||
@@ -1451,7 +1451,7 @@ static inline void copy_frags(struct skb_shared_info *si,
|
||||
/* usually there's just one frag */
|
||||
si->frags[0].page = gl->frags[0].page;
|
||||
si->frags[0].page_offset = gl->frags[0].page_offset + offset;
|
||||
si->frags[0].size = gl->frags[0].size - offset;
|
||||
skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset);
|
||||
si->nr_frags = gl->nfrags;
|
||||
|
||||
n = gl->nfrags - 1;
|
||||
@@ -1702,8 +1702,8 @@ int process_responses(struct sge_rspq *rspq, int budget)
|
||||
bufsz = get_buf_size(sdesc);
|
||||
fp->page = sdesc->page;
|
||||
fp->page_offset = rspq->offset;
|
||||
fp->size = min(bufsz, len);
|
||||
len -= fp->size;
|
||||
skb_frag_size_set(fp, min(bufsz, len));
|
||||
len -= skb_frag_size(fp);
|
||||
if (!len)
|
||||
break;
|
||||
unmap_rx_buf(rspq->adapter, &rxq->fl);
|
||||
@@ -1717,7 +1717,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
|
||||
*/
|
||||
dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
|
||||
get_buf_addr(sdesc),
|
||||
fp->size, DMA_FROM_DEVICE);
|
||||
skb_frag_size(fp), DMA_FROM_DEVICE);
|
||||
gl.va = (page_address(gl.frags[0].page) +
|
||||
gl.frags[0].page_offset);
|
||||
prefetch(gl.va);
|
||||
@@ -1728,7 +1728,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
|
||||
*/
|
||||
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
|
||||
if (likely(ret == 0))
|
||||
rspq->offset += ALIGN(fp->size, FL_ALIGN);
|
||||
rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
|
||||
else
|
||||
restore_rx_bufs(&gl, &rxq->fl, frag);
|
||||
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
|
||||
|
Reference in New Issue
Block a user