Commit 9e903e08 authored by Eric Dumazet, committed by David S. Miller

net: add skb frag size accessors



To ease skb->truesize sanitization, it's better to be able to localize
all references to skb frag sizes.

Define accessors: skb_frag_size() to fetch a frag's size, and
skb_frag_size_{set|add|sub}() to manipulate it.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dd767856
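
The include/linux/skbuff.h hunk that introduces the helpers is not part of this excerpt; they are presumably nothing more than trivial inline wrappers around the fragment's size field, along these lines (a sketch, not the verbatim patch):

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

Once every read and write is funneled through these helpers, skb->truesize accounting can be audited, and the representation of the size field changed, without touching each driver again.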
......@@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_frag_page(&skb_shinfo(skb)->frags[i]) +
skb_shinfo(skb)->frags[i].page_offset,
skb_shinfo(skb)->frags[i].size);
skb_frag_size(&skb_shinfo(skb)->frags[i]));
}
if (skb->len & 3)
put_dma(tx->index,eni_dev->dma,&j,zeroes,4-(skb->len & 3));
......
......@@ -800,8 +800,8 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Loop thru additional data fragments and queue them */
if (skb_shinfo(skb)->nr_frags) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
maplen = frag->size;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
maplen = skb_frag_size(frag);
mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
0, maplen, DMA_TO_DEVICE);
elem = elem->next;
......
......@@ -444,10 +444,10 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
skb_frag_t *frag =
&skb_shinfo(skb)->frags[skb_fragment_index];
bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
frag, 0, frag->size,
frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
bus_address);
wqe_fragment_index++;
......@@ -565,7 +565,7 @@ tso_sq_no_longer_full:
&skb_shinfo(skb)->frags[tso_frag_count];
tso_bus_address[tso_frag_count] =
skb_frag_dma_map(&nesdev->pcidev->dev,
frag, 0, frag->size,
frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
}
......@@ -637,11 +637,11 @@ tso_sq_no_longer_full:
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
(u64)tso_bus_address[tso_frag_index]);
wqe_fragment_index++;
tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
if (tso_frag_index == tso_frag_count)
......
......@@ -543,7 +543,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
} else {
size = min(length, (unsigned) PAGE_SIZE);
frag->size = size;
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
skb->len += size;
......
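
The skb_put_frags() hunk above and the ipoib_ud_skb_put_frags() hunk below replace direct stores to frag->size with skb_frag_size_set(). The underlying receive-side fill pattern is roughly the following (a sketch; the helper name and surrounding details are hypothetical):

/* Attach up to one page of payload to frag i of an skb and account for it.
 * Assumes skb_shinfo(skb)->nr_frags already covers index i.
 */
static void rx_fill_frag(struct sk_buff *skb, int i, struct page *page,
			 unsigned int length)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	unsigned int size = min(length, (unsigned int)PAGE_SIZE);

	__skb_frag_set_page(frag, page);	/* point the frag at the page */
	frag->page_offset = 0;
	skb_frag_size_set(frag, size);		/* was: frag->size = size; */

	/* the new payload counts against len, data_len and truesize */
	skb->data_len += size;
	skb->len += size;
	skb->truesize += size;
}
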
......@@ -117,7 +117,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
size = length - IPOIB_UD_HEAD_SIZE;
frag->size = size;
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
} else
......@@ -322,10 +322,10 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
frag->page_offset, frag->size,
frag->page_offset, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
goto partial_error;
......@@ -334,8 +334,9 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
partial_error:
for (; i > 0; --i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
}
if (off)
......@@ -359,8 +360,9 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(ca, mapping[i + off], frag->size,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
DMA_TO_DEVICE);
}
}
......@@ -510,7 +512,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
for (i = 0; i < nr_frags; ++i) {
priv->tx_sge[i + off].addr = mapping[i + off];
priv->tx_sge[i + off].length = frags[i].size;
priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
}
priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id;
......
......@@ -2182,12 +2182,12 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(pci_map_single(
VORTEX_PCI(vp),
(void *)skb_frag_address(frag),
frag->size, PCI_DMA_TODEVICE));
skb_frag_size(frag), PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
else
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
}
}
#else
......
......@@ -810,15 +810,15 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
txd = (struct tx_desc *) (txRing->ringBase +
txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
len = frag->size;
len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
PCI_DMA_TODEVICE);
......
......@@ -1256,12 +1256,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
} else {
skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
status |= this_frag->size;
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev,
skb_frag_address(this_frag),
this_frag->size,
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
......@@ -1378,7 +1378,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
skb_shinfo(skb)->frags[i].size,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
np->dirty_tx++;
entry++;
......
......@@ -198,7 +198,7 @@ static void greth_clean_rings(struct greth_private *greth)
dma_unmap_page(greth->dev,
greth_read_bd(&tx_bdp->addr),
frag->size,
skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
......@@ -517,7 +517,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
status = GRETH_BD_EN;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
status |= frag->size & GRETH_BD_LEN;
status |= skb_frag_size(frag) & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (curr_tx == GRETH_TXBD_NUM_MASK)
......@@ -531,7 +531,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth_write_bd(&bdp->stat, status);
dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
......@@ -713,7 +713,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
frag->size,
skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
......
......@@ -2478,18 +2478,18 @@ restart:
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
len += frag->size;
len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
frag->size,
skb_frag_size(frag),
DMA_TO_DEVICE);
flagsize = (frag->size << 16);
flagsize = skb_frag_size(frag) << 16;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
......@@ -2508,7 +2508,7 @@ restart:
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
dma_unmap_len_set(info, maplen, frag->size);
dma_unmap_len_set(info, maplen, skb_frag_size(frag));
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
......
......@@ -2179,7 +2179,7 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = frag->size;
buffer_info->length = skb_frag_size(frag);
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, 0,
buffer_info->length,
......
......@@ -1593,7 +1593,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 proto_hdr_len = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
fg_size = skb_shinfo(skb)->frags[i].size;
fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
......@@ -1744,12 +1744,12 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
const struct skb_frag_struct *frag;
u16 i;
u16 seg_num;
frag = &skb_shinfo(skb)->frags[f];
buf_len = frag->size;
buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
for (i = 0; i < seg_num; i++) {
......
......@@ -2267,11 +2267,11 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
const struct skb_frag_struct *frag;
u16 i, nseg;
frag = &skb_shinfo(skb)->frags[f];
buf_len = frag->size;
buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
......@@ -2356,7 +2356,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
int count = 1;
int ret_val;
struct tx_packet_desc *ptpd;
u16 frag_size;
u16 vlan_tag;
unsigned int nr_frags = 0;
unsigned int mss = 0;
......@@ -2372,10 +2371,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
frag_size = skb_shinfo(skb)->frags[f].size;
if (frag_size)
count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
}
mss = skb_shinfo(skb)->gso_size;
......
......@@ -2871,7 +2871,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
......@@ -3049,7 +3049,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
} else {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[i - 1];
frag->size -= tail;
skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
return 0;
......@@ -5395,7 +5395,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
skb_frag_size(&skb_shinfo(skb)->frags[k]),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(skb);
......@@ -6530,13 +6530,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->is_gso = skb_is_gso(skb);
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping))
......@@ -6594,7 +6594,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
......
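
In-place adjustments now go through skb_frag_size_add()/skb_frag_size_sub(); the bnx2_rx_skb() hunk above, which shaves trailing bytes off the last fragment, reduces to a pattern like this (a minimal sketch; the standalone helper is hypothetical):

/* Trim 'tail' bytes of paged data from the end of an skb.
 * Assumes tail is no larger than the last fragment's size.
 */
static void trim_last_frag(struct sk_buff *skb, unsigned int tail)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];

	skb_frag_size_sub(frag, tail);	/* was: frag->size -= tail; */
	skb->data_len -= tail;
	skb->len -= tail;
}
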
......@@ -2363,7 +2363,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Calculate the first sum - it's special */
for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
wnd_sum +=
skb_shinfo(skb)->frags[frag_idx].size;
skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
/* If there was data on linear skb data - check it */
if (first_bd_sz > 0) {
......@@ -2379,14 +2379,14 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
check all windows */
for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
wnd_sum +=
skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
if (unlikely(wnd_sum < lso_mss)) {
to_copy = 1;
break;
}
wnd_sum -=
skb_shinfo(skb)->frags[wnd_idx].size;
skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
}
} else {
/* in non-LSO too fragmented packet should always
......@@ -2796,8 +2796,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
DMA_TO_DEVICE);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
......@@ -2821,8 +2821,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_data_bd->nbytes = cpu_to_le16(frag->size);
le16_add_cpu(&pkt_size, frag->size);
tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
le16_add_cpu(&pkt_size, skb_frag_size(frag));
nbd++;
DP(NETIF_MSG_TX_QUEUED,
......
......@@ -5356,7 +5356,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pci_unmap_page(tp->pdev,
dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
while (ri->fragmented) {
......@@ -6510,14 +6510,14 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
}
for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
txb = &tnapi->tx_buffers[entry];
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
frag->size, PCI_DMA_TODEVICE);
skb_frag_size(frag), PCI_DMA_TODEVICE);
while (txb->fragmented) {
txb->fragmented = false;
......@@ -6777,7 +6777,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
len, DMA_TO_DEVICE);
......
......@@ -116,7 +116,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
for (j = 0; j < frag; j++) {
dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
dma_unmap_addr_set(&array[index], dma_addr, 0);
BNA_QE_INDX_ADD(index, 1, depth);
}
......@@ -2741,8 +2741,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis_used = 1;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
u16 size = frag->size;
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
u16 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
unmap_prod = unmap_q->producer_index;
......
......@@ -1135,8 +1135,8 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
len -= SGE_TX_DESC_MAX_PLEN;
}
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = frag->size;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
......@@ -1278,9 +1278,9 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
frag->size, DMA_TO_DEVICE);
skb_frag_size(frag), DMA_TO_DEVICE);
desc_mapping = mapping;
desc_len = frag->size;
desc_len = skb_frag_size(frag);
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
&desc_mapping, &desc_len,
......@@ -1290,7 +1290,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
nfrags == 0);
ce->skb = NULL;
dma_unmap_addr_set(ce, dma_addr, mapping);
dma_unmap_len_set(ce, dma_len, frag->size);
dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
}
ce->skb = skb;
wmb();
......
......@@ -254,7 +254,7 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
while (frag_idx < nfrags && curflit < WR_FLITS) {
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
skb_shinfo(skb)->frags[frag_idx].size,
skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
PCI_DMA_TODEVICE);
j ^= 1;
if (j == 0) {
......@@ -977,11 +977,11 @@ static inline unsigned int make_sgl(const struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
sgp->len[j] = cpu_to_be32(frag->size);
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
......@@ -1544,7 +1544,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
PCI_DMA_TODEVICE);
}
......@@ -2118,7 +2118,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;
skb_frag_size_set(rx_frag, len);
skb->len += len;
skb->data_len += len;
......
......@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
DMA_TO_DEVICE);
*++addr = dma_map_page(dev, fp->page, fp->page_offset,
skb_frag_size(fp), DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
......@@ -224,7 +224,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
unwind:
while (fp-- > si->frags)
dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
......@@ -243,7 +243,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++)
dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
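
The transmit-path conversions throughout this patch share one shape: read the fragment length via skb_frag_size() and hand the fragment to skb_frag_dma_map(). A generic sketch of that loop, not lifted from any single driver above (the function name and caller-supplied mapping array are illustrative only):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Map every paged fragment of an skb for device DMA. */
static int tx_map_frags(struct device *dev, struct sk_buff *skb,
			dma_addr_t *mapping)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int len = skb_frag_size(frag);	/* was: frag->size */

		mapping[i] = skb_frag_dma_map(dev, frag, 0, len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mapping[i]))
			return -ENOMEM;	/* caller unwinds earlier mappings */
	}
	return 0;
}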