Commit be560521 authored by Jesse Brandeburg, committed by Jeff Kirsher

i40e/i40evf: carefully fill tx ring



We need to make sure that we stay away from the cache line
where the DD (done) bit may be getting written back for the
transmit ring, since the hardware may write back the whole
cache line for a partial update.
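
A minimal sketch of the arithmetic behind the new 4-descriptor gap, assuming 16-byte TX descriptors and 64-byte cache lines (both sizes are assumptions here, not stated in the commit): one cache line then holds exactly four descriptors, so reserving a four-descriptor gap keeps the software-written tail out of the cache line the hardware may be writing back when it sets the DD bit.

/* Hypothetical sketch, not driver code: why the gap is 4 descriptors.
 * Assumes a 64-byte cache line and 16-byte TX descriptors.
 */
#include <stdio.h>

#define CACHE_LINE_BYTES 64	/* assumed CPU cache line size */
#define TX_DESC_BYTES    16	/* assumed hardware TX descriptor size */

int main(void)
{
	/* Number of descriptors sharing the cache line that the
	 * hardware may write back when it sets the DD bit.
	 */
	int gap = CACHE_LINE_BYTES / TX_DESC_BYTES;

	printf("descriptor gap to clear one cache line: %d\n", gap); /* 4 */
	return 0;
}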

Change-ID: Id0b6dfc01f654def6a2a021af185803be1915d7e
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Catherine Sullivan <catherine.sullivan@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent ff80301e
drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2102,7 +2102,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 4 desc gap to avoid the cache line where head is,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
@@ -2113,7 +2113,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 	count += skb_shinfo(skb)->nr_frags;
 #endif
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1482,7 +1482,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 4 desc gap to avoid the cache line where head is,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
@@ -1493,7 +1493,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 	count += skb_shinfo(skb)->nr_frags;
 #endif
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
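
To make the new reservation concrete, here is a hedged stand-alone model of the check above (the frame shape, the helper name maybe_stop_tx(), and the free-slot count are all invented for illustration): for an skb with two page fragments and a head that fits in one descriptor, count = 3, and the ring must have 3 + 4 + 1 = 8 free descriptors: three for data, four for the cache-line gap, one for a possible context descriptor.

#include <stdbool.h>
#include <stdio.h>

#define DESC_GAP     4	/* cache-line gap introduced by this commit */
#define CONTEXT_DESC 1	/* room for one context descriptor */

/* Stand-in for i40e_maybe_stop_tx(): the real code compares the
 * requested reservation against the unused descriptors in the ring.
 */
static bool maybe_stop_tx(int unused_descs, int needed)
{
	return unused_descs < needed;
}

int main(void)
{
	int nr_frags = 2;	/* invented example frame */
	int head_descs = 1;	/* head fits in one descriptor */
	int count = nr_frags + head_descs;

	if (maybe_stop_tx(8, count + DESC_GAP + CONTEXT_DESC))
		printf("tx_busy: ring stopped\n");
	else
		printf("ok: %d descriptors needed\n",
		       count + DESC_GAP + CONTEXT_DESC);
	return 0;
}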