Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.

Daniel's bug fix for off-by-ones in the new BPF branch instructions, along with the added allowances for "data_end > ptr + x" forms, collided with the metadata additions.

Along with those three changes came verifier test cases, which in their final form I tried to group together properly. If I had just trimmed GIT's conflict tags as-is, this would have split up the meta tests unnecessarily.

In the sockmap code, a set of preemption disabling changes overlapped with the rename of bpf_compute_data_end() to bpf_compute_data_pointers().

Changes were made to the mv88e6060.c driver set addr method which got removed in net-next.

The hyperv transport socket layer had a locking change in 'net' which overlapped with a change of socket state macro usage in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
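For context, the "data_end > ptr + x" allowance refers to the flipped shape of the usual packet-bounds check in BPF programs. The following is only an illustrative sketch, not code from this merge; it assumes a standard XDP setup with SEC() from <bpf/bpf_helpers.h>, and the program/section names are made up for the example.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_bounds_check_example(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* classic form: "ptr + x > data_end" guards the access */
	if (data + 8 > data_end)
		return XDP_DROP;

	/* flipped form: "data_end > ptr + x"; after this test the first
	 * 16 bytes of the packet are also known to be in bounds
	 */
	if (data_end > data + 16)
		return XDP_PASS;

	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";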
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
@@ -1037,6 +1037,32 @@ reset_latency:
 	return false;
 }
 
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 					  union i40e_rx_desc *rx_desc,
 					  u64 qw)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
+	struct i40e_rx_buffer *rx_buffer;
+	u32 ntc = rx_ring->next_to_clean;
 	u8 id;
 
 	/* fetch, update, and store next to clean */
+	rx_buffer = &rx_ring->rx_bi[ntc++];
 	ntc = (ntc < rx_ring->count) ? ntc : 0;
 	rx_ring->next_to_clean = ntc;
 
 	prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+	/* place unused page back on the ring */
+	i40e_reuse_rx_page(rx_ring, rx_buffer);
+	rx_ring->rx_stats.page_reuse_count++;
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 		I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
@@ -1647,32 +1682,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 	return false;
 }
 
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-			       struct i40e_rx_buffer *old_buff)
-{
-	struct i40e_rx_buffer *new_buff;
-	u16 nta = rx_ring->next_to_alloc;
-
-	new_buff = &rx_ring->rx_bi[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->dma = old_buff->dma;
-	new_buff->page = old_buff->page;
-	new_buff->page_offset = old_buff->page_offset;
-	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check