Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - a few misc things

 - kexec updates

 - DMA-mapping updates to better support networking DMA operations

 - IPC updates

 - various MM changes to improve DAX fault handling

 - lots of radix-tree changes, mainly to the test suite. All leading up
   to reimplementing the IDA/IDR code to be a wrapper layer over the
   radix-tree. However the final trigger-pulling patch is held off for
   4.11.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  radix tree test suite: delete unused rcupdate.c
  radix tree test suite: add new tag check
  radix-tree: ensure counts are initialised
  radix tree test suite: cache recently freed objects
  radix tree test suite: add some more functionality
  idr: reduce the number of bits per level from 8 to 6
  rxrpc: abstract away knowledge of IDR internals
  tpm: use idr_find(), not idr_find_slowpath()
  idr: add ida_is_empty
  radix tree test suite: check multiorder iteration
  radix-tree: fix replacement for multiorder entries
  radix-tree: add radix_tree_split_preload()
  radix-tree: add radix_tree_split
  radix-tree: add radix_tree_join
  radix-tree: delete radix_tree_range_tag_if_tagged()
  radix-tree: delete radix_tree_locate_item()
  radix-tree: improve multiorder iterators
  btrfs: fix race in btrfs_free_dummy_fs_info()
  radix-tree: improve dump output
  radix-tree: make radix_tree_find_next_bit more useful
  ...
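The igb hunks below all serve one pattern: map receive pages once with DMA_ATTR_SKIP_CPU_SYNC and handle ownership transfers with explicit per-fragment syncs, so a recycled page never pays for a full-page sync at map/unmap time. A condensed, illustrative sketch of that lifecycle follows — it uses the real DMA API but placeholder arguments (dev, page, offset, size), and is not the driver's literal code:

	/* Illustrative: map once, sync only the fragment in flight. */
	static int rx_page_lifecycle(struct device *dev, struct page *page,
				     unsigned int offset, unsigned int size)
	{
		dma_addr_t dma;

		/* map the whole page once; skip the implicit CPU sync */
		dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* give one fragment to hardware: sync only that range */
		dma_sync_single_range_for_device(dev, dma, offset, IGB_RX_BUFSZ,
						 DMA_FROM_DEVICE);

		/* ... hardware DMAs a frame into the fragment ... */

		/* before the CPU reads packet bytes, take the range back */
		dma_sync_single_range_for_cpu(dev, dma, offset, size,
					      DMA_FROM_DEVICE);

		/* teardown: with the attr, unmap performs no sync either */
		dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		return 0;
	}

On cache-incoherent hardware this is the point of the series: recycling half of a page costs two small range syncs instead of a full-page unmap/remap, and the sync-for-cpu that used to be implicit in unmap must now appear explicitly everywhere the CPU reads the buffer — hence the new calls in igb_clean_rx_ring() and igb_fetch_rx_buffer() below.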
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
 struct igb_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct igb_tx_queue_stats {
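Two things happen in this struct: page_offset shrinks to 16 bits whenever an in-page offset is guaranteed to fit (any PAGE_SIZE below 64 KiB), and a 16-bit pagecnt_bias is added to track how many page references the driver still holds locally. A small userspace check of the width argument, assuming 4 KiB pages (MY_PAGE_SIZE is a stand-in for the kernel's PAGE_SIZE):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MY_PAGE_SIZE 4096u	/* stand-in for PAGE_SIZE */

	int main(void)
	{
		/* the largest offset ever stored is strictly less than
		 * PAGE_SIZE, so any PAGE_SIZE < 65536 fits in 16 bits
		 */
		static_assert(MY_PAGE_SIZE - 1 <= UINT16_MAX,
			      "page offsets need more than 16 bits");

		uint16_t off = MY_PAGE_SIZE / 2; /* second half of a split page */
		printf("offset %u fits in 16 bits\n", off);
		return 0;
	}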
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3947,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (!buffer_info->page)
 			continue;
 
-		dma_unmap_page(rx_ring->dev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       DMA_FROM_DEVICE);
-		__free_page(buffer_info->page);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      buffer_info->dma,
+					      buffer_info->page_offset,
+					      IGB_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     buffer_info->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(buffer_info->page, 0,
+				  buffer_info->pagecnt_bias);
 
 		buffer_info->page = NULL;
 	}
@@ -6812,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
-					 old_buff->page_offset,
-					 IGB_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool igb_page_is_reserved(struct page *page)
@@ -6829,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
 	/* avoid re-using remote pages */
 	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -6848,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 		return false;
 #endif
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	page_ref_inc(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
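The reuse test and the restock branch implement a single scheme: rather than bumping the atomic page refcount on every recycled fragment, the driver takes a large batch of references up front and pays them out from a local 16-bit bias, touching the atomic only when the batch runs dry. The page is genuinely shared only when the atomic count and the bias disagree. A self-contained userspace model of the accounting (all names hypothetical; the kernel side uses an atomic refcount via page_ref_count()/page_ref_add() on struct page):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIAS_MAX UINT16_MAX	/* stands in for USHRT_MAX */

	struct fake_page { unsigned int refcount; }; /* atomic in the kernel */

	struct rx_buffer {
		struct fake_page *page;
		uint16_t pagecnt_bias;	/* references the driver still holds */
	};

	/* Mirrors the igb_can_reuse_rx_page() logic for small pages. Each
	 * successful reuse hands one driver reference to the network stack
	 * along with the fragment; the stack later returns it (put_page in
	 * the kernel, a plain decrement here).
	 */
	static bool can_reuse(struct rx_buffer *rx)
	{
		unsigned int bias = rx->pagecnt_bias--;

		/* counts disagree: the stack still holds a fragment */
		if (rx->page->refcount != bias)
			return false;

		/* batch drained: restock bias and refcount together */
		if (bias == 1) {
			rx->page->refcount += BIAS_MAX;
			rx->pagecnt_bias = BIAS_MAX;
		}
		return true;
	}

	int main(void)
	{
		struct fake_page pg = { .refcount = 1 };
		struct rx_buffer rx = { .page = &pg, .pagecnt_bias = 1 };

		/* sole owner: reusable, and the batch gets restocked */
		printf("reuse #1: %s\n", can_reuse(&rx) ? "yes" : "no");

		pg.refcount--;	/* stack consumed and freed its fragment */
		printf("reuse #2: %s\n", can_reuse(&rx) ? "yes" : "no");

		/* stack still sitting on the last fragment: no reuse */
		printf("reuse #3: %s\n", can_reuse(&rx) ? "yes" : "no");
		return 0;
	}

With a 16-bit bias restocked to USHRT_MAX, the expensive atomic add happens once per ~65 k recycles instead of once per packet.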
@@ -6903,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 		return true;
 
 	/* this page cannot be reused so discard it */
-	__free_page(page);
 	return false;
 }
 
@@ -6938,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -6962,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
 	if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of rx_buffer */
@@ -7234,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -7249,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
 
 	return true;
 }
@@ -7275,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IGB_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
 
 		/* bound gain by 2 bits value max, 3rd bit is sign */
 		data->delta_gain_code[i] =
-			min(abs(delta_g),
-			    (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+			min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 
 		if (delta_g < 0)
 			/*
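The (s32) cast was only ever there to satisfy min()'s strict type check; it can be dropped here presumably because both arguments now evaluate to plain int — abs() of an s32 preserves the argument type, and the constant is an int. A userspace reproduction of the kernel-style check (the min() macro mirrors the kernel's; the constant's value of 3 matches the "2 bits max" comment but is used here only as illustration):

	#include <stdio.h>
	#include <stdlib.h>

	/* kernel-style min(): comparing pointers to different types draws a
	 * compiler warning, which is how mismatched argument types get
	 * flagged at compile time
	 */
	#define min(x, y) ({			\
		__typeof__(x) _min1 = (x);	\
		__typeof__(y) _min2 = (y);	\
		(void)(&_min1 == &_min2);	\
		_min1 < _min2 ? _min1 : _min2; })

	#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

	int main(void)
	{
		int delta_g = -7;	/* s32 in the kernel */

		/* abs(int) is int and the constant is int: no cast needed */
		int code = min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

		printf("delta_gain_code = %d\n", code);	/* prints 3 */
		return 0;
	}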