Merge tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen features and fixes from David Vrabel:

 - Reworked handling for foreign (grant mapped) pages to simplify the
   code, enable additional use cases, and fix several long-standing
   bugs (a sketch of the new allocation pattern follows this list).

 - Prefer the TSC over the Xen PV clock when running as dom0 (and the
   TSC is stable).

 - Assorted other cleanups and minor bug fixes.
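
A minimal sketch of the resulting allocation pattern, as adopted by the
backend drivers in the diffs below. NR_GRANT_PAGES and the backend_*
function names are placeholders for illustration, not kernel symbols;
the helper signatures follow their use in the xen-netback diff:

    #include <xen/grant_table.h>

    #define NR_GRANT_PAGES 256      /* hypothetical per-driver count,
                                     * e.g. MAX_PENDING_REQS in netback */

    static struct page *grant_pages[NR_GRANT_PAGES];

    static int backend_init_pages(void)
    {
            /* Replaces alloc_xenballooned_pages(); hands back ballooned
             * pages suitable for mapping foreign grants into. */
            return gnttab_alloc_pages(NR_GRANT_PAGES, grant_pages);
    }

    static void backend_deinit_pages(void)
    {
            /* Replaces free_xenballooned_pages(). */
            gnttab_free_pages(NR_GRANT_PAGES, grant_pages);
    }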

* tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (25 commits)
  xen/manage: Fix USB interaction issues when resuming
  xenbus: Add proper handling of XS_ERROR from Xenbus for transactions.
  xen/gntdev: provide find_special_page VMA operation
  xen/gntdev: mark userspace PTEs as special on x86 PV guests
  xen-blkback: safely unmap grants in case they are still in use
  xen/gntdev: safely unmap grants in case they are still in use
  xen/gntdev: convert priv->lock to a mutex
  xen/grant-table: add a mechanism to safely unmap pages that are in use
  xen-netback: use foreign page information from the pages themselves
  xen: mark grant mapped pages as foreign
  xen/grant-table: add helpers for allocating pages
  x86/xen: require ballooned pages for grant maps
  xen: remove scratch frames for ballooned pages and m2p override
  xen/grant-table: pre-populate kernel unmap ops for xen_gnttab_unmap_refs()
  mm: add 'foreign' alias for the 'pinned' page flag
  mm: provide a find_special_page vma operation
  x86/xen: cleanup arch/x86/xen/mmu.c
  x86/xen: add some __init annotations in arch/x86/xen/mmu.c
  x86/xen: add some __init and static annotations in arch/x86/xen/setup.c
  x86/xen: use correct types for addresses in arch/x86/xen/setup.c
  ...
Committed by Linus Torvalds on 2015-02-10 13:56:56 -08:00
28 changed files with 509 additions and 594 deletions

drivers/net/xen-netback/interface.c

@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
          * better enable it. The long term solution would be to use just a
          * bunch of valid page descriptors, without dependency on ballooning
          */
-        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-                                       queue->mmap_pages,
-                                       false);
+        err = gnttab_alloc_pages(MAX_PENDING_REQS,
+                                 queue->mmap_pages);
         if (err) {
                 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                 return -ENOMEM;
@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-        free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)

drivers/net/xen-netback/netback.c

@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                  struct netrx_pending_operations *npo,
                                  struct page *page, unsigned long size,
-                                 unsigned long offset, int *head,
-                                 struct xenvif_queue *foreign_queue,
-                                 grant_ref_t foreign_gref)
+                                 unsigned long offset, int *head)
 {
         struct gnttab_copy *copy_gop;
         struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
         offset &= ~PAGE_MASK;
 
         while (size > 0) {
+                struct xen_page_foreign *foreign;
+
                 BUG_ON(offset >= PAGE_SIZE);
                 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                 copy_gop->flags = GNTCOPY_dest_gref;
                 copy_gop->len = bytes;
 
-                if (foreign_queue) {
-                        copy_gop->source.domid = foreign_queue->vif->domid;
-                        copy_gop->source.u.ref = foreign_gref;
+                foreign = xen_page_foreign(page);
+                if (foreign) {
+                        copy_gop->source.domid = foreign->domid;
+                        copy_gop->source.u.ref = foreign->gref;
                         copy_gop->flags |= GNTCOPY_source_gref;
                 } else {
                         copy_gop->source.domid = DOMID_SELF;
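
The xen_page_foreign() lookup above comes from "xen: mark grant mapped
pages as foreign": when a foreign grant is mapped, the grant-table code
records the granting domain and grant reference on the struct page
itself, so consumers can recover them from the page alone. A sketch of
the helper's shape (the exact storage in page->private is an assumption
of this sketch):

    struct xen_page_foreign {
            domid_t domid;
            grant_ref_t gref;
    };

    static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
    {
            /* PG_foreign is the new alias for the 'pinned' page flag;
             * only pages marked foreign carry grant information. */
            if (!PageForeign(page))
                    return NULL;
            /* Assumed layout: domid and gref packed into the page's
             * private word. */
            return (struct xen_page_foreign *)&page->private;
    }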
@@ -405,35 +406,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
         }
 }
 
-/*
- * Find the grant ref for a given frag in a chain of struct ubuf_info's
- * skb: the skb itself
- * i: the frag's number
- * ubuf: a pointer to an element in the chain. It should not be NULL
- *
- * Returns a pointer to the element in the chain where the page were found. If
- * not found, returns NULL.
- * See the definition of callback_struct in common.h for more details about
- * the chain.
- */
-static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
-                                                const int i,
-                                                const struct ubuf_info *ubuf)
-{
-        struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
-
-        do {
-                u16 pending_idx = ubuf->desc;
-
-                if (skb_shinfo(skb)->frags[i].page.p ==
-                    foreign_queue->mmap_pages[pending_idx])
-                        break;
-                ubuf = (struct ubuf_info *) ubuf->ctx;
-        } while (ubuf);
-
-        return ubuf;
-}
-
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         int head = 1;
         int old_meta_prod;
         int gso_type;
-        const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-        const struct ubuf_info *const head_ubuf = ubuf;
 
         old_meta_prod = npo->meta_prod;
 
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                 len = skb_tail_pointer(skb) - data;
 
                 xenvif_gop_frag_copy(queue, skb, npo,
-                                     virt_to_page(data), len, offset, &head,
-                                     NULL,
-                                     0);
+                                     virt_to_page(data), len, offset, &head);
                 data += len;
         }
 
         for (i = 0; i < nr_frags; i++) {
-                /* This variable also signals whether foreign_gref has a real
-                 * value or not.
-                 */
-                struct xenvif_queue *foreign_queue = NULL;
-                grant_ref_t foreign_gref;
-
-                if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                    (ubuf->callback == &xenvif_zerocopy_callback)) {
-                        const struct ubuf_info *const startpoint = ubuf;
-
-                        /* Ideally ubuf points to the chain element which
-                         * belongs to this frag. Or if frags were removed from
-                         * the beginning, then shortly before it.
-                         */
-                        ubuf = xenvif_find_gref(skb, i, ubuf);
-
-                        /* Try again from the beginning of the list, if we
-                         * haven't tried from there. This only makes sense in
-                         * the unlikely event of reordering the original frags.
-                         * For injected local pages it's an unnecessary second
-                         * run.
-                         */
-                        if (unlikely(!ubuf) && startpoint != head_ubuf)
-                                ubuf = xenvif_find_gref(skb, i, head_ubuf);
-
-                        if (likely(ubuf)) {
-                                u16 pending_idx = ubuf->desc;
-
-                                foreign_queue = ubuf_to_queue(ubuf);
-                                foreign_gref =
-                                        foreign_queue->pending_tx_info[pending_idx].req.gref;
-                                /* Just a safety measure. If this was the last
-                                 * element on the list, the for loop will
-                                 * iterate again if a local page were added to
-                                 * the end. Using head_ubuf here prevents the
-                                 * second search on the chain. Or the original
-                                 * frags changed order, but that's less likely.
-                                 * In any way, ubuf shouldn't be NULL.
-                                 */
-                                ubuf = ubuf->ctx ?
-                                        (struct ubuf_info *) ubuf->ctx :
-                                        head_ubuf;
-                        } else
-                                /* This frag was a local page, added to the
-                                 * array after the skb left netback.
-                                 */
-                                ubuf = head_ubuf;
-                }
                 xenvif_gop_frag_copy(queue, skb, npo,
                                      skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                      skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                      skb_shinfo(skb)->frags[i].page_offset,
-                                     &head,
-                                     foreign_queue,
-                                     foreign_queue ? foreign_gref : UINT_MAX);
+                                     &head);
         }
 
         return npo->meta_prod - old_meta_prod;
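
With the ubuf_info chain walk gone, the per-frag loop as applied reduces
to a single call per fragment:

    for (i = 0; i < nr_frags; i++) {
            xenvif_gop_frag_copy(queue, skb, npo,
                                 skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                 skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                 skb_shinfo(skb)->frags[i].page_offset,
                                 &head);
    }

xenvif_gop_frag_copy() now discovers foreign pages itself via
xen_page_foreign(), so callers no longer have to thread queue/gref pairs
through the call chain.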
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
                 /* Take an extra reference to offset network stack's put_page */
                 get_page(queue->mmap_pages[pending_idx]);
         }
-        /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
-         * overlaps with "index", and "mapping" is not set. I think mapping
-         * should be set. If delivered to local stack, it would drop this
-         * skb in sk_filter unless the socket has the right to use it.
-         */
-        skb->pfmemalloc = false;
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,