Merge 5.10.105 into android12-5.10-lts
Changes in 5.10.105
    x86,bugs: Unconditionally allow spectre_v2=retpoline,amd
    x86/speculation: Rename RETPOLINE_AMD to RETPOLINE_LFENCE
    x86/speculation: Add eIBRS + Retpoline options
    Documentation/hw-vuln: Update spectre doc
    x86/speculation: Include unprivileged eBPF status in Spectre v2 mitigation reporting
    x86/speculation: Use generic retpoline by default on AMD
    x86/speculation: Update link to AMD speculation whitepaper
    x86/speculation: Warn about Spectre v2 LFENCE mitigation
    x86/speculation: Warn about eIBRS + LFENCE + Unprivileged eBPF + SMT
    ARM: report Spectre v2 status through sysfs
    ARM: early traps initialisation
    ARM: use LOADADDR() to get load address of sections
    ARM: Spectre-BHB workaround
    ARM: include unprivileged BPF status in Spectre V2 reporting
    arm64: cputype: Add CPU implementor & types for the Apple M1 cores
    arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
    arm64: Add Cortex-X2 CPU part definition
    arm64: Add Cortex-A510 CPU part definition
    arm64: Add HWCAP for self-synchronising virtual counter
    arm64: add ID_AA64ISAR2_EL1 sys register
    arm64: cpufeature: add HWCAP for FEAT_AFP
    arm64: cpufeature: add HWCAP for FEAT_RPRES
    arm64: entry.S: Add ventry overflow sanity checks
    arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
    arm64: entry: Make the trampoline cleanup optional
    arm64: entry: Free up another register on kpti's tramp_exit path
    arm64: entry: Move the trampoline data page before the text page
    arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
    arm64: entry: Don't assume tramp_vectors is the start of the vectors
    arm64: entry: Move trampoline macros out of ifdef'd section
    arm64: entry: Make the kpti trampoline's kpti sequence optional
    arm64: entry: Allow the trampoline text to occupy multiple pages
    arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
    arm64: entry: Add vectors that have the bhb mitigation sequences
    arm64: entry: Add macro for reading symbol addresses from the trampoline
    arm64: Add percpu vectors for EL1
    arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
    KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
    arm64: Mitigate spectre style branch history side channels
    KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
    arm64: Use the clearbhb instruction in mitigations
    arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
    ARM: fix build error when BPF_SYSCALL is disabled
    ARM: fix co-processor register typo
    ARM: Do not use NOCROSSREFS directive with ld.lld
    ARM: fix build warning in proc-v7-bugs.c
    xen/xenbus: don't let xenbus_grant_ring() remove grants in error case
    xen/grant-table: add gnttab_try_end_foreign_access()
    xen/blkfront: don't use gnttab_query_foreign_access() for mapped status
    xen/netfront: don't use gnttab_query_foreign_access() for mapped status
    xen/scsifront: don't use gnttab_query_foreign_access() for mapped status
    xen/gntalloc: don't use gnttab_query_foreign_access()
    xen: remove gnttab_query_foreign_access()
    xen/9p: use alloc/free_pages_exact()
    xen/pvcalls: use alloc/free_pages_exact()
    xen/gnttab: fix gnttab_end_foreign_access() without page specified
    xen/netfront: react properly to failing gnttab_end_foreign_access_ref()
    Revert "ACPI: PM: s2idle: Cancel wakeup before dispatching EC GPE"
    Linux 5.10.105

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5aa84c7fb301301f8123e8483363b0721890b8e3
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 104
SUBLEVEL = 105
EXTRAVERSION =
NAME = Dare mighty things
@@ -2064,16 +2064,6 @@ bool acpi_ec_dispatch_gpe(void)
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;

/*
* Cancel the SCI wakeup and process all pending events in case there
* are any wakeup ones in there.
*
* Note that if any non-EC GPEs are active at this point, the SCI will
* retrigger after the rearming in acpi_s2idle_wake(), so no events
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();

/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
@@ -1012,15 +1012,21 @@ static bool acpi_s2idle_wake(void)
return true;
}

/*
* Check non-EC GPE wakeups and if there are none, cancel the
* SCI-related wakeup and dispatch the EC GPE.
*/
/* Check non-EC GPE wakeups and dispatch the EC GPE. */
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}

/*
* Cancel the SCI wakeup and process all pending events in case
* there are any wakeup ones in there.
*
* Note that if any non-EC GPEs are active at this point, the
* SCI will retrigger after the rearming below, so no events
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
acpi_os_wait_events_complete();

/*
@@ -1352,7 +1352,8 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
free_pages_exact(rinfo->ring.sring,
info->nr_ring_pages * XEN_PAGE_SIZE);
rinfo->ring.sring = NULL;

if (rinfo->irq)
@@ -1436,9 +1437,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
return BLKIF_RSP_OKAY;
}

static bool blkif_completion(unsigned long *id,
struct blkfront_ring_info *rinfo,
struct blkif_response *bret)
/*
* Return values:
* 1 response processed.
* 0 missing further responses.
* -1 error while processing.
*/
static int blkif_completion(unsigned long *id,
struct blkfront_ring_info *rinfo,
struct blkif_response *bret)
{
int i = 0;
struct scatterlist *sg;
@@ -1461,7 +1468,7 @@ static bool blkif_completion(unsigned long *id,

/* Wait the second response if not yet here. */
if (s2->status < REQ_DONE)
return false;
return 0;

bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1512,42 +1519,43 @@ static bool blkif_completion(unsigned long *id,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < num_grant; i++) {
if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
/*
* If the grant is still mapped by the backend (the
* backend has chosen to make this grant persistent)
* we add it at the head of the list, so it will be
* reused first.
*/
if (!info->feature_persistent)
pr_alert_ratelimited("backed has not unmapped grant: %u\n",
s->grants_used[i]->gref);
if (!info->feature_persistent) {
pr_alert("backed has not unmapped grant: %u\n",
s->grants_used[i]->gref);
return -1;
}
list_add(&s->grants_used[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
/*
* If the grant is not mapped by the backend we end the
* foreign access and add it to the tail of the list,
* so it will not be picked again unless we run out of
* persistent grants.
* If the grant is not mapped by the backend we add it
* to the tail of the list, so it will not be picked
* again unless we run out of persistent grants.
*/
gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
s->grants_used[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
}
}
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
if (!info->feature_persistent)
pr_alert_ratelimited("backed has not unmapped grant: %u\n",
s->indirect_grants[i]->gref);
if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
if (!info->feature_persistent) {
pr_alert("backed has not unmapped grant: %u\n",
s->indirect_grants[i]->gref);
return -1;
}
list_add(&s->indirect_grants[i]->node, &rinfo->grants);
rinfo->persistent_gnts_c++;
} else {
struct page *indirect_page;

gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
/*
* Add the used indirect page back to the list of
* available pages for indirect grefs.
@@ -1562,7 +1570,7 @@ static bool blkif_completion(unsigned long *id,
}
}

return true;
return 1;
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1628,12 +1636,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
}

if (bret.operation != BLKIF_OP_DISCARD) {
int ret;

/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
if (!blkif_completion(&id, rinfo, &bret))
ret = blkif_completion(&id, rinfo, &bret);
if (!ret)
continue;
if (unlikely(ret < 0))
goto err;
}

if (add_id_to_freelist(rinfo, id)) {
@@ -1740,8 +1753,7 @@ static int setup_blkring(struct xenbus_device *dev,
for (i = 0; i < info->nr_ring_pages; i++)
rinfo->ring_ref[i] = GRANT_INVALID_REF;

sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
get_order(ring_size));
sring = alloc_pages_exact(ring_size, GFP_NOIO);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
@@ -1751,7 +1763,7 @@ static int setup_blkring(struct xenbus_device *dev,

err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
if (err < 0) {
free_pages((unsigned long)sring, get_order(ring_size));
free_pages_exact(sring, ring_size);
rinfo->ring.sring = NULL;
goto fail;
}
@@ -2729,11 +2741,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
node) {
if (gnt_list_entry->gref == GRANT_INVALID_REF ||
gnttab_query_foreign_access(gnt_list_entry->gref))
!gnttab_try_end_foreign_access(gnt_list_entry->gref))
continue;

list_del(&gnt_list_entry->node);
gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
rinfo->persistent_gnts_c--;
gnt_list_entry->gref = GRANT_INVALID_REF;
list_add_tail(&gnt_list_entry->node, &rinfo->grants);
@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) {
if (unlikely(!gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id], GNTMAP_readonly))) {
dev_alert(dev,
"Grant still in use by backend domain\n");
goto err;
}
gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
@@ -992,7 +990,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct device *dev = &queue->info->netdev->dev;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
unsigned long ret;
int slots = 1;
int err = 0;
u32 verdict;
@@ -1034,8 +1031,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
goto next;
}

ret = gnttab_end_foreign_access_ref(ref, 0);
BUG_ON(!ret);
if (!gnttab_end_foreign_access_ref(ref, 0)) {
dev_alert(dev,
"Grant still in use by backend domain\n");
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");
return -EINVAL;
}

gnttab_release_grant_reference(&queue->gref_rx_head, ref);

@@ -1256,6 +1258,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
&need_xdp_flush);

if (unlikely(err)) {
if (queue->info->broken) {
spin_unlock(&queue->rx_lock);
return 0;
}
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
@@ -1920,7 +1926,7 @@ static int setup_netfront(struct xenbus_device *dev,
struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
struct xen_netif_rx_sring *rxs = NULL;
grant_ref_t gref;
int err;

@@ -1940,21 +1946,21 @@ static int setup_netfront(struct xenbus_device *dev,

err = xenbus_grant_ring(dev, txs, 1, &gref);
if (err < 0)
goto grant_tx_ring_fail;
goto fail;
queue->tx_ring_ref = gref;

rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err, "allocating rx ring page");
goto alloc_rx_ring_fail;
goto fail;
}
SHARED_RING_INIT(rxs);
FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

err = xenbus_grant_ring(dev, rxs, 1, &gref);
if (err < 0)
goto grant_rx_ring_fail;
goto fail;
queue->rx_ring_ref = gref;

if (feature_split_evtchn)
@@ -1967,22 +1973,28 @@ static int setup_netfront(struct xenbus_device *dev,
err = setup_netfront_single(queue);

if (err)
goto alloc_evtchn_fail;
goto fail;

return 0;

/* If we fail to setup netfront, it is safe to just revoke access to
* granted pages because backend is not accessing it at this point.
*/
alloc_evtchn_fail:
gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
free_page((unsigned long)rxs);
alloc_rx_ring_fail:
gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
free_page((unsigned long)txs);
fail:
fail:
if (queue->rx_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(queue->rx_ring_ref, 0,
(unsigned long)rxs);
queue->rx_ring_ref = GRANT_INVALID_REF;
} else {
free_page((unsigned long)rxs);
}
if (queue->tx_ring_ref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(queue->tx_ring_ref, 0,
(unsigned long)txs);
queue->tx_ring_ref = GRANT_INVALID_REF;
} else {
free_page((unsigned long)txs);
}
return err;
}
@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
return;

for (i = 0; i < shadow->nr_grants; i++) {
if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
"grant still in use by backend\n");
BUG();
}
gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}

kfree(shadow->sg);
@@ -169,20 +169,14 @@ undo:
__del_gref(gref);
}

/* It's possible for the target domain to map the just-allocated grant
* references by blindly guessing their IDs; if this is done, then
* __del_gref will leave them in the queue_gref list. They need to be
* added to the global list so that we can free them when they are no
* longer referenced.
*/
if (unlikely(!list_empty(&queue_gref)))
list_splice_tail(&queue_gref, &gref_list);
mutex_unlock(&gref_mutex);
return rc;
}

static void __del_gref(struct gntalloc_gref *gref)
{
unsigned long addr;

if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
uint8_t *tmp = kmap(gref->page);
tmp[gref->notify.pgoff] = 0;
@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0;

if (gref->gref_id) {
if (gnttab_query_foreign_access(gref->gref_id))
return;

if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
return;

gnttab_free_grant_reference(gref->gref_id);
if (gref->page) {
addr = (unsigned long)page_to_virt(gref->page);
gnttab_end_foreign_access(gref->gref_id, 0, addr);
} else
gnttab_free_grant_reference(gref->gref_id);
}

gref_size--;
list_del(&gref->next_gref);

if (gref->page)
__free_page(gref->page);

kfree(gref);
}
@@ -134,12 +134,9 @@ struct gnttab_ops {
*/
unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
/*
* Query the status of a grant entry. Ref parameter is reference of
* queried grant entry, return value is the status of queried entry.
* Detailed status(writing/reading) can be gotten from the return value
* by bit operations.
* Read the frame number related to a given grant reference.
*/
int (*query_foreign_access)(grant_ref_t ref);
unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
struct list_head list;
grant_ref_t ref;
@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
put_free_entry(entry->ref);
if (entry->page) {
pr_debug("freeing g.e. %#x (pfn %#lx)\n",
entry->ref, page_to_pfn(entry->page));
put_page(entry->page);
} else
pr_info("freeing g.e. %#x\n", entry->ref);
pr_debug("freeing g.e. %#x (pfn %#lx)\n",
entry->ref, page_to_pfn(entry->page));
put_page(entry->page);
kfree(entry);
entry = NULL;
} else {
@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
struct page *page)
{
struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
struct deferred_entry *entry;
gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
const char *what = KERN_WARNING "leaking";

entry = kmalloc(sizeof(*entry), gfp);
if (!page) {
unsigned long gfn = gnttab_interface->read_frame(ref);

page = pfn_to_page(gfn_to_pfn(gfn));
get_page(page);
}

if (entry) {
unsigned long flags;

@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
int ret = _gnttab_end_foreign_access_ref(ref, 0);

if (ret)
put_free_entry(ref);

return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
if (gnttab_end_foreign_access_ref(ref, readonly)) {
put_free_entry(ref);
if (gnttab_try_end_foreign_access(ref)) {
if (page != 0)
put_page(virt_to_page(page));
} else
@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
.update_entry = gnttab_update_entry_v1,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
.query_foreign_access = gnttab_query_foreign_access_v1,
.read_frame = gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
.update_entry = gnttab_update_entry_v2,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
.query_foreign_access = gnttab_query_foreign_access_v2,
.read_frame = gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
if (!map->active.ring)
return;

free_pages((unsigned long)map->active.data.in,
map->active.ring->ring_order);
free_pages_exact(map->active.data.in,
PAGE_SIZE << map->active.ring->ring_order);
free_page((unsigned long)map->active.ring);
}

@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
goto out;

map->active.ring->ring_order = PVCALLS_RING_ORDER;
bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
PVCALLS_RING_ORDER);
bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
GFP_KERNEL | __GFP_ZERO);
if (!bytes)
goto out;
@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
unsigned int nr_pages, grant_ref_t *grefs)
{
int err;
int i, j;
unsigned int i;
grant_ref_t gref_head;

err = gnttab_alloc_grant_references(nr_pages, &gref_head);
if (err) {
xenbus_dev_fatal(dev, err, "granting access to ring page");
return err;
}

for (i = 0; i < nr_pages; i++) {
unsigned long gfn;
@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
else
gfn = virt_to_gfn(vaddr);

err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
goto fail;
}
grefs[i] = err;
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);

vaddr = vaddr + XEN_PAGE_SIZE;
}

return 0;

fail:
for (j = 0; j < i; j++)
gnttab_end_foreign_access_ref(grefs[j], 0);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
@@ -97,17 +97,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
* access has been ended, free the given page too. Access will be ended
* immediately iff the grant entry is not in use, otherwise it will happen
* some time later. page may be 0, in which case no freeing will occur.
* Note that the granted page might still be accessed (read or write) by the
* other side after gnttab_end_foreign_access() returns, so even if page was
* specified as 0 it is not allowed to just reuse the page for other
* purposes immediately. gnttab_end_foreign_access() will take an additional
* reference to the granted page in this case, which is dropped only after
* the grant is no longer in use.
* This requires that multi page allocations for areas subject to
* gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
* via free_pages_exact()) in order to avoid high order pages.
*/
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page);

/*
* End access through the given grant reference, iff the grant entry is
* no longer in use. In case of success ending foreign access, the
* grant reference is deallocated.
* Return 1 if the grant entry was freed, 0 if it is still in use.
*/
int gnttab_try_end_foreign_access(grant_ref_t ref);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

int gnttab_query_foreign_access(grant_ref_t ref);

/*
* operations on reserved batches of grant references
*/
@@ -304,9 +304,9 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
ref = priv->rings[i].intf->ref[j];
gnttab_end_foreign_access(ref, 0, 0);
}
free_pages((unsigned long)priv->rings[i].data.in,
priv->rings[i].intf->ring_order -
(PAGE_SHIFT - XEN_PAGE_SHIFT));
free_pages_exact(priv->rings[i].data.in,
1UL << (priv->rings[i].intf->ring_order +
XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
free_page((unsigned long)priv->rings[i].intf);
@@ -345,8 +345,8 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
if (ret < 0)
goto out;
ring->ref = ret;
bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
order - (PAGE_SHIFT - XEN_PAGE_SHIFT));
bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
GFP_KERNEL | __GFP_ZERO);
if (!bytes) {
ret = -ENOMEM;
goto out;
@@ -377,9 +377,7 @@ out:
if (bytes) {
for (i--; i >= 0; i--)
gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
free_pages((unsigned long)bytes,
ring->intf->ring_order -
(PAGE_SHIFT - XEN_PAGE_SHIFT));
free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(ring->ref, 0, 0);
free_page((unsigned long)ring->intf);