Merge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen features and fixes from David Vrabel:

 - use a single source list of hypercalls, generating other tables etc.
   at build time.

 - add a "Xen PV" APIC driver to support >255 VCPUs in PV guests.

 - significant performance improvements to guest save/restore/migration.

 - scsiback/front save/restore support.

 - infrastructure for multi-page xenbus rings.

 - misc fixes.

* tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pci: Try harder to get PXM information for Xen
  xenbus_client: Extend interface to support multi-page ring
  xen-pciback: also support disabling of bus-mastering and memory-write-invalidate
  xen: support suspend/resume in pvscsi frontend
  xen: scsiback: add LUN of restored domain
  xen-scsiback: define a pr_fmt macro with xen-pvscsi
  xen/mce: fix up xen_late_init_mcelog() error handling
  xen/privcmd: improve performance of MMAPBATCH_V2
  xen: unify foreign GFN map/unmap for auto-xlated physmap guests
  x86/xen/apic: WARN with details.
  x86/xen: Provide a "Xen PV" APIC driver to support >255 VCPUs
  xen/pciback: Don't print scary messages when unsupported by hypervisor.
  xen: use generated hypercall symbols in arch/x86/xen/xen-head.S
  xen: use generated hypervisor symbols in arch/x86/xen/trace.c
  xen: synchronize include/xen/interface/xen.h with xen
  xen: build infrastructure for generating hypercall depending symbols
  xen: balloon: Use static attribute groups for sysfs entries
  xen: pcpu: Use static attribute groups for sysfs entry
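A note on the multi-page xenbus ring infrastructure merged below: xenbus_grant_ring() and xenbus_map_ring_valloc() now take a grant-reference array and a page count instead of a single reference. The following is only an illustrative sketch, not part of this series; the ring size, function name, and error handling are made up for the example. It shows how a frontend could share a multi-page ring with the extended interface, with the backend side mapping it via xenbus_map_ring_valloc():

#include <linux/gfp.h>
#include <linux/mm.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>

#define MY_RING_PAGES 4			/* hypothetical ring size */

/* Illustrative only: grant an N-page ring to the backend with the new API. */
static int my_front_setup_ring(struct xenbus_device *dev, void **ring,
			       grant_ref_t grefs[MY_RING_PAGES])
{
	int err;

	*ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(MY_RING_PAGES * PAGE_SIZE));
	if (!*ring)
		return -ENOMEM;

	/* Extended signature: grant nr_pages starting at vaddr, fill grefs[]. */
	err = xenbus_grant_ring(dev, *ring, MY_RING_PAGES, grefs);
	if (err < 0) {
		free_pages((unsigned long)*ring,
			   get_order(MY_RING_PAGES * PAGE_SIZE));
		*ring = NULL;
		return err;
	}

	/*
	 * grefs[] would then be written to xenstore; the backend maps them
	 * with xenbus_map_ring_valloc(dev, grefs, MY_RING_PAGES, &vaddr).
	 */
	return 0;
}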
@@ -270,4 +270,10 @@ config XEN_EFI
    def_bool y
    depends on X86_64 && EFI

config XEN_AUTO_XLATE
    def_bool y
    depends on ARM || ARM64 || XEN_PVHVM
    help
      Support for auto-translated physmap guests.

endmenu
@@ -37,6 +37,7 @@ obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
@@ -393,14 +393,25 @@ static int bind_virq_for_mce(void)

static int __init xen_late_init_mcelog(void)
{
    /* Only DOM0 is responsible for MCE logging */
    if (xen_initial_domain()) {
        /* register character device /dev/mcelog for xen mcelog */
        if (misc_register(&xen_mce_chrdev_device))
            return -ENODEV;
        return bind_virq_for_mce();
    }
    int ret;

    return -ENODEV;
    /* Only DOM0 is responsible for MCE logging */
    if (!xen_initial_domain())
        return -ENODEV;

    /* register character device /dev/mcelog for xen mcelog */
    ret = misc_register(&xen_mce_chrdev_device);
    if (ret)
        return ret;

    ret = bind_virq_for_mce();
    if (ret)
        goto deregister;

    return 0;

deregister:
    misc_deregister(&xen_mce_chrdev_device);
    return ret;
}
device_initcall(xen_late_init_mcelog);
@@ -19,6 +19,7 @@

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -67,12 +68,22 @@ static int xen_add_device(struct device *dev)

#ifdef CONFIG_ACPI
    handle = ACPI_HANDLE(&pci_dev->dev);
    if (!handle && pci_dev->bus->bridge)
        handle = ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
    if (!handle && pci_dev->is_virtfn)
        handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
    if (!handle) {
        /*
         * This device was not listed in the ACPI name space at
         * all. Try to get acpi handle of parent pci bus.
         */
        struct pci_bus *pbus;
        for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
            handle = acpi_pci_get_bridge_handle(pbus);
            if (handle)
                break;
        }
    }
    if (handle) {
        acpi_status status;
@@ -132,6 +132,33 @@ static ssize_t __ref store_online(struct device *dev,
}
static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);

static struct attribute *pcpu_dev_attrs[] = {
    &dev_attr_online.attr,
    NULL
};

static umode_t pcpu_dev_is_visible(struct kobject *kobj,
                                   struct attribute *attr, int idx)
{
    struct device *dev = kobj_to_dev(kobj);
    /*
     * Xen never offline cpu0 due to several restrictions
     * and assumptions. This basically doesn't add a sys control
     * to user, one cannot attempt to offline BSP.
     */
    return dev->id ? attr->mode : 0;
}

static const struct attribute_group pcpu_dev_group = {
    .attrs = pcpu_dev_attrs,
    .is_visible = pcpu_dev_is_visible,
};

static const struct attribute_group *pcpu_dev_groups[] = {
    &pcpu_dev_group,
    NULL
};

static bool xen_pcpu_online(uint32_t flags)
{
    return !!(flags & XEN_PCPU_FLAGS_ONLINE);
@@ -181,9 +208,6 @@ static void unregister_and_remove_pcpu(struct pcpu *pcpu)
        return;

    dev = &pcpu->dev;
    if (dev->id)
        device_remove_file(dev, &dev_attr_online);

    /* pcpu remove would be implicitly done */
    device_unregister(dev);
}
@@ -200,6 +224,7 @@ static int register_pcpu(struct pcpu *pcpu)
    dev->bus = &xen_pcpu_subsys;
    dev->id = pcpu->cpu_id;
    dev->release = pcpu_release;
    dev->groups = pcpu_dev_groups;

    err = device_register(dev);
    if (err) {
@@ -207,19 +232,6 @@ static int register_pcpu(struct pcpu *pcpu)
        return err;
    }

    /*
     * Xen never offline cpu0 due to several restrictions
     * and assumptions. This basically doesn't add a sys control
     * to user, one cannot attempt to offline BSP.
     */
    if (dev->id) {
        err = device_create_file(dev, &dev_attr_online);
        if (err) {
            device_unregister(dev);
            return err;
        }
    }

    return 0;
}
@@ -159,6 +159,40 @@ static int traverse_pages(unsigned nelem, size_t size,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Similar to traverse_pages, but use each page as a "block" of
|
||||
* data to be processed as one unit.
|
||||
*/
|
||||
static int traverse_pages_block(unsigned nelem, size_t size,
|
||||
struct list_head *pos,
|
||||
int (*fn)(void *data, int nr, void *state),
|
||||
void *state)
|
||||
{
|
||||
void *pagedata;
|
||||
unsigned pageidx;
|
||||
int ret = 0;
|
||||
|
||||
BUG_ON(size > PAGE_SIZE);
|
||||
|
||||
pageidx = PAGE_SIZE;
|
||||
|
||||
while (nelem) {
|
||||
int nr = (PAGE_SIZE/size);
|
||||
struct page *page;
|
||||
if (nr > nelem)
|
||||
nr = nelem;
|
||||
pos = pos->next;
|
||||
page = list_entry(pos, struct page, lru);
|
||||
pagedata = page_address(page);
|
||||
ret = (*fn)(pagedata, nr, state);
|
||||
if (ret)
|
||||
break;
|
||||
nelem -= nr;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct mmap_mfn_state {
|
||||
unsigned long va;
|
||||
struct vm_area_struct *vma;
|
||||
@@ -274,39 +308,25 @@ struct mmap_batch_state {
|
||||
/* auto translated dom0 note: if domU being created is PV, then mfn is
|
||||
* mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
|
||||
*/
|
||||
static int mmap_batch_fn(void *data, void *state)
|
||||
static int mmap_batch_fn(void *data, int nr, void *state)
|
||||
{
|
||||
xen_pfn_t *mfnp = data;
|
||||
struct mmap_batch_state *st = state;
|
||||
struct vm_area_struct *vma = st->vma;
|
||||
struct page **pages = vma->vm_private_data;
|
||||
struct page *cur_page = NULL;
|
||||
struct page **cur_pages = NULL;
|
||||
int ret;
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
cur_page = pages[st->index++];
|
||||
cur_pages = &pages[st->index];
|
||||
|
||||
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
|
||||
st->vma->vm_page_prot, st->domain,
|
||||
&cur_page);
|
||||
BUG_ON(nr < 0);
|
||||
ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
|
||||
(int *)mfnp, st->vma->vm_page_prot,
|
||||
st->domain, cur_pages);
|
||||
|
||||
/* Store error code for second pass. */
|
||||
if (st->version == 1) {
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* V1 encodes the error codes in the 32bit top nibble of the
|
||||
* mfn (with its known limitations vis-a-vis 64 bit callers).
|
||||
*/
|
||||
*mfnp |= (ret == -ENOENT) ?
|
||||
PRIVCMD_MMAPBATCH_PAGED_ERROR :
|
||||
PRIVCMD_MMAPBATCH_MFN_ERROR;
|
||||
}
|
||||
} else { /* st->version == 2 */
|
||||
*((int *) mfnp) = ret;
|
||||
}
|
||||
|
||||
/* And see if it affects the global_error. */
|
||||
if (ret < 0) {
|
||||
/* Adjust the global_error? */
|
||||
if (ret != nr) {
|
||||
if (ret == -ENOENT)
|
||||
st->global_error = -ENOENT;
|
||||
else {
|
||||
@@ -315,23 +335,35 @@ static int mmap_batch_fn(void *data, void *state)
|
||||
st->global_error = 1;
|
||||
}
|
||||
}
|
||||
st->va += PAGE_SIZE;
|
||||
st->va += PAGE_SIZE * nr;
|
||||
st->index += nr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmap_return_errors(void *data, void *state)
|
||||
static int mmap_return_error(int err, struct mmap_batch_state *st)
|
||||
{
|
||||
struct mmap_batch_state *st = state;
|
||||
int ret;
|
||||
|
||||
if (st->version == 1) {
|
||||
xen_pfn_t mfnp = *((xen_pfn_t *) data);
|
||||
if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
|
||||
return __put_user(mfnp, st->user_mfn++);
|
||||
else
|
||||
if (err) {
|
||||
xen_pfn_t mfn;
|
||||
|
||||
ret = get_user(mfn, st->user_mfn);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
/*
|
||||
* V1 encodes the error codes in the 32bit top
|
||||
* nibble of the mfn (with its known
|
||||
* limitations vis-a-vis 64 bit callers).
|
||||
*/
|
||||
mfn |= (err == -ENOENT) ?
|
||||
PRIVCMD_MMAPBATCH_PAGED_ERROR :
|
||||
PRIVCMD_MMAPBATCH_MFN_ERROR;
|
||||
return __put_user(mfn, st->user_mfn++);
|
||||
} else
|
||||
st->user_mfn++;
|
||||
} else { /* st->version == 2 */
|
||||
int err = *((int *) data);
|
||||
if (err)
|
||||
return __put_user(err, st->user_err++);
|
||||
else
|
||||
@@ -341,6 +373,21 @@ static int mmap_return_errors(void *data, void *state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmap_return_errors(void *data, int nr, void *state)
|
||||
{
|
||||
struct mmap_batch_state *st = state;
|
||||
int *errs = data;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
ret = mmap_return_error(errs[i], st);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
|
||||
* the vma with the page info to use later.
|
||||
* Returns: 0 if success, otherwise -errno
|
||||
@@ -472,8 +519,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
|
||||
state.version = version;
|
||||
|
||||
/* mmap_batch_fn guarantees ret == 0 */
|
||||
BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
|
||||
&pagelist, mmap_batch_fn, &state));
|
||||
BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
|
||||
&pagelist, mmap_batch_fn, &state));
|
||||
|
||||
up_write(&mm->mmap_sem);
|
||||
|
||||
@@ -481,8 +528,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
|
||||
/* Write back errors in second pass. */
|
||||
state.user_mfn = (xen_pfn_t *)m.arr;
|
||||
state.user_err = m.err;
|
||||
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
|
||||
&pagelist, mmap_return_errors, &state);
|
||||
ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
|
||||
&pagelist, mmap_return_errors, &state);
|
||||
} else
|
||||
ret = 0;
|
||||
|
||||
|
@@ -193,13 +193,18 @@ static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
|
||||
show_target, store_target);
|
||||
|
||||
|
||||
static struct device_attribute *balloon_attrs[] = {
|
||||
&dev_attr_target_kb,
|
||||
&dev_attr_target,
|
||||
&dev_attr_schedule_delay.attr,
|
||||
&dev_attr_max_schedule_delay.attr,
|
||||
&dev_attr_retry_count.attr,
|
||||
&dev_attr_max_retry_count.attr
|
||||
static struct attribute *balloon_attrs[] = {
|
||||
&dev_attr_target_kb.attr,
|
||||
&dev_attr_target.attr,
|
||||
&dev_attr_schedule_delay.attr.attr,
|
||||
&dev_attr_max_schedule_delay.attr.attr,
|
||||
&dev_attr_retry_count.attr.attr,
|
||||
&dev_attr_max_retry_count.attr.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static const struct attribute_group balloon_group = {
|
||||
.attrs = balloon_attrs
|
||||
};
|
||||
|
||||
static struct attribute *balloon_info_attrs[] = {
|
||||
@@ -214,6 +219,12 @@ static const struct attribute_group balloon_info_group = {
|
||||
.attrs = balloon_info_attrs
|
||||
};
|
||||
|
||||
static const struct attribute_group *balloon_groups[] = {
|
||||
&balloon_group,
|
||||
&balloon_info_group,
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct bus_type balloon_subsys = {
|
||||
.name = BALLOON_CLASS_NAME,
|
||||
.dev_name = BALLOON_CLASS_NAME,
|
||||
@@ -221,7 +232,7 @@ static struct bus_type balloon_subsys = {
|
||||
|
||||
static int register_balloon(struct device *dev)
|
||||
{
|
||||
int i, error;
|
||||
int error;
|
||||
|
||||
error = subsys_system_register(&balloon_subsys, NULL);
|
||||
if (error)
|
||||
@@ -229,6 +240,7 @@ static int register_balloon(struct device *dev)
|
||||
|
||||
dev->id = 0;
|
||||
dev->bus = &balloon_subsys;
|
||||
dev->groups = balloon_groups;
|
||||
|
||||
error = device_register(dev);
|
||||
if (error) {
|
||||
@@ -236,24 +248,7 @@ static int register_balloon(struct device *dev)
|
||||
return error;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
|
||||
error = device_create_file(dev, balloon_attrs[i]);
|
||||
if (error)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = sysfs_create_group(&dev->kobj, &balloon_info_group);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
while (--i >= 0)
|
||||
device_remove_file(dev, balloon_attrs[i]);
|
||||
device_unregister(dev);
|
||||
bus_unregister(&balloon_subsys);
|
||||
return error;
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -88,9 +88,15 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
            printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
                   pci_name(dev));
        pci_set_master(dev);
    } else if (dev->is_busmaster && !is_master_cmd(value)) {
        if (unlikely(verbose_request))
            printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n",
                   pci_name(dev));
        pci_clear_master(dev);
    }

    if (value & PCI_COMMAND_INVALIDATE) {
    if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
        (value & PCI_COMMAND_INVALIDATE)) {
        if (unlikely(verbose_request))
            printk(KERN_DEBUG
                   DRV_NAME ": %s: enable memory-write-invalidate\n",
@@ -101,6 +107,13 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
                   pci_name(dev), err);
            value &= ~PCI_COMMAND_INVALIDATE;
        }
    } else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
               !(value & PCI_COMMAND_INVALIDATE)) {
        if (unlikely(verbose_request))
            printk(KERN_DEBUG
                   DRV_NAME ": %s: disable memory-write-invalidate\n",
                   pci_name(dev));
        pci_clear_mwi(dev);
    }

    cmd->val = value;
@@ -118,7 +118,7 @@ static void pcistub_device_release(struct kref *kref)
        int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
                                        &ppdev);

        if (err)
        if (err && err != -ENOSYS)
            dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
                     err);
    }
@@ -402,7 +402,7 @@ static int pcistub_init_device(struct pci_dev *dev)
        };

        err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
        if (err)
        if (err && err != -ENOSYS)
            dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
                    err);
    }
@@ -113,7 +113,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
         "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
         gnt_ref, remote_evtchn);

    err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
    err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
    if (err < 0) {
        xenbus_dev_fatal(pdev->xdev, err,
                         "Error mapping other domain page in ours.");
@@ -31,6 +31,8 @@
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "xen-pvscsi: " fmt
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
#include <linux/module.h>
|
||||
@@ -69,9 +71,6 @@
|
||||
#include <xen/interface/grant_table.h>
|
||||
#include <xen/interface/io/vscsiif.h>
|
||||
|
||||
#define DPRINTK(_f, _a...) \
|
||||
pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a)
|
||||
|
||||
#define VSCSI_VERSION "v0.1"
|
||||
#define VSCSI_NAMELEN 32
|
||||
|
||||
@@ -271,7 +270,7 @@ static void scsiback_print_status(char *sense_buffer, int errors,
|
||||
{
|
||||
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
|
||||
|
||||
pr_err("xen-pvscsi[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
|
||||
pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
|
||||
tpg->tport->tport_name, pending_req->v2p->lun,
|
||||
pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
|
||||
host_byte(errors), driver_byte(errors));
|
||||
@@ -427,7 +426,7 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
|
||||
BUG_ON(err);
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (unlikely(map[i].status != GNTST_okay)) {
|
||||
pr_err("xen-pvscsi: invalid buffer -- could not remap it\n");
|
||||
pr_err("invalid buffer -- could not remap it\n");
|
||||
map[i].handle = SCSIBACK_INVALID_HANDLE;
|
||||
err = -ENOMEM;
|
||||
} else {
|
||||
@@ -449,7 +448,7 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (get_free_page(pg + mapcount)) {
|
||||
put_free_pages(pg, mapcount);
|
||||
pr_err("xen-pvscsi: no grant page\n");
|
||||
pr_err("no grant page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
|
||||
@@ -492,7 +491,7 @@ static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
|
||||
return 0;
|
||||
|
||||
if (nr_segments > VSCSIIF_SG_TABLESIZE) {
|
||||
DPRINTK("xen-pvscsi: invalid parameter nr_seg = %d\n",
|
||||
pr_debug("invalid parameter nr_seg = %d\n",
|
||||
ring_req->nr_segments);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -516,13 +515,12 @@ static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
|
||||
nr_segments += n_segs;
|
||||
}
|
||||
if (nr_segments > SG_ALL) {
|
||||
DPRINTK("xen-pvscsi: invalid nr_seg = %d\n",
|
||||
nr_segments);
|
||||
pr_debug("invalid nr_seg = %d\n", nr_segments);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* free of (sgl) in fast_flush_area()*/
|
||||
/* free of (sgl) in fast_flush_area() */
|
||||
pending_req->sgl = kmalloc_array(nr_segments,
|
||||
sizeof(struct scatterlist), GFP_KERNEL);
|
||||
if (!pending_req->sgl)
|
||||
@@ -679,7 +677,8 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
|
||||
v2p = scsiback_do_translation(info, &vir);
|
||||
if (!v2p) {
|
||||
pending_req->v2p = NULL;
|
||||
DPRINTK("xen-pvscsi: doesn't exist.\n");
|
||||
pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
|
||||
vir.chn, vir.tgt, vir.lun);
|
||||
return -ENODEV;
|
||||
}
|
||||
pending_req->v2p = v2p;
|
||||
@@ -690,14 +689,14 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
|
||||
(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
|
||||
(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
|
||||
(pending_req->sc_data_direction != DMA_NONE)) {
|
||||
DPRINTK("xen-pvscsi: invalid parameter data_dir = %d\n",
|
||||
pr_debug("invalid parameter data_dir = %d\n",
|
||||
pending_req->sc_data_direction);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pending_req->cmd_len = ring_req->cmd_len;
|
||||
if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
|
||||
DPRINTK("xen-pvscsi: invalid parameter cmd_len = %d\n",
|
||||
pr_debug("invalid parameter cmd_len = %d\n",
|
||||
pending_req->cmd_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -721,7 +720,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
|
||||
|
||||
if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
|
||||
rc = ring->rsp_prod_pvt;
|
||||
pr_warn("xen-pvscsi: Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
|
||||
pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
|
||||
info->domid, rp, rc, rp - rc);
|
||||
info->ring_error = 1;
|
||||
return 0;
|
||||
@@ -772,7 +771,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
|
||||
scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
|
||||
break;
|
||||
default:
|
||||
pr_err_ratelimited("xen-pvscsi: invalid request\n");
|
||||
pr_err_ratelimited("invalid request\n");
|
||||
scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
|
||||
0, pending_req);
|
||||
kmem_cache_free(scsiback_cachep, pending_req);
|
||||
@@ -810,7 +809,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
|
||||
if (info->irq)
|
||||
return -1;
|
||||
|
||||
err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
|
||||
err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -874,14 +873,13 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
|
||||
|
||||
lunp = strrchr(phy, ':');
|
||||
if (!lunp) {
|
||||
pr_err("xen-pvscsi: illegal format of physical device %s\n",
|
||||
phy);
|
||||
pr_err("illegal format of physical device %s\n", phy);
|
||||
return -EINVAL;
|
||||
}
|
||||
*lunp = 0;
|
||||
lunp++;
|
||||
if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
|
||||
pr_err("xen-pvscsi: lun number not valid: %s\n", lunp);
|
||||
pr_err("lun number not valid: %s\n", lunp);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -909,7 +907,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
|
||||
mutex_unlock(&scsiback_mutex);
|
||||
|
||||
if (!tpg) {
|
||||
pr_err("xen-pvscsi: %s:%d %s\n", phy, lun, error);
|
||||
pr_err("%s:%d %s\n", phy, lun, error);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@@ -926,7 +924,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
|
||||
if ((entry->v.chn == v->chn) &&
|
||||
(entry->v.tgt == v->tgt) &&
|
||||
(entry->v.lun == v->lun)) {
|
||||
pr_warn("xen-pvscsi: Virtual ID is already used. Assignment was not performed.\n");
|
||||
pr_warn("Virtual ID is already used. Assignment was not performed.\n");
|
||||
err = -EEXIST;
|
||||
goto out;
|
||||
}
|
||||
@@ -992,15 +990,15 @@ found:
|
||||
}
|
||||
|
||||
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
|
||||
char *phy, struct ids_tuple *vir)
|
||||
char *phy, struct ids_tuple *vir, int try)
|
||||
{
|
||||
if (!scsiback_add_translation_entry(info, phy, vir)) {
|
||||
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
|
||||
"%d", XenbusStateInitialised)) {
|
||||
pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
|
||||
pr_err("xenbus_printf error %s\n", state);
|
||||
scsiback_del_translation_entry(info, vir);
|
||||
}
|
||||
} else {
|
||||
} else if (!try) {
|
||||
xenbus_printf(XBT_NIL, info->dev->nodename, state,
|
||||
"%d", XenbusStateClosed);
|
||||
}
|
||||
@@ -1012,7 +1010,7 @@ static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
|
||||
if (!scsiback_del_translation_entry(info, vir)) {
|
||||
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
|
||||
"%d", XenbusStateClosed))
|
||||
pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
|
||||
pr_err("xenbus_printf error %s\n", state);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1060,10 +1058,19 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
|
||||
|
||||
switch (op) {
|
||||
case VSCSIBACK_OP_ADD_OR_DEL_LUN:
|
||||
if (device_state == XenbusStateInitialising)
|
||||
scsiback_do_add_lun(info, state, phy, &vir);
|
||||
if (device_state == XenbusStateClosing)
|
||||
switch (device_state) {
|
||||
case XenbusStateInitialising:
|
||||
scsiback_do_add_lun(info, state, phy, &vir, 0);
|
||||
break;
|
||||
case XenbusStateConnected:
|
||||
scsiback_do_add_lun(info, state, phy, &vir, 1);
|
||||
break;
|
||||
case XenbusStateClosing:
|
||||
scsiback_do_del_lun(info, state, &vir);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case VSCSIBACK_OP_UPDATEDEV_STATE:
|
||||
@@ -1071,15 +1078,14 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
|
||||
/* modify vscsi-devs/dev-x/state */
|
||||
if (xenbus_printf(XBT_NIL, dev->nodename, state,
|
||||
"%d", XenbusStateConnected)) {
|
||||
pr_err("xen-pvscsi: xenbus_printf error %s\n",
|
||||
str);
|
||||
pr_err("xenbus_printf error %s\n", str);
|
||||
scsiback_del_translation_entry(info, &vir);
|
||||
xenbus_printf(XBT_NIL, dev->nodename, state,
|
||||
"%d", XenbusStateClosed);
|
||||
}
|
||||
}
|
||||
break;
|
||||
/*When it is necessary, processing is added here.*/
|
||||
/* When it is necessary, processing is added here. */
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -1196,7 +1202,7 @@ static int scsiback_probe(struct xenbus_device *dev,
|
||||
struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
|
||||
GFP_KERNEL);
|
||||
|
||||
DPRINTK("%p %d\n", dev, dev->otherend_id);
|
||||
pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
|
||||
|
||||
if (!info) {
|
||||
xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
|
||||
@@ -1227,7 +1233,7 @@ static int scsiback_probe(struct xenbus_device *dev,
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
pr_warn("xen-pvscsi: %s failed\n", __func__);
|
||||
pr_warn("%s failed\n", __func__);
|
||||
scsiback_remove(dev);
|
||||
|
||||
return err;
|
||||
@@ -1432,7 +1438,7 @@ check_len:
|
||||
}
|
||||
snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);
|
||||
|
||||
pr_debug("xen-pvscsi: Allocated emulated Target %s Address: %s\n",
|
||||
pr_debug("Allocated emulated Target %s Address: %s\n",
|
||||
scsiback_dump_proto_id(tport), name);
|
||||
|
||||
return &tport->tport_wwn;
|
||||
@@ -1443,7 +1449,7 @@ static void scsiback_drop_tport(struct se_wwn *wwn)
|
||||
struct scsiback_tport *tport = container_of(wwn,
|
||||
struct scsiback_tport, tport_wwn);
|
||||
|
||||
pr_debug("xen-pvscsi: Deallocating emulated Target %s Address: %s\n",
|
||||
pr_debug("Deallocating emulated Target %s Address: %s\n",
|
||||
scsiback_dump_proto_id(tport), tport->tport_name);
|
||||
|
||||
kfree(tport);
|
||||
@@ -1470,8 +1476,8 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
|
||||
static int scsiback_check_stop_free(struct se_cmd *se_cmd)
|
||||
{
|
||||
/*
|
||||
* Do not release struct se_cmd's containing a valid TMR
|
||||
* pointer. These will be released directly in scsiback_device_action()
|
||||
* Do not release struct se_cmd's containing a valid TMR pointer.
|
||||
* These will be released directly in scsiback_device_action()
|
||||
* with transport_generic_free_cmd().
|
||||
*/
|
||||
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
|
||||
@@ -1637,7 +1643,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
|
||||
return -ENOMEM;
|
||||
}
|
||||
/*
|
||||
* Initialize the struct se_session pointer
|
||||
* Initialize the struct se_session pointer
|
||||
*/
|
||||
tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
|
||||
if (IS_ERR(tv_nexus->tvn_se_sess)) {
|
||||
@@ -1705,7 +1711,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
pr_debug("xen-pvscsi: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
|
||||
pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
|
||||
scsiback_dump_proto_id(tpg->tport),
|
||||
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
|
||||
|
||||
@@ -1751,7 +1757,7 @@ static ssize_t scsiback_tpg_store_nexus(struct se_portal_group *se_tpg,
|
||||
unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
|
||||
int ret;
|
||||
/*
|
||||
* Shutdown the active I_T nexus if 'NULL' is passed..
|
||||
* Shutdown the active I_T nexus if 'NULL' is passed.
|
||||
*/
|
||||
if (!strncmp(page, "NULL", 4)) {
|
||||
ret = scsiback_drop_nexus(tpg);
|
||||
@@ -1922,7 +1928,7 @@ static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
|
||||
*/
|
||||
scsiback_drop_nexus(tpg);
|
||||
/*
|
||||
* Deregister the se_tpg from TCM..
|
||||
* Deregister the se_tpg from TCM.
|
||||
*/
|
||||
core_tpg_deregister(se_tpg);
|
||||
kfree(tpg);
|
||||
@@ -1992,7 +1998,7 @@ static int scsiback_register_configfs(void)
|
||||
struct target_fabric_configfs *fabric;
|
||||
int ret;
|
||||
|
||||
pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
|
||||
pr_debug("fabric module %s on %s/%s on "UTS_RELEASE"\n",
|
||||
VSCSI_VERSION, utsname()->sysname, utsname()->machine);
|
||||
/*
|
||||
* Register the top level struct config_item_type with TCM core
|
||||
@@ -2029,7 +2035,7 @@ static int scsiback_register_configfs(void)
|
||||
* Setup our local pointer to *fabric
|
||||
*/
|
||||
scsiback_fabric_configfs = fabric;
|
||||
pr_debug("xen-pvscsi: Set fabric -> scsiback_fabric_configfs\n");
|
||||
pr_debug("Set fabric -> scsiback_fabric_configfs\n");
|
||||
return 0;
|
||||
};
|
||||
|
||||
@@ -2040,7 +2046,7 @@ static void scsiback_deregister_configfs(void)
|
||||
|
||||
target_fabric_configfs_deregister(scsiback_fabric_configfs);
|
||||
scsiback_fabric_configfs = NULL;
|
||||
pr_debug("xen-pvscsi: Cleared scsiback_fabric_configfs\n");
|
||||
pr_debug("Cleared scsiback_fabric_configfs\n");
|
||||
};
|
||||
|
||||
static const struct xenbus_device_id scsiback_ids[] = {
|
||||
@@ -2091,7 +2097,7 @@ out_unregister_xenbus:
|
||||
xenbus_unregister_driver(&scsiback_driver);
|
||||
out_cache_destroy:
|
||||
kmem_cache_destroy(scsiback_cachep);
|
||||
pr_err("xen-pvscsi: %s: error %d\n", __func__, ret);
|
||||
pr_err("%s: error %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -52,17 +52,25 @@
|
||||
struct xenbus_map_node {
|
||||
struct list_head next;
|
||||
union {
|
||||
struct vm_struct *area; /* PV */
|
||||
struct page *page; /* HVM */
|
||||
struct {
|
||||
struct vm_struct *area;
|
||||
} pv;
|
||||
struct {
|
||||
struct page *pages[XENBUS_MAX_RING_PAGES];
|
||||
void *addr;
|
||||
} hvm;
|
||||
};
|
||||
grant_handle_t handle;
|
||||
grant_handle_t handles[XENBUS_MAX_RING_PAGES];
|
||||
unsigned int nr_handles;
|
||||
};
|
||||
|
||||
static DEFINE_SPINLOCK(xenbus_valloc_lock);
|
||||
static LIST_HEAD(xenbus_valloc_pages);
|
||||
|
||||
struct xenbus_ring_ops {
|
||||
int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
|
||||
int (*map)(struct xenbus_device *dev,
|
||||
grant_ref_t *gnt_refs, unsigned int nr_grefs,
|
||||
void **vaddr);
|
||||
int (*unmap)(struct xenbus_device *dev, void *vaddr);
|
||||
};
|
||||
|
||||
@@ -355,17 +363,39 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
|
||||
/**
|
||||
* xenbus_grant_ring
|
||||
* @dev: xenbus device
|
||||
* @ring_mfn: mfn of ring to grant
|
||||
|
||||
* Grant access to the given @ring_mfn to the peer of the given device. Return
|
||||
* a grant reference on success, or -errno on error. On error, the device will
|
||||
* switch to XenbusStateClosing, and the error will be saved in the store.
|
||||
* @vaddr: starting virtual address of the ring
|
||||
* @nr_pages: number of pages to be granted
|
||||
* @grefs: grant reference array to be filled in
|
||||
*
|
||||
* Grant access to the given @vaddr to the peer of the given device.
|
||||
* Then fill in @grefs with grant references. Return 0 on success, or
|
||||
* -errno on error. On error, the device will switch to
|
||||
* XenbusStateClosing, and the error will be saved in the store.
|
||||
*/
|
||||
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
|
||||
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
|
||||
unsigned int nr_pages, grant_ref_t *grefs)
|
||||
{
|
||||
int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
|
||||
if (err < 0)
|
||||
xenbus_dev_fatal(dev, err, "granting access to ring page");
|
||||
int err;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
unsigned long addr = (unsigned long)vaddr +
|
||||
(PAGE_SIZE * i);
|
||||
err = gnttab_grant_foreign_access(dev->otherend_id,
|
||||
virt_to_mfn(addr), 0);
|
||||
if (err < 0) {
|
||||
xenbus_dev_fatal(dev, err,
|
||||
"granting access to ring page");
|
||||
goto fail;
|
||||
}
|
||||
grefs[i] = err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
for (j = 0; j < i; j++)
|
||||
gnttab_end_foreign_access_ref(grefs[j], 0);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
|
||||
@@ -419,62 +449,130 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
|
||||
/**
|
||||
* xenbus_map_ring_valloc
|
||||
* @dev: xenbus device
|
||||
* @gnt_ref: grant reference
|
||||
* @gnt_refs: grant reference array
|
||||
* @nr_grefs: number of grant references
|
||||
* @vaddr: pointer to address to be filled out by mapping
|
||||
*
|
||||
* Based on Rusty Russell's skeleton driver's map_page.
|
||||
* Map a page of memory into this domain from another domain's grant table.
|
||||
* xenbus_map_ring_valloc allocates a page of virtual address space, maps the
|
||||
* page to that address, and sets *vaddr to that address.
|
||||
* Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
|
||||
* or -ENOMEM on error. If an error is returned, device will switch to
|
||||
* Map @nr_grefs pages of memory into this domain from another
|
||||
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
|
||||
* pages of virtual address space, maps the pages to that address, and
|
||||
* sets *vaddr to that address. Returns 0 on success, and GNTST_*
|
||||
* (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
|
||||
* error. If an error is returned, device will switch to
|
||||
* XenbusStateClosing and the error message will be saved in XenStore.
|
||||
*/
|
||||
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
|
||||
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
|
||||
unsigned int nr_grefs, void **vaddr)
|
||||
{
|
||||
return ring_ops->map(dev, gnt_ref, vaddr);
|
||||
return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
|
||||
|
||||
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
|
||||
int gnt_ref, void **vaddr)
|
||||
/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
|
||||
* long), e.g. 32-on-64. Caller is responsible for preparing the
|
||||
* right array to feed into this function */
|
||||
static int __xenbus_map_ring(struct xenbus_device *dev,
|
||||
grant_ref_t *gnt_refs,
|
||||
unsigned int nr_grefs,
|
||||
grant_handle_t *handles,
|
||||
phys_addr_t *addrs,
|
||||
unsigned int flags,
|
||||
bool *leaked)
|
||||
{
|
||||
struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
|
||||
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
|
||||
int i, j;
|
||||
int err = GNTST_okay;
|
||||
|
||||
if (nr_grefs > XENBUS_MAX_RING_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < nr_grefs; i++) {
|
||||
memset(&map[i], 0, sizeof(map[i]));
|
||||
gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
|
||||
dev->otherend_id);
|
||||
handles[i] = INVALID_GRANT_HANDLE;
|
||||
}
|
||||
|
||||
gnttab_batch_map(map, i);
|
||||
|
||||
for (i = 0; i < nr_grefs; i++) {
|
||||
if (map[i].status != GNTST_okay) {
|
||||
err = map[i].status;
|
||||
xenbus_dev_fatal(dev, map[i].status,
|
||||
"mapping in shared page %d from domain %d",
|
||||
gnt_refs[i], dev->otherend_id);
|
||||
goto fail;
|
||||
} else
|
||||
handles[i] = map[i].handle;
|
||||
}
|
||||
|
||||
return GNTST_okay;
|
||||
|
||||
fail:
|
||||
for (i = j = 0; i < nr_grefs; i++) {
|
||||
if (handles[i] != INVALID_GRANT_HANDLE) {
|
||||
memset(&unmap[j], 0, sizeof(unmap[j]));
|
||||
gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
|
||||
GNTMAP_host_map, handles[i]);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
|
||||
BUG();
|
||||
|
||||
*leaked = false;
|
||||
for (i = 0; i < j; i++) {
|
||||
if (unmap[i].status != GNTST_okay) {
|
||||
*leaked = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
|
||||
grant_ref_t *gnt_refs,
|
||||
unsigned int nr_grefs,
|
||||
void **vaddr)
|
||||
{
|
||||
struct gnttab_map_grant_ref op = {
|
||||
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
|
||||
.ref = gnt_ref,
|
||||
.dom = dev->otherend_id,
|
||||
};
|
||||
struct xenbus_map_node *node;
|
||||
struct vm_struct *area;
|
||||
pte_t *pte;
|
||||
pte_t *ptes[XENBUS_MAX_RING_PAGES];
|
||||
phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
|
||||
int err = GNTST_okay;
|
||||
int i;
|
||||
bool leaked;
|
||||
|
||||
*vaddr = NULL;
|
||||
|
||||
if (nr_grefs > XENBUS_MAX_RING_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
area = alloc_vm_area(PAGE_SIZE, &pte);
|
||||
area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes);
|
||||
if (!area) {
|
||||
kfree(node);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
|
||||
for (i = 0; i < nr_grefs; i++)
|
||||
phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
|
||||
|
||||
gnttab_batch_map(&op, 1);
|
||||
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
|
||||
phys_addrs,
|
||||
GNTMAP_host_map | GNTMAP_contains_pte,
|
||||
&leaked);
|
||||
if (err)
|
||||
goto failed;
|
||||
|
||||
if (op.status != GNTST_okay) {
|
||||
free_vm_area(area);
|
||||
kfree(node);
|
||||
xenbus_dev_fatal(dev, op.status,
|
||||
"mapping in shared page %d from domain %d",
|
||||
gnt_ref, dev->otherend_id);
|
||||
return op.status;
|
||||
}
|
||||
|
||||
node->handle = op.handle;
|
||||
node->area = area;
|
||||
node->nr_handles = nr_grefs;
|
||||
node->pv.area = area;
|
||||
|
||||
spin_lock(&xenbus_valloc_lock);
|
||||
list_add(&node->next, &xenbus_valloc_pages);
|
||||
@@ -482,14 +580,33 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
|
||||
|
||||
*vaddr = area->addr;
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
if (!leaked)
|
||||
free_vm_area(area);
|
||||
else
|
||||
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
|
||||
|
||||
kfree(node);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
|
||||
int gnt_ref, void **vaddr)
|
||||
grant_ref_t *gnt_ref,
|
||||
unsigned int nr_grefs,
|
||||
void **vaddr)
|
||||
{
|
||||
struct xenbus_map_node *node;
|
||||
int i;
|
||||
int err;
|
||||
void *addr;
|
||||
bool leaked = false;
|
||||
/* Why do we need two arrays? See comment of __xenbus_map_ring */
|
||||
phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
|
||||
unsigned long addrs[XENBUS_MAX_RING_PAGES];
|
||||
|
||||
if (nr_grefs > XENBUS_MAX_RING_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
*vaddr = NULL;
|
||||
|
||||
@@ -497,15 +614,32 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
|
||||
err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
|
||||
false /* lowmem */);
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
addr = pfn_to_kaddr(page_to_pfn(node->page));
|
||||
for (i = 0; i < nr_grefs; i++) {
|
||||
unsigned long pfn = page_to_pfn(node->hvm.pages[i]);
|
||||
phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
|
||||
addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
|
||||
}
|
||||
|
||||
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
|
||||
phys_addrs, GNTMAP_host_map, &leaked);
|
||||
node->nr_handles = nr_grefs;
|
||||
|
||||
err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
|
||||
if (err)
|
||||
goto out_err_free_ballooned_pages;
|
||||
goto out_free_ballooned_pages;
|
||||
|
||||
addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP,
|
||||
PAGE_KERNEL);
|
||||
if (!addr) {
|
||||
err = -ENOMEM;
|
||||
goto out_xenbus_unmap_ring;
|
||||
}
|
||||
|
||||
node->hvm.addr = addr;
|
||||
|
||||
spin_lock(&xenbus_valloc_lock);
|
||||
list_add(&node->next, &xenbus_valloc_pages);
|
||||
@@ -514,8 +648,16 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
|
||||
*vaddr = addr;
|
||||
return 0;
|
||||
|
||||
out_err_free_ballooned_pages:
|
||||
free_xenballooned_pages(1, &node->page);
|
||||
out_xenbus_unmap_ring:
|
||||
if (!leaked)
|
||||
xenbus_unmap_ring(dev, node->handles, node->nr_handles,
|
||||
addrs);
|
||||
else
|
||||
pr_alert("leaking %p size %u page(s)",
|
||||
addr, nr_grefs);
|
||||
out_free_ballooned_pages:
|
||||
if (!leaked)
|
||||
free_xenballooned_pages(nr_grefs, node->hvm.pages);
|
||||
out_err:
|
||||
kfree(node);
|
||||
return err;
|
||||
@@ -525,35 +667,37 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
|
||||
/**
|
||||
* xenbus_map_ring
|
||||
* @dev: xenbus device
|
||||
* @gnt_ref: grant reference
|
||||
* @handle: pointer to grant handle to be filled
|
||||
* @vaddr: address to be mapped to
|
||||
* @gnt_refs: grant reference array
|
||||
* @nr_grefs: number of grant reference
|
||||
* @handles: pointer to grant handle to be filled
|
||||
* @vaddrs: addresses to be mapped to
|
||||
* @leaked: fail to clean up a failed map, caller should not free vaddr
|
||||
*
|
||||
* Map a page of memory into this domain from another domain's grant table.
|
||||
* Map pages of memory into this domain from another domain's grant table.
|
||||
* xenbus_map_ring does not allocate the virtual address space (you must do
|
||||
* this yourself!). It only maps in the page to the specified address.
|
||||
* this yourself!). It only maps in the pages to the specified address.
|
||||
* Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
|
||||
* or -ENOMEM on error. If an error is returned, device will switch to
|
||||
* XenbusStateClosing and the error message will be saved in XenStore.
|
||||
* or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
|
||||
* XenbusStateClosing and the first error message will be saved in XenStore.
|
||||
* Further more if we fail to map the ring, caller should check @leaked.
|
||||
* If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
|
||||
* should not free the address space of @vaddr.
|
||||
*/
|
||||
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
|
||||
grant_handle_t *handle, void *vaddr)
|
||||
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
|
||||
unsigned int nr_grefs, grant_handle_t *handles,
|
||||
unsigned long *vaddrs, bool *leaked)
|
||||
{
|
||||
struct gnttab_map_grant_ref op;
|
||||
phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
|
||||
int i;
|
||||
|
||||
gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
|
||||
dev->otherend_id);
|
||||
if (nr_grefs > XENBUS_MAX_RING_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
gnttab_batch_map(&op, 1);
|
||||
for (i = 0; i < nr_grefs; i++)
|
||||
phys_addrs[i] = (unsigned long)vaddrs[i];
|
||||
|
||||
if (op.status != GNTST_okay) {
|
||||
xenbus_dev_fatal(dev, op.status,
|
||||
"mapping in shared page %d from domain %d",
|
||||
gnt_ref, dev->otherend_id);
|
||||
} else
|
||||
*handle = op.handle;
|
||||
|
||||
return op.status;
|
||||
return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
|
||||
phys_addrs, GNTMAP_host_map, leaked);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xenbus_map_ring);
|
||||
|
||||
@@ -579,14 +723,15 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
|
||||
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
|
||||
{
|
||||
struct xenbus_map_node *node;
|
||||
struct gnttab_unmap_grant_ref op = {
|
||||
.host_addr = (unsigned long)vaddr,
|
||||
};
|
||||
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
|
||||
unsigned int level;
|
||||
int i;
|
||||
bool leaked = false;
|
||||
int err;
|
||||
|
||||
spin_lock(&xenbus_valloc_lock);
|
||||
list_for_each_entry(node, &xenbus_valloc_pages, next) {
|
||||
if (node->area->addr == vaddr) {
|
||||
if (node->pv.area->addr == vaddr) {
|
||||
list_del(&node->next);
|
||||
goto found;
|
||||
}
|
||||
@@ -601,22 +746,41 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
|
||||
return GNTST_bad_virt_addr;
|
||||
}
|
||||
|
||||
op.handle = node->handle;
|
||||
op.host_addr = arbitrary_virt_to_machine(
|
||||
lookup_address((unsigned long)vaddr, &level)).maddr;
|
||||
for (i = 0; i < node->nr_handles; i++) {
|
||||
unsigned long addr;
|
||||
|
||||
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
|
||||
memset(&unmap[i], 0, sizeof(unmap[i]));
|
||||
addr = (unsigned long)vaddr + (PAGE_SIZE * i);
|
||||
unmap[i].host_addr = arbitrary_virt_to_machine(
|
||||
lookup_address(addr, &level)).maddr;
|
||||
unmap[i].dev_bus_addr = 0;
|
||||
unmap[i].handle = node->handles[i];
|
||||
}
|
||||
|
||||
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
|
||||
BUG();
|
||||
|
||||
if (op.status == GNTST_okay)
|
||||
free_vm_area(node->area);
|
||||
err = GNTST_okay;
|
||||
leaked = false;
|
||||
for (i = 0; i < node->nr_handles; i++) {
|
||||
if (unmap[i].status != GNTST_okay) {
|
||||
leaked = true;
|
||||
xenbus_dev_error(dev, unmap[i].status,
|
||||
"unmapping page at handle %d error %d",
|
||||
node->handles[i], unmap[i].status);
|
||||
err = unmap[i].status;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!leaked)
|
||||
free_vm_area(node->pv.area);
|
||||
else
|
||||
xenbus_dev_error(dev, op.status,
|
||||
"unmapping page at handle %d error %d",
|
||||
node->handle, op.status);
|
||||
pr_alert("leaking VM area %p size %u page(s)",
|
||||
node->pv.area, node->nr_handles);
|
||||
|
||||
kfree(node);
|
||||
return op.status;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
|
||||
@@ -624,10 +788,12 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
|
||||
int rv;
|
||||
struct xenbus_map_node *node;
|
||||
void *addr;
|
||||
unsigned long addrs[XENBUS_MAX_RING_PAGES];
|
||||
int i;
|
||||
|
||||
spin_lock(&xenbus_valloc_lock);
|
||||
list_for_each_entry(node, &xenbus_valloc_pages, next) {
|
||||
addr = pfn_to_kaddr(page_to_pfn(node->page));
|
||||
addr = node->hvm.addr;
|
||||
if (addr == vaddr) {
|
||||
list_del(&node->next);
|
||||
goto found;
|
||||
@@ -643,12 +809,16 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
|
||||
return GNTST_bad_virt_addr;
|
||||
}
|
||||
|
||||
rv = xenbus_unmap_ring(dev, node->handle, addr);
|
||||
for (i = 0; i < node->nr_handles; i++)
|
||||
addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i]));
|
||||
|
||||
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
|
||||
addrs);
|
||||
if (!rv)
|
||||
free_xenballooned_pages(1, &node->page);
|
||||
vunmap(vaddr);
|
||||
else
|
||||
WARN(1, "Leaking %p\n", vaddr);
|
||||
WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
|
||||
node->nr_handles);
|
||||
|
||||
kfree(node);
|
||||
return rv;
|
||||
@@ -657,29 +827,44 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
|
||||
/**
|
||||
* xenbus_unmap_ring
|
||||
* @dev: xenbus device
|
||||
* @handle: grant handle
|
||||
* @vaddr: addr to unmap
|
||||
* @handles: grant handle array
|
||||
* @nr_handles: number of handles in the array
|
||||
* @vaddrs: addresses to unmap
|
||||
*
|
||||
* Unmap a page of memory in this domain that was imported from another domain.
|
||||
* Unmap memory in this domain that was imported from another domain.
|
||||
* Returns 0 on success and returns GNTST_* on error
|
||||
* (see xen/include/interface/grant_table.h).
|
||||
*/
|
||||
int xenbus_unmap_ring(struct xenbus_device *dev,
|
||||
grant_handle_t handle, void *vaddr)
|
||||
grant_handle_t *handles, unsigned int nr_handles,
|
||||
unsigned long *vaddrs)
|
||||
{
|
||||
struct gnttab_unmap_grant_ref op;
|
||||
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
|
||||
int i;
|
||||
int err;
|
||||
|
||||
gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
|
||||
if (nr_handles > XENBUS_MAX_RING_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
|
||||
for (i = 0; i < nr_handles; i++)
|
||||
gnttab_set_unmap_op(&unmap[i], vaddrs[i],
|
||||
GNTMAP_host_map, handles[i]);
|
||||
|
||||
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
|
||||
BUG();
|
||||
|
||||
if (op.status != GNTST_okay)
|
||||
xenbus_dev_error(dev, op.status,
|
||||
"unmapping page at handle %d error %d",
|
||||
handle, op.status);
|
||||
err = GNTST_okay;
|
||||
for (i = 0; i < nr_handles; i++) {
|
||||
if (unmap[i].status != GNTST_okay) {
|
||||
xenbus_dev_error(dev, unmap[i].status,
|
||||
"unmapping page at handle %d error %d",
|
||||
handles[i], unmap[i].status);
|
||||
err = unmap[i].status;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return op.status;
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
|
||||
|
||||
|
drivers/xen/xlate_mmu.c (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
/*
|
||||
* MMU operations common to all auto-translated physmap guests.
|
||||
*
|
||||
* Copyright (C) 2015 Citrix Systems R&D Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation; or, when distributed
|
||||
* separately from the Linux kernel or incorporated into other
|
||||
* software packages, subject to the following license:
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this source file (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use, copy, modify,
|
||||
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
* and to permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/xen/hypercall.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/page.h>
|
||||
#include <xen/interface/xen.h>
|
||||
#include <xen/interface/memory.h>
|
||||
|
||||
/* map fgmfn of domid to lpfn in the current domain */
|
||||
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
|
||||
unsigned int domid)
|
||||
{
|
||||
int rc;
|
||||
struct xen_add_to_physmap_range xatp = {
|
||||
.domid = DOMID_SELF,
|
||||
.foreign_domid = domid,
|
||||
.size = 1,
|
||||
.space = XENMAPSPACE_gmfn_foreign,
|
||||
};
|
||||
xen_ulong_t idx = fgmfn;
|
||||
xen_pfn_t gpfn = lpfn;
|
||||
int err = 0;
|
||||
|
||||
set_xen_guest_handle(xatp.idxs, &idx);
|
||||
set_xen_guest_handle(xatp.gpfns, &gpfn);
|
||||
set_xen_guest_handle(xatp.errs, &err);
|
||||
|
||||
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
|
||||
return rc < 0 ? rc : err;
|
||||
}
|
||||
|
||||
struct remap_data {
|
||||
xen_pfn_t *fgmfn; /* foreign domain's gmfn */
|
||||
pgprot_t prot;
|
||||
domid_t domid;
|
||||
struct vm_area_struct *vma;
|
||||
int index;
|
||||
struct page **pages;
|
||||
struct xen_remap_mfn_info *info;
|
||||
int *err_ptr;
|
||||
int mapped;
|
||||
};
|
||||
|
||||
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
|
||||
void *data)
|
||||
{
|
||||
struct remap_data *info = data;
|
||||
struct page *page = info->pages[info->index++];
|
||||
unsigned long pfn = page_to_pfn(page);
|
||||
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
|
||||
int rc;
|
||||
|
||||
rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
|
||||
*info->err_ptr++ = rc;
|
||||
if (!rc) {
|
||||
set_pte_at(info->vma->vm_mm, addr, ptep, pte);
|
||||
info->mapped++;
|
||||
}
|
||||
info->fgmfn++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
xen_pfn_t *mfn, int nr,
|
||||
int *err_ptr, pgprot_t prot,
|
||||
unsigned domid,
|
||||
struct page **pages)
|
||||
{
|
||||
int err;
|
||||
struct remap_data data;
|
||||
unsigned long range = nr << PAGE_SHIFT;
|
||||
|
||||
/* Kept here for the purpose of making sure code doesn't break
|
||||
x86 PVOPS */
|
||||
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
|
||||
|
||||
data.fgmfn = mfn;
|
||||
data.prot = prot;
|
||||
data.domid = domid;
|
||||
data.vma = vma;
|
||||
data.pages = pages;
|
||||
data.index = 0;
|
||||
data.err_ptr = err_ptr;
|
||||
data.mapped = 0;
|
||||
|
||||
err = apply_to_page_range(vma->vm_mm, addr, range,
|
||||
remap_pte_fn, &data);
|
||||
return err < 0 ? err : data.mapped;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
|
||||
|
||||
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
|
||||
int nr, struct page **pages)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
struct xen_remove_from_physmap xrp;
|
||||
unsigned long pfn;
|
||||
|
||||
pfn = page_to_pfn(pages[i]);
|
||||
|
||||
xrp.domid = DOMID_SELF;
|
||||
xrp.gpfn = pfn;
|
||||
(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
|