Merge tag 'v5.5-rc3' into sched/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar on 2019-12-25 10:41:37 +01:00

2365 changed files with 73848 additions and 24153 deletions

--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -394,7 +394,8 @@ static struct notifier_block xen_memory_nb = {
 #else
 static enum bp_state reserve_additional_memory(void)
 {
-	balloon_stats.target_pages = balloon_stats.current_pages;
+	balloon_stats.target_pages = balloon_stats.current_pages +
+				     balloon_stats.target_unpopulated;
 	return BP_ECANCELED;
 }
 #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
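
The balloon.c hunk is pure accounting: with memory hotplug compiled out the driver can never add memory, so the stub pins the target to what is populated right now plus whatever has been deliberately ballooned out for other users. A minimal standalone sketch of that invariant, assuming target_unpopulated counts pages handed out by alloc_xenballooned_pages() and not yet returned (the model struct below is illustrative, not the kernel's balloon_stats):

/* balloon_target_model.c - illustrative model, not kernel code */
#include <stdio.h>

struct balloon_model {
	unsigned long current_pages;      /* pages populated right now          */
	unsigned long target_pages;       /* page count the balloon aims for    */
	unsigned long target_unpopulated; /* pages lent out while ballooned out */
};

/* Mirrors the !CONFIG_XEN_BALLOON_MEMORY_HOTPLUG stub after the hunk above:
 * the lent-out pages are part of the target, so they are no longer treated
 * as a deficit the balloon should try to reclaim.
 */
static void cancel_additional_memory(struct balloon_model *b)
{
	b->target_pages = b->current_pages + b->target_unpopulated;
}

int main(void)
{
	struct balloon_model b = {
		.current_pages = 1000, .target_unpopulated = 8,
	};

	cancel_additional_memory(&b);
	printf("target_pages = %lu\n", b.target_pages); /* prints 1008 */
	return 0;
}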

--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1213,31 +1213,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 	notify_remote_via_irq(irq);
 }
 
-static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-
 static void __xen_evtchn_do_upcall(void)
 {
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-	int cpu = get_cpu();
-	unsigned count;
+	int cpu = smp_processor_id();
 
 	do {
 		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__this_cpu_inc_return(xed_nesting_count) - 1)
-			goto out;
-
 		xen_evtchn_handle_events(cpu);
 
 		BUG_ON(!irqs_disabled());
 
-		count = __this_cpu_read(xed_nesting_count);
-		__this_cpu_write(xed_nesting_count, 0);
-	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
+		virt_rmb(); /* Hypervisor can set upcall pending. */
 
-out:
-	put_cpu();
+	} while (vcpu_info->evtchn_upcall_pending);
 }
 
 void xen_evtchn_do_upcall(struct pt_regs *regs)
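
The events_base.c change drops the per-CPU nesting counter and keeps only the classic "clear the pending flag, handle events, read barrier, re-check" loop. A hedged userspace analog of that loop shape using C11 atomics (do_upcall(), handle_events() and upcall_pending are stand-ins, not Xen APIs; the acquire fence plays roughly the role the kernel's virt_rmb() plays above):

/* upcall_loop.c - illustrative analog, not the kernel implementation */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for vcpu_info->evtchn_upcall_pending: in the real driver the
 * hypervisor may set this flag again at any time from another context.
 */
static atomic_bool upcall_pending;

static void handle_events(void)
{
	/* Stand-in for xen_evtchn_handle_events(): scan and dispatch. */
}

static void do_upcall(void)
{
	do {
		/* Clear the flag before scanning, so an event raised while
		 * we scan re-sets it and forces another pass of the loop.
		 */
		atomic_store_explicit(&upcall_pending, false,
				      memory_order_relaxed);

		handle_events();

		/* Keep the reads done while handling events ordered before
		 * the re-read of the flag below.
		 */
		atomic_thread_fence(memory_order_acquire);

	} while (atomic_load_explicit(&upcall_pending,
				      memory_order_relaxed));
}

int main(void)
{
	atomic_store(&upcall_pending, true);
	do_upcall(); /* loops until nothing is left pending */
	puts("done");
	return 0;
}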

--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -81,7 +81,7 @@ void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
 void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
 
-bool gntdev_account_mapped_pages(int count);
+bool gntdev_test_page_count(unsigned int count);
 
 int gntdev_map_grant_pages(struct gntdev_grant_map *map);

--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -446,7 +446,7 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
 {
 	struct gntdev_grant_map *map;
 
-	if (unlikely(count <= 0))
+	if (unlikely(gntdev_test_page_count(count)))
 		return ERR_PTR(-EINVAL);
 
 	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
@@ -459,11 +459,6 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(gntdev_account_mapped_pages(count))) {
-		pr_debug("can't map %d pages: over limit\n", count);
-		gntdev_put_map(NULL, map);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	return map;
 }
 
@@ -771,7 +766,7 @@ long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
@@ -818,7 +813,7 @@ long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,

--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -55,12 +55,10 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
 	      "Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_DESCRIPTION("User-space granted page access driver");
 
-static int limit = 1024*1024;
-module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
-		"the gntdev device");
-
-static atomic_t pages_mapped = ATOMIC_INIT(0);
+static unsigned int limit = 64*1024;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit,
+	"Maximum number of grants that may be mapped by one mapping request");
 
 static int use_ptemod;
@@ -71,9 +69,9 @@ static struct miscdevice gntdev_miscdev;
 
 /* ------------------------------------------------------------------ */
 
-bool gntdev_account_mapped_pages(int count)
+bool gntdev_test_page_count(unsigned int count)
 {
-	return atomic_add_return(count, &pages_mapped) > limit;
+	return !count || count > limit;
 }
 
 static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -114,14 +112,14 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
 		gnttab_free_pages(map->count, map->pages);
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-	kfree(map->frames);
+	kvfree(map->frames);
 #endif
-	kfree(map->pages);
-	kfree(map->grants);
-	kfree(map->map_ops);
-	kfree(map->unmap_ops);
-	kfree(map->kmap_ops);
-	kfree(map->kunmap_ops);
+	kvfree(map->pages);
+	kvfree(map->grants);
+	kvfree(map->map_ops);
+	kvfree(map->unmap_ops);
+	kvfree(map->kmap_ops);
+	kvfree(map->kunmap_ops);
 	kfree(map);
 }
@@ -135,12 +133,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	if (NULL == add)
 		return NULL;
 
-	add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
-	add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
-	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
-	add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
-	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
-	add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+	add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+	add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+	add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+	add->kunmap_ops = kvcalloc(count,
+				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
+	add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 	if (NULL == add->grants    ||
 	    NULL == add->map_ops   ||
 	    NULL == add->unmap_ops ||
@@ -159,8 +158,8 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
 		struct gnttab_dma_alloc_args args;
 
-		add->frames = kcalloc(count, sizeof(add->frames[0]),
-				      GFP_KERNEL);
+		add->frames = kvcalloc(count, sizeof(add->frames[0]),
+				       GFP_KERNEL);
 		if (!add->frames)
 			goto err;
@@ -241,8 +240,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	if (!refcount_dec_and_test(&map->users))
 		return;
 
-	atomic_sub(map->count, &pages_mapped);
-
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
@@ -506,7 +503,6 @@ static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
 static int gntdev_open(struct inode *inode, struct file *flip)
 {
 	struct gntdev_priv *priv;
-	int ret = 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -518,17 +514,13 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
 	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 	if (IS_ERR(priv->dmabuf_priv)) {
-		ret = PTR_ERR(priv->dmabuf_priv);
+		int ret = PTR_ERR(priv->dmabuf_priv);
+
 		kfree(priv);
 		return ret;
 	}
 #endif
 
-	if (ret) {
-		kfree(priv);
-		return ret;
-	}
-
 	flip->private_data = priv;
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 	priv->dma_dev = gntdev_miscdev.this_device;
@@ -573,7 +565,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 	pr_debug("priv %p, add %d\n", priv, op.count);
-	if (unlikely(op.count <= 0))
+	if (unlikely(gntdev_test_page_count(op.count)))
 		return -EINVAL;
 
 	err = -ENOMEM;
@@ -581,12 +573,6 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 	if (!map)
 		return err;
 
-	if (unlikely(gntdev_account_mapped_pages(op.count))) {
-		pr_debug("can't map: over limit\n");
-		gntdev_put_map(NULL, map);
-		return err;
-	}
-
 	if (copy_from_user(map->grants, &u->refs,
 			   sizeof(map->grants[0]) * op.count) != 0) {
 		gntdev_put_map(NULL, map);
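
Two things change in gntdev.c: the global pages_mapped accounting is replaced by a per-request bound (gntdev_test_page_count() above, with the default limit dropping from 1024*1024 to 64*1024 grants per mapping request), and the per-map arrays move to kvcalloc()/kvfree() so large requests can fall back to vmalloc-backed allocations. A standalone sketch of the new bound, using the 64*1024 default as a plain constant (illustrative only, not the driver source):

/* page_count_check.c - illustrative only */
#include <stdbool.h>
#include <stdio.h>

/* Default cap on grants per mapping request; in the driver this is the
 * writable 'limit' module parameter, here it is just a constant.
 */
static const unsigned int limit = 64 * 1024;

/* Same shape as gntdev_test_page_count(): true means "reject".  Because the
 * argument is unsigned, a negative count passed as a signed int (which some
 * of the old 'count <= 0' checks guarded against) wraps to a huge value and
 * trips the 'count > limit' arm instead.
 */
static bool test_page_count(unsigned int count)
{
	return !count || count > limit;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       test_page_count(0),            /* 1: rejected           */
	       test_page_count(1),            /* 0: accepted           */
	       test_page_count(64 * 1024),    /* 0: exactly at the cap */
	       test_page_count(64 * 1024 + 1) /* 1: rejected           */);
	return 0;
}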

--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -664,7 +664,6 @@ static int grow_gnttab_list(unsigned int more_frames)
 	unsigned int nr_glist_frames, new_nr_glist_frames;
 	unsigned int grefs_per_frame;
 
-	BUG_ON(gnttab_interface == NULL);
 	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 
 	new_nr_grant_frames = nr_grant_frames + more_frames;
@@ -1160,7 +1159,6 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
 
 static unsigned int nr_status_frames(unsigned int nr_grant_frames)
 {
-	BUG_ON(gnttab_interface == NULL);
 	return gnttab_frames(nr_grant_frames, SPP);
 }
 
@@ -1388,7 +1386,6 @@ static int gnttab_expand(unsigned int req_entries)
 	int rc;
 	unsigned int cur, extra;
 
-	BUG_ON(gnttab_interface == NULL);
 	cur = nr_grant_frames;
 	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
 		 gnttab_interface->grefs_per_grant_frame);
@@ -1423,7 +1420,6 @@ int gnttab_init(void)
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
-	BUG_ON(gnttab_interface == NULL);
 	max_nr_glist_frames = (max_nr_grant_frames *
 			       gnttab_interface->grefs_per_grant_frame / RPP);

--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -116,8 +116,6 @@ int xenbus_probe_devices(struct xen_bus_type *bus);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
-void xenbus_dev_shutdown(struct device *_dev);
-
 int xenbus_dev_suspend(struct device *dev);
 int xenbus_dev_resume(struct device *dev);
 int xenbus_dev_cancel(struct device *dev);

--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -232,9 +232,16 @@ int xenbus_dev_probe(struct device *_dev)
 		return err;
 	}
 
+	if (!try_module_get(drv->driver.owner)) {
+		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
+			 drv->driver.name);
+		err = -ESRCH;
+		goto fail;
+	}
+
 	err = drv->probe(dev, id);
 	if (err)
-		goto fail;
+		goto fail_put;
 
 	err = watch_otherend(dev);
 	if (err) {
@@ -244,9 +251,10 @@ int xenbus_dev_probe(struct device *_dev)
 	}
 
 	return 0;
+fail_put:
+	module_put(drv->driver.owner);
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-	xenbus_switch_state(dev, XenbusStateClosed);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_probe);
@@ -263,36 +271,24 @@ int xenbus_dev_remove(struct device *_dev)
 	if (drv->remove)
 		drv->remove(dev);
 
+	module_put(drv->driver.owner);
+
 	free_otherend_details(dev);
 
-	xenbus_switch_state(dev, XenbusStateClosed);
+	/*
+	 * If the toolstack has forced the device state to closing then set
+	 * the state to closed now to allow it to be cleaned up.
+	 * Similarly, if the driver does not support re-bind, set it to
+	 * closed.
+	 */
+	if (!drv->allow_rebind ||
+	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
+		xenbus_switch_state(dev, XenbusStateClosed);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
-void xenbus_dev_shutdown(struct device *_dev)
-{
-	struct xenbus_device *dev = to_xenbus_device(_dev);
-	unsigned long timeout = 5*HZ;
-
-	DPRINTK("%s", dev->nodename);
-
-	get_device(&dev->dev);
-	if (dev->state != XenbusStateConnected) {
-		pr_info("%s: %s: %s != Connected, skipping\n",
-			__func__, dev->nodename, xenbus_strstate(dev->state));
-		goto out;
-	}
-	xenbus_switch_state(dev, XenbusStateClosing);
-	timeout = wait_for_completion_timeout(&dev->down, timeout);
-	if (!timeout)
-		pr_info("%s: %s timeout closing device\n",
-			__func__, dev->nodename);
-out:
-	put_device(&dev->dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
-
 int xenbus_register_driver_common(struct xenbus_driver *drv,
 				  struct xen_bus_type *bus,
 				  struct module *owner, const char *mod_name)
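
These xenbus_probe.c hunks do two independent things: they pin the driver's backing module for as long as a device is bound to it (try_module_get() before calling probe, module_put() on probe failure and in remove), and they stop forcing the device state to Closed unconditionally, doing so only when the toolstack requested a close or the driver cannot be re-bound; the generic shutdown helper moves out of the common code entirely (see the frontend-only version below). A hedged, generic sketch of the module-pinning half, in kernel style (the foo_bus_probe()/probe_cb names are hypothetical, not xenbus code):

#include <linux/device.h>
#include <linux/module.h>

/*
 * Generic shape of the probe-side change: take a reference on the driver
 * module before calling its probe hook so it cannot be unloaded while a
 * device is bound, drop that reference if probe fails, and (as in the
 * xenbus_dev_remove() hunk above) drop it after the remove hook has run.
 */
static int foo_bus_probe(struct device *dev, struct device_driver *drv,
			 int (*probe_cb)(struct device *dev))
{
	int err;

	if (!try_module_get(drv->owner))
		return -ESRCH;		/* driver module is being unloaded */

	err = probe_cb(dev);
	if (err)
		module_put(drv->owner);	/* undo the reference on failure */

	return err;
}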

--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -198,7 +198,6 @@ static struct xen_bus_type xenbus_backend = {
 		.uevent = xenbus_uevent_backend,
 		.probe = xenbus_dev_probe,
 		.remove = xenbus_dev_remove,
-		.shutdown = xenbus_dev_shutdown,
 		.dev_groups = xenbus_dev_groups,
 	},
 };

--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -126,6 +126,28 @@ static int xenbus_frontend_dev_probe(struct device *dev)
 	return xenbus_dev_probe(dev);
 }
 
+static void xenbus_frontend_dev_shutdown(struct device *_dev)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+	unsigned long timeout = 5*HZ;
+
+	DPRINTK("%s", dev->nodename);
+
+	get_device(&dev->dev);
+	if (dev->state != XenbusStateConnected) {
+		pr_info("%s: %s: %s != Connected, skipping\n",
+			__func__, dev->nodename, xenbus_strstate(dev->state));
+		goto out;
+	}
+	xenbus_switch_state(dev, XenbusStateClosing);
+	timeout = wait_for_completion_timeout(&dev->down, timeout);
+	if (!timeout)
+		pr_info("%s: %s timeout closing device\n",
+			__func__, dev->nodename);
+out:
+	put_device(&dev->dev);
+}
+
 static const struct dev_pm_ops xenbus_pm_ops = {
 	.suspend = xenbus_dev_suspend,
 	.resume = xenbus_frontend_dev_resume,
@@ -146,7 +168,7 @@ static struct xen_bus_type xenbus_frontend = {
 		.uevent = xenbus_uevent_frontend,
 		.probe = xenbus_frontend_dev_probe,
 		.remove = xenbus_dev_remove,
-		.shutdown = xenbus_dev_shutdown,
+		.shutdown = xenbus_frontend_dev_shutdown,
 		.dev_groups = xenbus_dev_groups,
 		.pm = &xenbus_pm_ops,