Merge 4.17-rc3 into char-misc-next

We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

drivers/char/random.c

@@ -261,6 +261,7 @@
 #include <linux/ptrace.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
@@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
 static void process_random_ready_list(void);
 static void _get_random_bytes(void *buf, int nbytes);
 
+static struct ratelimit_state unseeded_warning =
+	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
 /**********************************************************************
  *
  * OS independent entropy store. Here are the functions which handle
@@ -789,7 +800,7 @@ static void crng_initialize(struct crng_state *crng)
 }
 
 #ifdef CONFIG_NUMA
-static void numa_crng_init(void)
+static void do_numa_crng_init(struct work_struct *work)
 {
 	int i;
 	struct crng_state *crng;
@@ -810,6 +821,13 @@ static void numa_crng_init(void)
 			kfree(pool);
 	}
 }
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+	schedule_work(&numa_crng_init_work);
+}
 #else
 static void numa_crng_init(void) {}
 #endif
@@ -925,6 +943,18 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
+		if (unseeded_warning.missed) {
+			pr_notice("random: %d get_random_xx warning(s) missed "
+				  "due to ratelimiting\n",
+				  unseeded_warning.missed);
+			unseeded_warning.missed = 0;
+		}
+		if (urandom_warning.missed) {
+			pr_notice("random: %d urandom warning(s) missed "
+				  "due to ratelimiting\n",
+				  urandom_warning.missed);
+			urandom_warning.missed = 0;
+		}
 	}
 }
 
@@ -1565,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	print_once = true;
 #endif
-	pr_notice("random: %s called from %pS with crng_init=%d\n",
-		  func_name, caller, crng_init);
+	if (__ratelimit(&unseeded_warning))
+		pr_notice("random: %s called from %pS with crng_init=%d\n",
+			  func_name, caller, crng_init);
 }
 
 /*
@@ -1760,6 +1791,10 @@ static int rand_initialize(void)
 	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
 	crng_global_init_time = jiffies;
+	if (ratelimit_disable) {
+		urandom_warning.interval = 0;
+		unseeded_warning.interval = 0;
+	}
 	return 0;
 }
 early_initcall(rand_initialize);
@@ -1827,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 
 	if (!crng_ready() && maxwarn > 0) {
 		maxwarn--;
-		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
-		       "(%zd bytes read)\n",
-		       current->comm, nbytes);
+		if (__ratelimit(&urandom_warning))
+			printk(KERN_NOTICE "random: %s: uninitialized "
+			       "urandom read (%zd bytes read)\n",
+			       current->comm, nbytes);
 		spin_lock_irqsave(&primary_crng.lock, flags);
 		crng_init_cnt = 0;
 		spin_unlock_irqrestore(&primary_crng.lock, flags);

drivers/char/virtio_console.c

@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
 	}
 }
 
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
				     int pages)
 {
 	struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
 		return buf;
 	}
 
-	if (is_rproc_serial(vq->vdev)) {
+	if (is_rproc_serial(vdev)) {
 		/*
 		 * Allocate DMA memory from ancestor. When a virtio
 		 * device is created by remoteproc, the DMA memory is
 		 * associated with the grandparent device:
 		 * vdev => rproc => platform-dev.
 		 */
-		if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+		if (!vdev->dev.parent || !vdev->dev.parent->parent)
 			goto free_buf;
-		buf->dev = vq->vdev->dev.parent->parent;
+		buf->dev = vdev->dev.parent->parent;
 
 		/* Increase device refcnt to avoid freeing it */
 		get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
 	count = min((size_t)(32 * 1024), count);
 
-	buf = alloc_buf(port->out_vq, count, 0);
+	buf = alloc_buf(port->portdev->vdev, count, 0);
 	if (!buf)
 		return -ENOMEM;
 
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 	if (ret < 0)
 		goto error_out;
 
-	buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+	buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
 	if (!buf) {
 		ret = -ENOMEM;
 		goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 
 	nr_added_bufs = 0;
 	do {
-		buf = alloc_buf(vq, PAGE_SIZE, 0);
+		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
 		if (!buf)
 			break;
 
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 {
 	char debugfs_name[16];
 	struct port *port;
-	struct port_buffer *buf;
 	dev_t devt;
 	unsigned int nr_added_bufs;
 	int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 	return 0;
 
 free_inbufs:
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf, true);
 free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
 
 static void remove_port_data(struct port *port)
 {
-	struct port_buffer *buf;
-
 	spin_lock_irq(&port->inbuf_lock);
 	/* Remove unused data this port might have received. */
 	discard_port_data(port);
 	spin_unlock_irq(&port->inbuf_lock);
 
-	/* Remove buffers we queued up for the Host to send us data in. */
-	do {
-		spin_lock_irq(&port->inbuf_lock);
-		buf = virtqueue_detach_unused_buf(port->in_vq);
-		spin_unlock_irq(&port->inbuf_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
-
 	spin_lock_irq(&port->outvq_lock);
 	reclaim_consumed_buffers(port);
 	spin_unlock_irq(&port->outvq_lock);
-
-	/* Free pending buffers from the out-queue. */
-	do {
-		spin_lock_irq(&port->outvq_lock);
-		buf = virtqueue_detach_unused_buf(port->out_vq);
-		spin_unlock_irq(&port->outvq_lock);
-		if (buf)
-			free_buf(buf, true);
-	} while (buf);
 }
 
 /*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
 	spin_unlock(&portdev->c_ivq_lock);
 }
 
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+	struct port_buffer *buf;
+	unsigned int len;
+
+	while ((buf = virtqueue_get_buf(vq, &len)))
+		free_buf(buf, can_sleep);
+}
+
 static void out_intr(struct virtqueue *vq)
 {
 	struct port *port;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	wake_up_interruptible(&port->waitqueue);
 }
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
 	unsigned long flags;
 
 	port = find_port_by_vq(vq->vdev->priv, vq);
-	if (!port)
+	if (!port) {
+		flush_bufs(vq, false);
 		return;
+	}
 
 	spin_lock_irqsave(&port->inbuf_lock, flags);
 	port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
 
 static void remove_vqs(struct ports_device *portdev)
 {
+	struct virtqueue *vq;
+
+	virtio_device_for_each_vq(portdev->vdev, vq) {
+		struct port_buffer *buf;
+
+		flush_bufs(vq, true);
+		while ((buf = virtqueue_detach_unused_buf(vq)))
+			free_buf(buf, true);
+	}
 	portdev->vdev->config->del_vqs(portdev->vdev);
 	kfree(portdev->in_vqs);
 	kfree(portdev->out_vqs);
 }
 
-static void remove_controlq_data(struct ports_device *portdev)
+static void virtcons_remove(struct virtio_device *vdev)
 {
-	struct port_buffer *buf;
-	unsigned int len;
+	struct ports_device *portdev;
+	struct port *port, *port2;
 
-	if (!use_multiport(portdev))
-		return;
+	portdev = vdev->priv;
 
-	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-		free_buf(buf, true);
+	spin_lock_irq(&pdrvdata_lock);
+	list_del(&portdev->list);
+	spin_unlock_irq(&pdrvdata_lock);
 
-	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-		free_buf(buf, true);
+	/* Disable interrupts for vqs */
+	vdev->config->reset(vdev);
+	/* Finish up work that's lined up */
+	if (use_multiport(portdev))
+		cancel_work_sync(&portdev->control_work);
+	else
+		cancel_work_sync(&portdev->config_work);
+
+	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+		unplug_port(port);
+
+	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+	/*
+	 * When yanking out a device, we immediately lose the
+	 * (device-side) queues. So there's no point in keeping the
+	 * guest side around till we drop our final reference. This
+	 * also means that any ports which are in an open state will
+	 * have to just stop using the port, as the vqs are going
+	 * away.
+	 */
+	remove_vqs(portdev);
+	kfree(portdev);
 }
 
 /*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	spin_lock_init(&portdev->ports_lock);
 	INIT_LIST_HEAD(&portdev->ports);
+	INIT_LIST_HEAD(&portdev->list);
 
 	virtio_device_ready(portdev->vdev);
 
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
 		if (!nr_added_bufs) {
 			dev_err(&vdev->dev,
 				"Error allocating buffers for control queue\n");
-			err = -ENOMEM;
-			goto free_vqs;
+			/*
+			 * The host might want to notify mgmt sw about device
+			 * add failure.
+			 */
+			__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+					   VIRTIO_CONSOLE_DEVICE_READY, 0);
+			/* Device was functional: we need full cleanup. */
+			virtcons_remove(vdev);
+			return -ENOMEM;
 		}
 	} else {
 		/*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	return 0;
 
-free_vqs:
-	/* The host might want to notify mgmt sw about device add failure */
-	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
-			   VIRTIO_CONSOLE_DEVICE_READY, 0);
-	remove_vqs(portdev);
 free_chrdev:
 	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 free:
@@ -2132,43 +2155,6 @@ fail:
 	return err;
 }
 
-static void virtcons_remove(struct virtio_device *vdev)
-{
-	struct ports_device *portdev;
-	struct port *port, *port2;
-
-	portdev = vdev->priv;
-
-	spin_lock_irq(&pdrvdata_lock);
-	list_del(&portdev->list);
-	spin_unlock_irq(&pdrvdata_lock);
-
-	/* Disable interrupts for vqs */
-	vdev->config->reset(vdev);
-	/* Finish up work that's lined up */
-	if (use_multiport(portdev))
-		cancel_work_sync(&portdev->control_work);
-	else
-		cancel_work_sync(&portdev->config_work);
-
-	list_for_each_entry_safe(port, port2, &portdev->ports, list)
-		unplug_port(port);
-
-	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
-
-	/*
-	 * When yanking out a device, we immediately lose the
-	 * (device-side) queues. So there's no point in keeping the
-	 * guest side around till we drop our final reference. This
-	 * also means that any ports which are in an open state will
-	 * have to just stop using the port, as the vqs are going
-	 * away.
-	 */
-	remove_controlq_data(portdev);
-	remove_vqs(portdev);
-	kfree(portdev);
-}
-
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
 	 */
 	if (use_multiport(portdev))
 		virtqueue_disable_cb(portdev->c_ivq);
-	remove_controlq_data(portdev);
 
 	list_for_each_entry(port, &portdev->ports, list) {
 		virtqueue_disable_cb(port->in_vq);