Merge 5.10.5 into android12-5.10

Changes in 5.10.5
	net/sched: sch_taprio: reset child qdiscs before freeing them
	mptcp: fix security context on server socket
	ethtool: fix error paths in ethnl_set_channels()
	ethtool: fix string set id check
	md/raid10: initialize r10_bio->read_slot before use.
	drm/amd/display: Add get_dig_frontend implementation for DCEx
	io_uring: close a small race gap for files cancel
	jffs2: Allow setting rp_size to zero during remounting
	jffs2: Fix NULL pointer dereference in rp_size fs option parsing
	spi: dw-bt1: Fix undefined devm_mux_control_get symbol
	opp: fix memory leak in _allocate_opp_table
	opp: Call the missing clk_put() on error
	scsi: block: Fix a race in the runtime power management code
	mm/hugetlb: fix deadlock in hugetlb_cow error path
	mm: memmap defer init doesn't work as expected
	lib/zlib: fix inflating zlib streams on s390
	io_uring: don't assume mm is constant across submits
	io_uring: use bottom half safe lock for fixed file data
	io_uring: add a helper for setting a ref node
	io_uring: fix io_sqe_files_unregister() hangs
	uapi: move constants from <linux/kernel.h> to <linux/const.h>
	tools headers UAPI: Sync linux/const.h with the kernel headers
	cgroup: Fix memory leak when parsing multiple source parameters
	zlib: move EXPORT_SYMBOL() and MODULE_LICENSE() out of dfltcc_syms.c
	scsi: cxgb4i: Fix TLS dependency
	Bluetooth: hci_h5: close serdev device and free hu in h5_close
	fbcon: Disable accelerated scrolling
	reiserfs: add check for an invalid ih_entry_count
	misc: vmw_vmci: fix kernel info-leak by initializing dbells in vmci_ctx_get_chkpt_doorbells()
	media: gp8psk: initialize stats at power control logic
	f2fs: fix shift-out-of-bounds in sanity_check_raw_super()
	ALSA: seq: Use bool for snd_seq_queue internal flags
	ALSA: rawmidi: Access runtime->avail always in spinlock
	bfs: don't use WARNING: string when it's just info.
	ext4: check for invalid block size early when mounting a file system
	fcntl: Fix potential deadlock in send_sig{io, urg}()
	io_uring: check kthread stopped flag when sq thread is unparked
	rtc: sun6i: Fix memleak in sun6i_rtc_clk_init
	module: set MODULE_STATE_GOING state when a module fails to load
	quota: Don't overflow quota file offsets
	rtc: pl031: fix resource leak in pl031_probe
	powerpc: sysdev: add missing iounmap() on error in mpic_msgr_probe()
	i3c master: fix missing destroy_workqueue() on error in i3c_master_register
	NFSv4: Fix a pNFS layout related use-after-free race when freeing the inode
	f2fs: avoid race condition for shrinker count
	f2fs: fix race of pending_pages in decompression
	module: delay kobject uevent until after module init call
	powerpc/64: irq replay remove decrementer overflow check
	fs/namespace.c: WARN if mnt_count has become negative
	watchdog: rti-wdt: fix reference leak in rti_wdt_probe
	um: random: Register random as hwrng-core device
	um: ubd: Submit all data segments atomically
	NFSv4.2: Don't error when exiting early on a READ_PLUS buffer overflow
	ceph: fix inode refcount leak when ceph_fill_inode on non-I_NEW inode fails
	drm/amd/display: updated wm table for Renoir
	tick/sched: Remove bogus boot "safety" check
	s390: always clear kernel stack backchain before calling functions
	io_uring: remove racy overflow list fast checks
	ALSA: pcm: Clear the full allocated memory at hw_params
	dm verity: skip verity work if I/O error when system is shutting down
	ext4: avoid s_mb_prefetch to be zero in individual scenarios
	device-dax: Fix range release
	Linux 5.10.5

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2b481bfac06bafdef2cf3cc1ac2c2a4ddf9913dc


@@ -273,6 +273,24 @@ Contact: Daniel Vetter, Noralf Tronnes
 Level: Advanced
 
+Garbage collect fbdev scrolling acceleration
+--------------------------------------------
+
+Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
+SCROLL_REDRAW. There's a ton of code this will allow us to remove:
+- lots of code in fbcon.c
+- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
+  directly instead of the function table (with a switch on p->rotate)
+- fb_copyarea is unused after this, and can be deleted from all drivers
+
+Note that not all acceleration code can be deleted, since clearing and cursor
+support is still accelerated, which might be good candidates for further
+deletion projects.
+
+Contact: Daniel Vetter
+
+Level: Intermediate
+
 idr_init_base()
 ---------------


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus


@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
         if (map_start < map_end)
                 memmap_init_zone((unsigned long)(map_end - map_start),
-                                 args->nid, args->zone, page_to_pfn(map_start),
+                                 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
                                  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         return 0;
 }
@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
              unsigned long start_pfn)
 {
         if (!vmem_map) {
-                memmap_init_zone(size, nid, zone, start_pfn,
+                memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
                                  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         } else {
                 struct page *start;


@@ -102,14 +102,6 @@ static inline notrace unsigned long get_irq_happened(void)
         return happened;
 }
 
-static inline notrace int decrementer_check_overflow(void)
-{
-        u64 now = get_tb();
-        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
-
-        return now >= *next_tb;
-}
-
 #ifdef CONFIG_PPC_BOOK3E
 
 /* This is called whenever we are re-enabling interrupts
@@ -142,35 +134,6 @@ notrace unsigned int __check_irq_replay(void)
         trace_hardirqs_on();
         trace_hardirqs_off();
 
-        /*
-         * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
-         * not be set, which means interrupts have only just been hard
-         * disabled as part of the local_irq_restore or interrupt return
-         * code. In that case, skip the decrementr check becaus it's
-         * expensive to read the TB.
-         *
-         * HARD_DIS then gets cleared here, but it's reconciled later.
-         * Either local_irq_disable will replay the interrupt and that
-         * will reconcile state like other hard interrupts. Or interrupt
-         * retur will replay the interrupt and in that case it sets
-         * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
-         */
-        if (happened & PACA_IRQ_HARD_DIS) {
-                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-
-                /*
-                 * We may have missed a decrementer interrupt if hard disabled.
-                 * Check the decrementer register in case we had a rollover
-                 * while hard disabled.
-                 */
-                if (!(happened & PACA_IRQ_DEC)) {
-                        if (decrementer_check_overflow()) {
-                                local_paca->irq_happened |= PACA_IRQ_DEC;
-                                happened |= PACA_IRQ_DEC;
-                        }
-                }
-        }
-
         if (happened & PACA_IRQ_DEC) {
                 local_paca->irq_happened &= ~PACA_IRQ_DEC;
                 return 0x900;
@@ -186,6 +149,9 @@ notrace unsigned int __check_irq_replay(void)
                 return 0x280;
         }
 
+        if (happened & PACA_IRQ_HARD_DIS)
+                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
         /* There should be nothing left ! */
         BUG_ON(local_paca->irq_happened != 0);
@@ -229,18 +195,6 @@ again:
         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                 WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-        if (happened & PACA_IRQ_HARD_DIS) {
-                /*
-                 * We may have missed a decrementer interrupt if hard disabled.
-                 * Check the decrementer register in case we had a rollover
-                 * while hard disabled.
-                 */
-                if (!(happened & PACA_IRQ_DEC)) {
-                        if (decrementer_check_overflow())
-                                happened |= PACA_IRQ_DEC;
-                }
-        }
-
         /*
          * Force the delivery of pending soft-disabled interrupts on PS3.
          * Any HV call will have this side effect.
@@ -345,6 +299,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
                 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                         WARN_ON_ONCE(!(mfmsr() & MSR_EE));
                 __hard_irq_disable();
+                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
         } else {
                 /*
                  * We should already be hard disabled here. We had bugs


@@ -552,14 +552,11 @@ void timer_interrupt(struct pt_regs *regs)
         struct pt_regs *old_regs;
         u64 now;
 
-        /* Some implementations of hotplug will get timer interrupts while
-         * offline, just ignore these and we also need to set
-         * decrementers_next_tb as MAX to make sure __check_irq_replay
-         * don't replay timer interrupt when return, otherwise we'll trap
-         * here infinitely :(
+        /*
+         * Some implementations of hotplug will get timer interrupts while
+         * offline, just ignore these.
          */
         if (unlikely(!cpu_online(smp_processor_id()))) {
-                *next_tb = ~(u64)0;
                 set_dec(decrementer_max);
                 return;
         }


@@ -731,7 +731,7 @@ int opal_hmi_exception_early2(struct pt_regs *regs)
         return 1;
 }
 
-/* HMI exception handler called in virtual mode during check_irq_replay. */
+/* HMI exception handler called in virtual mode when irqs are next enabled. */
 int opal_handle_hmi_exception(struct pt_regs *regs)
 {
         /*


@@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
         /* IO map the message register block. */
         of_address_to_resource(np, 0, &rsrc);
-        msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+        msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
         if (!msgr_block_addr) {
                 dev_err(&dev->dev, "Failed to iomap MPIC message registers");
                 return -EFAULT;


@@ -406,6 +406,7 @@ ENTRY(system_call)
         mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
         mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
         stg     %r14,__PT_FLAGS(%r11)
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
         ENABLE_INTS
 .Lsysc_do_svc:
         # clear user controlled register to prevent speculative use
@@ -422,7 +423,6 @@
         jnl     .Lsysc_nr_ok
         slag    %r8,%r1,3
 .Lsysc_nr_ok:
-        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
         stg     %r2,__PT_ORIG_GPR2(%r11)
         stg     %r7,STACK_FRAME_OVERHEAD(%r15)
         lg      %r9,0(%r8,%r10)                 # get system call add.
@@ -712,8 +712,8 @@ ENTRY(pgm_check_handler)
         mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
         mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
         mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6:      RESTORE_SM_CLEAR_PER
-        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+6:      xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+        RESTORE_SM_CLEAR_PER
         larl    %r1,pgm_check_table
         llgh    %r10,__PT_INT_CODE+2(%r11)
         nill    %r10,0x007f
@@ -734,8 +734,8 @@ ENTRY(pgm_check_handler)
 # PER event in supervisor state, must be kprobes
 #
 .Lpgm_kprobe:
-        RESTORE_SM_CLEAR_PER
         xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+        RESTORE_SM_CLEAR_PER
         lgr     %r2,%r11                # pass pointer to pt_regs
         brasl   %r14,do_per_trap
         j       .Lpgm_return
@@ -777,10 +777,10 @@ ENTRY(io_int_handler)
         stmg    %r8,%r9,__PT_PSW(%r11)
         mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
         xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
         TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
         jo      .Lio_restore
         TRACE_IRQS_OFF
-        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 .Lio_loop:
         lgr     %r2,%r11                # pass pointer to pt_regs
         lghi    %r3,IO_INTERRUPT
@@ -980,10 +980,10 @@ ENTRY(ext_int_handler)
         mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
         mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
         xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
         TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
         jo      .Lio_restore
         TRACE_IRQS_OFF
-        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
         lgr     %r2,%r11                # pass pointer to pt_regs
         lghi    %r3,EXT_INTERRUPT
         brasl   %r14,do_IRQ


@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
+#include <linux/hw_random.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <init.h>
@@ -18,9 +19,8 @@
 #include <os.h>
 
 /*
- * core module and version information
+ * core module information
  */
-#define RNG_VERSION "1.0.0"
 #define RNG_MODULE_NAME "hw_random"
 
 /* Changed at init time, in the non-modular case, and at module load
@@ -28,88 +28,36 @@
  * protects against a module being loaded twice at the same time.
  */
 static int random_fd = -1;
-static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
+static struct hwrng hwrng = { 0, };
+static DECLARE_COMPLETION(have_data);
 
-static int rng_dev_open (struct inode *inode, struct file *filp)
+static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
 {
-        /* enforce read-only access to this chrdev */
-        if ((filp->f_mode & FMODE_READ) == 0)
-                return -EINVAL;
-        if ((filp->f_mode & FMODE_WRITE) != 0)
-                return -EINVAL;
+        int ret;
 
-        return 0;
-}
-
-static atomic_t host_sleep_count = ATOMIC_INIT(0);
-
-static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
-                             loff_t *offp)
-{
-        u32 data;
-        int n, ret = 0, have_data;
-
-        while (size) {
-                n = os_read_file(random_fd, &data, sizeof(data));
-                if (n > 0) {
-                        have_data = n;
-                        while (have_data && size) {
-                                if (put_user((u8) data, buf++)) {
-                                        ret = ret ? : -EFAULT;
-                                        break;
-                                }
-                                size--;
-                                ret++;
-                                have_data--;
-                                data >>= 8;
-                        }
-                }
-                else if (n == -EAGAIN) {
-                        DECLARE_WAITQUEUE(wait, current);
-
-                        if (filp->f_flags & O_NONBLOCK)
-                                return ret ? : -EAGAIN;
-
-                        atomic_inc(&host_sleep_count);
+        for (;;) {
+                ret = os_read_file(random_fd, buf, max);
+                if (block && ret == -EAGAIN) {
                         add_sigio_fd(random_fd);
-                        add_wait_queue(&host_read_wait, &wait);
-                        set_current_state(TASK_INTERRUPTIBLE);
-                        schedule();
-                        remove_wait_queue(&host_read_wait, &wait);
 
-                        if (atomic_dec_and_test(&host_sleep_count)) {
-                                ignore_sigio_fd(random_fd);
-                                deactivate_fd(random_fd, RANDOM_IRQ);
-                        }
-                }
-                else
-                        return n;
+                        ret = wait_for_completion_killable(&have_data);
 
-                if (signal_pending (current))
-                        return ret ? : -ERESTARTSYS;
+                        ignore_sigio_fd(random_fd);
+                        deactivate_fd(random_fd, RANDOM_IRQ);
+
+                        if (ret < 0)
+                                break;
+                } else {
+                        break;
+                }
         }
 
-        return ret;
+        return ret != -EAGAIN ? ret : 0;
 }
 
-static const struct file_operations rng_chrdev_ops = {
-        .owner = THIS_MODULE,
-        .open = rng_dev_open,
-        .read = rng_dev_read,
-        .llseek = noop_llseek,
-};
-
-/* rng_init shouldn't be called more than once at boot time */
-static struct miscdevice rng_miscdev = {
-        HWRNG_MINOR,
-        RNG_MODULE_NAME,
-        &rng_chrdev_ops,
-};
-
 static irqreturn_t random_interrupt(int irq, void *data)
 {
-        wake_up(&host_read_wait);
+        complete(&have_data);
 
         return IRQ_HANDLED;
 }
@@ -126,18 +74,19 @@ static int __init rng_init (void)
                 goto out;
 
         random_fd = err;
         err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
                              0, "random", NULL);
         if (err)
                 goto err_out_cleanup_hw;
 
         sigio_broken(random_fd, 1);
+        hwrng.name = RNG_MODULE_NAME;
+        hwrng.read = rng_dev_read;
+        hwrng.quality = 1024;
 
-        err = misc_register (&rng_miscdev);
+        err = hwrng_register(&hwrng);
         if (err) {
-                printk (KERN_ERR RNG_MODULE_NAME ": misc device register "
-                        "failed\n");
+                pr_err(RNG_MODULE_NAME " registering failed (%d)\n", err);
                 goto err_out_cleanup_hw;
         }
 out:
@@ -161,8 +110,8 @@ static void cleanup(void)
 static void __exit rng_cleanup(void)
 {
+        hwrng_unregister(&hwrng);
         os_close_file(random_fd);
-        misc_deregister (&rng_miscdev);
 }
 
 module_init (rng_init);
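
For background, the rewritten rng_dev_read() above swaps an open-coded wait queue for a completion that the interrupt handler fires. A minimal userspace sketch of that pattern (illustrative only, not part of the patch; all names are hypothetical), using pthreads in place of the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;                       /* completion state */

static void complete_analogue(void)   /* stands in for complete(&have_data) */
{
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

static void wait_for_completion_analogue(void)
{
        pthread_mutex_lock(&lock);
        while (!done)                  /* loop handles spurious wakeups */
                pthread_cond_wait(&cond, &lock);
        done = 0;                      /* consume the event */
        pthread_mutex_unlock(&lock);
}

static void *irq_thread(void *arg)     /* stands in for random_interrupt() */
{
        complete_analogue();
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, irq_thread, NULL);
        wait_for_completion_analogue();
        puts("data ready");
        return pthread_join(t, NULL);
}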


@@ -47,18 +47,25 @@
 /* Max request size is determined by sector mask - 32K */
 #define UBD_MAX_REQUEST (8 * sizeof(long))
 
+struct io_desc {
+        char *buffer;
+        unsigned long length;
+        unsigned long sector_mask;
+        unsigned long long cow_offset;
+        unsigned long bitmap_words[2];
+};
+
 struct io_thread_req {
         struct request *req;
         int fds[2];
         unsigned long offsets[2];
         unsigned long long offset;
-        unsigned long length;
-        char *buffer;
         int sectorsize;
-        unsigned long sector_mask;
-        unsigned long long cow_offset;
-        unsigned long bitmap_words[2];
         int error;
+
+        int desc_cnt;
+        /* io_desc has to be the last element of the struct */
+        struct io_desc io_desc[];
 };
@@ -525,12 +532,7 @@ static void ubd_handler(void)
                         blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
                         blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
                 }
-                if ((io_req->error) || (io_req->buffer == NULL))
-                        blk_mq_end_request(io_req->req, io_req->error);
-                else {
-                        if (!blk_update_request(io_req->req, io_req->error, io_req->length))
-                                __blk_mq_end_request(io_req->req, io_req->error);
-                }
+                blk_mq_end_request(io_req->req, io_req->error);
                 kfree(io_req);
         }
 }
@@ -946,6 +948,7 @@ static int ubd_add(int n, char **error_out)
         blk_queue_write_cache(ubd_dev->queue, true, false);
         blk_queue_max_segments(ubd_dev->queue, MAX_SG);
+        blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
         err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
         if(err){
                 *error_out = "Failed to register device";
@@ -1289,37 +1292,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
         *cow_offset += bitmap_offset;
 }
 
-static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
+static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
+                       unsigned long offset, unsigned long *bitmap,
                        __u64 bitmap_offset, __u64 bitmap_len)
 {
-        __u64 sector = req->offset >> SECTOR_SHIFT;
+        __u64 sector = offset >> SECTOR_SHIFT;
         int i;
 
-        if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
+        if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
                 panic("Operation too long");
 
         if (req_op(req->req) == REQ_OP_READ) {
-                for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
+                for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
                         if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
                                 ubd_set_bit(i, (unsigned char *)
-                                            &req->sector_mask);
+                                            &segment->sector_mask);
                 }
+        } else {
+                cowify_bitmap(offset, segment->length, &segment->sector_mask,
+                              &segment->cow_offset, bitmap, bitmap_offset,
+                              segment->bitmap_words, bitmap_len);
         }
-        else cowify_bitmap(req->offset, req->length, &req->sector_mask,
-                           &req->cow_offset, bitmap, bitmap_offset,
-                           req->bitmap_words, bitmap_len);
 }
 
-static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
-                             u64 off, struct bio_vec *bvec)
+static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
+                        struct request *req)
 {
-        struct ubd *dev = hctx->queue->queuedata;
-        struct io_thread_req *io_req;
-        int ret;
+        struct bio_vec bvec;
+        struct req_iterator iter;
+        int i = 0;
+        unsigned long byte_offset = io_req->offset;
+        int op = req_op(req);
 
-        io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+        if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
+                io_req->io_desc[0].buffer = NULL;
+                io_req->io_desc[0].length = blk_rq_bytes(req);
+        } else {
+                rq_for_each_segment(bvec, req, iter) {
+                        BUG_ON(i >= io_req->desc_cnt);
+
+                        io_req->io_desc[i].buffer =
+                                page_address(bvec.bv_page) + bvec.bv_offset;
+                        io_req->io_desc[i].length = bvec.bv_len;
+                        i++;
+                }
+        }
+
+        if (dev->cow.file) {
+                for (i = 0; i < io_req->desc_cnt; i++) {
+                        cowify_req(io_req, &io_req->io_desc[i], byte_offset,
+                                   dev->cow.bitmap, dev->cow.bitmap_offset,
+                                   dev->cow.bitmap_len);
+                        byte_offset += io_req->io_desc[i].length;
+                }
+        }
+}
+
+static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
+                                           int desc_cnt)
+{
+        struct io_thread_req *io_req;
+        int i;
+
+        io_req = kmalloc(sizeof(*io_req) +
+                         (desc_cnt * sizeof(struct io_desc)),
+                         GFP_ATOMIC);
         if (!io_req)
-                return -ENOMEM;
+                return NULL;
 
         io_req->req = req;
         if (dev->cow.file)
@@ -1327,26 +1367,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
         else
                 io_req->fds[0] = dev->fd;
         io_req->error = 0;
-
-        if (bvec != NULL) {
-                io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
-                io_req->length = bvec->bv_len;
-        } else {
-                io_req->buffer = NULL;
-                io_req->length = blk_rq_bytes(req);
-        }
-
         io_req->sectorsize = SECTOR_SIZE;
         io_req->fds[1] = dev->fd;
-        io_req->cow_offset = -1;
-        io_req->offset = off;
-        io_req->sector_mask = 0;
+        io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
         io_req->offsets[0] = 0;
         io_req->offsets[1] = dev->cow.data_offset;
 
-        if (dev->cow.file)
-                cowify_req(io_req, dev->cow.bitmap,
-                           dev->cow.bitmap_offset, dev->cow.bitmap_len);
+        for (i = 0 ; i < desc_cnt; i++) {
+                io_req->io_desc[i].sector_mask = 0;
+                io_req->io_desc[i].cow_offset = -1;
+        }
+
+        return io_req;
+}
+
+static int ubd_submit_request(struct ubd *dev, struct request *req)
+{
+        int segs = 0;
+        struct io_thread_req *io_req;
+        int ret;
+        int op = req_op(req);
+
+        if (op == REQ_OP_FLUSH)
+                segs = 0;
+        else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
+                segs = 1;
+        else
+                segs = blk_rq_nr_phys_segments(req);
+
+        io_req = ubd_alloc_req(dev, req, segs);
+        if (!io_req)
+                return -ENOMEM;
+
+        io_req->desc_cnt = segs;
+        if (segs)
+                ubd_map_req(dev, io_req, req);
 
         ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
         if (ret != sizeof(io_req)) {
@@ -1357,22 +1412,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
         return ret;
 }
 
-static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
-{
-        struct req_iterator iter;
-        struct bio_vec bvec;
-        int ret;
-        u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
-
-        rq_for_each_segment(bvec, req, iter) {
-                ret = ubd_queue_one_vec(hctx, req, off, &bvec);
-                if (ret < 0)
-                        return ret;
-                off += bvec.bv_len;
-        }
-
-        return 0;
-}
-
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
 {
@@ -1385,17 +1424,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
         spin_lock_irq(&ubd_dev->lock);
 
         switch (req_op(req)) {
-        /* operations with no lentgth/offset arguments */
         case REQ_OP_FLUSH:
-                ret = ubd_queue_one_vec(hctx, req, 0, NULL);
-                break;
         case REQ_OP_READ:
         case REQ_OP_WRITE:
-                ret = queue_rw_req(hctx, req);
-                break;
         case REQ_OP_DISCARD:
         case REQ_OP_WRITE_ZEROES:
-                ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
+                ret = ubd_submit_request(ubd_dev, req);
                 break;
         default:
                 WARN_ON_ONCE(1);
@@ -1483,22 +1517,22 @@ static int map_error(int error_code)
  * will result in unpredictable behaviour and/or crashes.
  */
 
-static int update_bitmap(struct io_thread_req *req)
+static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
 {
         int n;
 
-        if(req->cow_offset == -1)
+        if (segment->cow_offset == -1)
                 return map_error(0);
 
-        n = os_pwrite_file(req->fds[1], &req->bitmap_words,
-                           sizeof(req->bitmap_words), req->cow_offset);
-        if (n != sizeof(req->bitmap_words))
+        n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
+                           sizeof(segment->bitmap_words), segment->cow_offset);
+        if (n != sizeof(segment->bitmap_words))
                 return map_error(-n);
 
         return map_error(0);
 }
 
-static void do_io(struct io_thread_req *req)
+static void do_io(struct io_thread_req *req, struct io_desc *desc)
 {
         char *buf = NULL;
         unsigned long len;
@@ -1513,21 +1547,20 @@ static void do_io(struct io_thread_req *req)
                 return;
         }
 
-        nsectors = req->length / req->sectorsize;
+        nsectors = desc->length / req->sectorsize;
         start = 0;
         do {
-                bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
+                bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
                 end = start;
                 while((end < nsectors) &&
-                      (ubd_test_bit(end, (unsigned char *)
-                                    &req->sector_mask) == bit))
+                      (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
                         end++;
 
                 off = req->offset + req->offsets[bit] +
                         start * req->sectorsize;
                 len = (end - start) * req->sectorsize;
-                if (req->buffer != NULL)
-                        buf = &req->buffer[start * req->sectorsize];
+                if (desc->buffer != NULL)
+                        buf = &desc->buffer[start * req->sectorsize];
 
                 switch (req_op(req->req)) {
                 case REQ_OP_READ:
@@ -1567,7 +1600,8 @@ static void do_io(struct io_thread_req *req)
                 start = end;
         } while(start < nsectors);
 
-        req->error = update_bitmap(req);
+        req->offset += len;
+        req->error = update_bitmap(req, desc);
 }
 
 /* Changed in start_io_thread, which is serialized by being called only
@@ -1600,8 +1634,13 @@ int io_thread(void *arg)
                 }
 
                 for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+                        struct io_thread_req *req = (*io_req_buffer)[count];
+                        int i;
+
                         io_count++;
-                        do_io((*io_req_buffer)[count]);
+                        for (i = 0; !req->error && i < req->desc_cnt; i++)
+                                do_io(req, &(req->io_desc[i]));
                 }
 
                 written = 0;


@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
         WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
 
+        spin_lock_irq(&q->queue_lock);
+        q->rpm_status = RPM_SUSPENDING;
+        spin_unlock_irq(&q->queue_lock);
+
         /*
          * Increase the pm_only counter before checking whether any
          * non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
         /* Switch q_usage_counter back to per-cpu mode. */
         blk_mq_unfreeze_queue(q);
 
-        spin_lock_irq(&q->queue_lock);
-        if (ret < 0)
+        if (ret < 0) {
+                spin_lock_irq(&q->queue_lock);
+                q->rpm_status = RPM_ACTIVE;
                 pm_runtime_mark_last_busy(q->dev);
-        else
-                q->rpm_status = RPM_SUSPENDING;
-        spin_unlock_irq(&q->queue_lock);
+                spin_unlock_irq(&q->queue_lock);
 
-        if (ret)
                 blk_clear_pm_only(q);
+        }
 
         return ret;
 }


@@ -251,8 +251,12 @@ static int h5_close(struct hci_uart *hu)
         if (h5->vnd && h5->vnd->close)
                 h5->vnd->close(h5);
 
-        if (!hu->serdev)
-                kfree(h5);
+        if (hu->serdev)
+                serdev_device_close(hu->serdev);
+
+        kfree_skb(h5->rx_skb);
+        kfree(h5);
+        h5 = NULL;
 
         return 0;
 }


@@ -540,15 +540,15 @@ endif # HW_RANDOM
 config UML_RANDOM
         depends on UML
-        tristate "Hardware random number generator"
+        select HW_RANDOM
+        tristate "UML Random Number Generator support"
         help
           This option enables UML's "hardware" random number generator.  It
           attaches itself to the host's /dev/random, supplying as much entropy
           as the host has, rather than the small amount the UML gets from its
-          own drivers.  It registers itself as a standard hardware random number
-          generator, major 10, minor 183, and the canonical device name is
-          /dev/hwrng.
-          The way to make use of this is to install the rng-tools package
-          (check your distro, or download from
-          http://sourceforge.net/projects/gkernel/).  rngd periodically reads
-          /dev/hwrng and injects the entropy into /dev/random.
+          own drivers. It registers itself as a rng-core driver thus providing
+          a device which is usually called /dev/hwrng. This hardware random
+          number generator does feed into the kernel's random number generator
+          entropy pool.
+
+          If unsure, say Y.


@@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax)
 }
 EXPORT_SYMBOL_GPL(kill_dev_dax);
 
-static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+static void trim_dev_dax_range(struct dev_dax *dev_dax)
 {
+        int i = dev_dax->nr_range - 1;
+        struct range *range = &dev_dax->ranges[i].range;
         struct dax_region *dax_region = dev_dax->region;
-        int i;
 
         device_lock_assert(dax_region->dev);
-        for (i = 0; i < dev_dax->nr_range; i++) {
-                struct range *range = &dev_dax->ranges[i].range;
-
-                __release_region(&dax_region->res, range->start,
-                                range_len(range));
-        }
-        dev_dax->nr_range = 0;
+        dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
+                (unsigned long long)range->start,
+                (unsigned long long)range->end);
+        __release_region(&dax_region->res, range->start, range_len(range));
+        if (--dev_dax->nr_range == 0) {
+                kfree(dev_dax->ranges);
+                dev_dax->ranges = NULL;
+        }
+}
+
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+{
+        while (dev_dax->nr_range)
+                trim_dev_dax_range(dev_dax);
 }
 
 static void unregister_dev_dax(void *dev)
@@ -804,15 +813,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
                 return 0;
 
         rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
-        if (rc) {
-                dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
-                                &alloc->start, &alloc->end);
-                dev_dax->nr_range--;
-                __release_region(res, alloc->start, resource_size(alloc));
-                return rc;
-        }
+        if (rc)
+                trim_dev_dax_range(dev_dax);
 
-        return 0;
+        return rc;
 }
 
 static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
@@ -885,12 +889,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
                 if (shrink >= range_len(range)) {
                         devm_release_action(dax_region->dev,
                                         unregister_dax_mapping, &mapping->dev);
-                        __release_region(&dax_region->res, range->start,
-                                        range_len(range));
-                        dev_dax->nr_range--;
-                        dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
-                                (unsigned long long) range->start,
-                                (unsigned long long) range->end);
+                        trim_dev_dax_range(dev_dax);
                         to_shrink -= shrink;
                         if (!to_shrink)
                                 break;
@@ -1274,7 +1273,6 @@ static void dev_dax_release(struct device *dev)
         put_dax(dax_dev);
         free_dev_dax_id(dev_dax);
         dax_region_put(dax_region);
-        kfree(dev_dax->ranges);
         kfree(dev_dax->pgmap);
         kfree(dev_dax);
 }


@@ -704,24 +704,24 @@ static struct wm_table ddr4_wm_table_rn = {
                 .wm_inst = WM_B,
                 .wm_type = WM_TYPE_PSTATE_CHG,
                 .pstate_latency_us = 11.72,
-                .sr_exit_time_us = 10.12,
-                .sr_enter_plus_exit_time_us = 11.48,
+                .sr_exit_time_us = 11.12,
+                .sr_enter_plus_exit_time_us = 12.48,
                 .valid = true,
         },
         {
                 .wm_inst = WM_C,
                 .wm_type = WM_TYPE_PSTATE_CHG,
                 .pstate_latency_us = 11.72,
-                .sr_exit_time_us = 10.12,
-                .sr_enter_plus_exit_time_us = 11.48,
+                .sr_exit_time_us = 11.12,
+                .sr_enter_plus_exit_time_us = 12.48,
                 .valid = true,
         },
         {
                 .wm_inst = WM_D,
                 .wm_type = WM_TYPE_PSTATE_CHG,
                 .pstate_latency_us = 11.72,
-                .sr_exit_time_us = 10.12,
-                .sr_enter_plus_exit_time_us = 11.48,
+                .sr_exit_time_us = 11.12,
+                .sr_enter_plus_exit_time_us = 12.48,
                 .valid = true,
         },
 }


@@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
         .disable_hpd = dce110_link_encoder_disable_hpd,
         .is_dig_enabled = dce110_is_dig_enabled,
         .destroy = dce110_link_encoder_destroy,
-        .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+        .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+        .get_dig_frontend = dce110_get_dig_frontend,
 };
 
 static enum bp_result link_transmitter_control(
@@ -235,6 +236,44 @@ static void set_link_training_complete(
 }
 
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
+{
+        struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+        u32 value;
+        enum engine_id result;
+
+        REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
+
+        switch (value) {
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
+                result = ENGINE_ID_DIGA;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
+                result = ENGINE_ID_DIGB;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
+                result = ENGINE_ID_DIGC;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
+                result = ENGINE_ID_DIGD;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
+                result = ENGINE_ID_DIGE;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
+                result = ENGINE_ID_DIGF;
+                break;
+        case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
+                result = ENGINE_ID_DIGG;
+                break;
+        default:
+                // invalid source select DIG
+                result = ENGINE_ID_UNKNOWN;
+        }
+
+        return result;
+}
+
 void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
         struct link_encoder *enc,
         uint32_t index)
@@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
         .disable_hpd = dce110_link_encoder_disable_hpd,
         .is_dig_enabled = dce110_is_dig_enabled,
         .destroy = dce110_link_encoder_destroy,
-        .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+        .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+        .get_dig_frontend = dce110_get_dig_frontend
 };
 
 void dce60_link_encoder_construct(


@@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
         enum engine_id engine,
         bool connect);
 
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
+
 void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
         struct link_encoder *enc,
         uint32_t index);


@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
         ret = i3c_master_bus_init(master);
         if (ret)
-                goto err_put_dev;
+                goto err_destroy_wq;
 
         ret = device_add(&master->dev);
         if (ret)
@@ -2568,6 +2568,9 @@ err_del_dev:
 err_cleanup_bus:
         i3c_master_bus_cleanup(master);
 
+err_destroy_wq:
+        destroy_workqueue(master->wq);
+
 err_put_dev:
         put_device(&master->dev);


@@ -541,6 +541,15 @@ static int verity_verify_io(struct dm_verity_io *io)
         return 0;
 }
 
+/*
+ * Skip verity work in response to I/O error when system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+        return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+                || system_state == SYSTEM_RESTART;
+}
+
 /*
  * End one "io" structure with a given error.
  */
@@ -568,7 +577,8 @@ static void verity_end_io(struct bio *bio)
 {
         struct dm_verity_io *io = bio->bi_private;
 
-        if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+        if (bio->bi_status &&
+            (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
                 verity_finish_io(io, bio->bi_status);
                 return;
         }


@@ -1128,7 +1128,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
         struct md_rdev *err_rdev = NULL;
         gfp_t gfp = GFP_NOIO;
 
-        if (r10_bio->devs[slot].rdev) {
+        if (slot >= 0 && r10_bio->devs[slot].rdev) {
                 /*
                  * This is an error retry, but we cannot
                  * safely dereference the rdev in the r10_bio,
@@ -1493,6 +1493,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
         r10_bio->mddev = mddev;
         r10_bio->sector = bio->bi_iter.bi_sector;
         r10_bio->state = 0;
+        r10_bio->read_slot = -1;
         memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
 
         if (bio_data_dir(bio) == READ)


@@ -182,7 +182,7 @@ out_rel_fw:
 static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
 {
-        u8 status, buf;
+        u8 status = 0, buf;
         int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
 
         if (onoff) {


@@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
                 return VMCI_ERROR_MORE_DATA;
         }
 
-        dbells = kmalloc(data_size, GFP_ATOMIC);
+        dbells = kzalloc(data_size, GFP_ATOMIC);
         if (!dbells)
                 return VMCI_ERROR_NO_MEM;
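
The one-character kmalloc() to kzalloc() change above is the whole info-leak fix: the doorbell buffer may be only partially filled before being copied back out, so any non-zeroed bytes would expose stale kernel heap contents. A minimal userspace sketch of the same idea (not part of the patch; names are hypothetical):

#include <stdlib.h>

struct doorbell_set {
        size_t count;
        unsigned int handles[8];        /* may be only partially filled */
};

static struct doorbell_set *alloc_doorbells(void)
{
        /* like kzalloc(): zeroed, so unwritten slots cannot leak stale
         * heap contents when the struct is later copied out; plain
         * malloc() (like kmalloc()) would leave them undefined */
        return calloc(1, sizeof(struct doorbell_set));
}

int main(void)
{
        struct doorbell_set *db = alloc_doorbells();

        if (!db)
                return 1;
        db->count = 2;
        db->handles[0] = 1;
        db->handles[1] = 2;
        /* copying all of *db to an untrusted reader is now safe */
        free(db);
        return 0;
}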


@@ -1102,7 +1102,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
         if (IS_ERR(opp_table->clk)) {
                 ret = PTR_ERR(opp_table->clk);
                 if (ret == -EPROBE_DEFER)
-                        goto err;
+                        goto remove_opp_dev;
 
                 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
         }
@@ -1111,7 +1111,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
         ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
         if (ret) {
                 if (ret == -EPROBE_DEFER)
-                        goto err;
+                        goto put_clk;
 
                 dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
                          __func__, ret);
@@ -1125,6 +1125,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
         list_add(&opp_table->node, &opp_tables);
         return opp_table;
 
+put_clk:
+        if (!IS_ERR(opp_table->clk))
+                clk_put(opp_table->clk);
+remove_opp_dev:
+        _remove_opp_dev(opp_dev, opp_table);
 err:
         kfree(opp_table);
         return ERR_PTR(ret);


@@ -361,8 +361,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
         device_init_wakeup(&adev->dev, true);
         ldata->rtc = devm_rtc_allocate_device(&adev->dev);
-        if (IS_ERR(ldata->rtc))
-                return PTR_ERR(ldata->rtc);
+        if (IS_ERR(ldata->rtc)) {
+                ret = PTR_ERR(ldata->rtc);
+                goto out;
+        }
 
         ldata->rtc->ops = ops;
         ldata->rtc->range_min = vendor->range_min;


@@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
                                               300000000);
         if (IS_ERR(rtc->int_osc)) {
                 pr_crit("Couldn't register the internal oscillator\n");
-                return;
+                goto err;
         }
 
         parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
         rtc->losc = clk_register(NULL, &rtc->hw);
         if (IS_ERR(rtc->losc)) {
                 pr_crit("Couldn't register the LOSC clock\n");
-                return;
+                goto err_register;
         }
 
         of_property_read_string_index(node, "clock-output-names", 1,
@@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
                                           &rtc->lock);
         if (IS_ERR(rtc->ext_losc)) {
                 pr_crit("Couldn't register the LOSC external gate\n");
-                return;
+                goto err_register;
         }
 
         clk_data->num = 2;
@@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
         of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
         return;
 
+err_register:
+        clk_hw_unregister_fixed_rate(rtc->int_osc);
 err:
         kfree(clk_data);
 }
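
Both RTC fixes above convert early returns into the usual goto-unwind ladder, where each label releases exactly what was acquired before the failure, in reverse order. A minimal standalone sketch of that pattern (not part of either patch; names are hypothetical):

#include <stdlib.h>

static int setup(void)
{
        void *clk_data, *int_osc;

        clk_data = malloc(32);          /* step 1 */
        if (!clk_data)
                goto err;

        int_osc = malloc(32);           /* step 2 */
        if (!int_osc)
                goto err_free_clk_data; /* undo step 1 only */

        /* use the resources, then release them in reverse order */
        free(int_osc);
        free(clk_data);
        return 0;

err_free_clk_data:
        free(clk_data);
err:
        return -1;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}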


@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
         depends on PCI && INET && (IPV6 || IPV6=n)
         depends on THERMAL || !THERMAL
         depends on ETHERNET
+        depends on TLS || TLS=n
         select NET_VENDOR_CHELSIO
         select CHELSIO_T4
         select CHELSIO_LIB


@@ -256,6 +256,7 @@ config SPI_DW_BT1
tristate "Baikal-T1 SPI driver for DW SPI core" tristate "Baikal-T1 SPI driver for DW SPI core"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST depends on MIPS_BAIKAL_T1 || COMPILE_TEST
select MULTIPLEXER select MULTIPLEXER
select MUX_MMIO
help help
Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
controllers. Two of them are pretty much normal: with IRQ, DMA, controllers. Two of them are pretty much normal: with IRQ, DMA,
@@ -269,8 +270,6 @@ config SPI_DW_BT1
config SPI_DW_BT1_DIRMAP config SPI_DW_BT1_DIRMAP
bool "Directly mapped Baikal-T1 Boot SPI flash support" bool "Directly mapped Baikal-T1 Boot SPI flash support"
depends on SPI_DW_BT1 depends on SPI_DW_BT1
select MULTIPLEXER
select MUX_MMIO
help help
Directly mapped SPI flash memory is an interface specific to the Directly mapped SPI flash memory is an interface specific to the
Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which


@@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init)
         struct vc_data *svc = *default_mode;
         struct fbcon_display *t, *p = &fb_display[vc->vc_num];
         int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
-        int cap, ret;
+        int ret;
 
         if (WARN_ON(info_idx == -1))
                 return;
@@ -1042,7 +1042,6 @@ static void fbcon_init(struct vc_data *vc, int init)
 
         con2fb_map[vc->vc_num] = info_idx;
         info = registered_fb[con2fb_map[vc->vc_num]];
-        cap = info->flags;
 
         if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
                 logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1147,10 +1146,12 @@ static void fbcon_init(struct vc_data *vc, int init)
         ops->graphics = 0;
 
-        if ((cap & FBINFO_HWACCEL_COPYAREA) &&
-            !(cap & FBINFO_HWACCEL_DISABLED))
-                p->scrollmode = SCROLL_MOVE;
-        else /* default to something safe */
-                p->scrollmode = SCROLL_REDRAW;
+        /*
+         * No more hw acceleration for fbcon.
+         *
+         * FIXME: Garbage collect all the now dead code after sufficient time
+         * has passed.
+         */
+        p->scrollmode = SCROLL_REDRAW;
 
         /*
@@ -1961,45 +1962,15 @@ static void updatescrollmode(struct fbcon_display *p,
 {
         struct fbcon_ops *ops = info->fbcon_par;
         int fh = vc->vc_font.height;
-        int cap = info->flags;
-        u16 t = 0;
-        int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
-                                 info->fix.xpanstep);
-        int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
         int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
         int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
                                  info->var.xres_virtual);
-        int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
-                divides(ypan, vc->vc_font.height) && vyres > yres;
-        int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
-                divides(ywrap, vc->vc_font.height) &&
-                divides(vc->vc_font.height, vyres) &&
-                divides(vc->vc_font.height, yres);
-        int reading_fast = cap & FBINFO_READS_FAST;
-        int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
-                !(cap & FBINFO_HWACCEL_DISABLED);
-        int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
-                !(cap & FBINFO_HWACCEL_DISABLED);
 
         p->vrows = vyres/fh;
         if (yres > (fh * (vc->vc_rows + 1)))
                 p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
         if ((yres % fh) && (vyres % fh < yres % fh))
                 p->vrows--;
-
-        if (good_wrap || good_pan) {
-                if (reading_fast || fast_copyarea)
-                        p->scrollmode = good_wrap ?
-                                SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
-                else
-                        p->scrollmode = good_wrap ? SCROLL_REDRAW :
-                                SCROLL_PAN_REDRAW;
-        } else {
-                if (reading_fast || (fast_copyarea && !fast_imageblit))
-                        p->scrollmode = SCROLL_MOVE;
-                else
-                        p->scrollmode = SCROLL_REDRAW;
-        }
 }
 
 #define PITCH(w) (((w) + 7) >> 3)


@@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev)
         pm_runtime_enable(dev);
         ret = pm_runtime_get_sync(dev);
-        if (ret)
+        if (ret) {
+                pm_runtime_put_noidle(dev);
                 return dev_err_probe(dev, ret, "runtime pm failed\n");
+        }
 
         platform_set_drvdata(pdev, wdt);


@@ -350,7 +350,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
         info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
         if (info->si_lasti == BFS_MAX_LASTI)
-                printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
+                printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
         else if (info->si_lasti > BFS_MAX_LASTI) {
                 printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id);
                 goto out1;


@@ -1335,6 +1335,8 @@ retry_lookup:
                                 in, ceph_vinop(in));
                 if (in->i_state & I_NEW)
                         discard_new_inode(in);
+                else
+                        iput(in);
                 goto done;
         }
         req->r_target_inode = in;


@@ -2395,9 +2395,9 @@ repeat:
                         nr = sbi->s_mb_prefetch;
                         if (ext4_has_feature_flex_bg(sb)) {
-                                nr = (group / sbi->s_mb_prefetch) *
-                                        sbi->s_mb_prefetch;
-                                nr = nr + sbi->s_mb_prefetch - group;
+                                nr = 1 << sbi->s_log_groups_per_flex;
+                                nr -= group & (nr - 1);
+                                nr = min(nr, sbi->s_mb_prefetch);
                         }
                         prefetch_grp = ext4_mb_prefetch(sb, group,
                                                 nr, &prefetch_ios);
@@ -2733,7 +2733,8 @@ static int ext4_mb_init_backend(struct super_block *sb)
         if (ext4_has_feature_flex_bg(sb)) {
                 /* a single flex group is supposed to be read by a single IO */
-                sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+                sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
+                                         BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
                 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
         } else {
                 sbi->s_mb_prefetch = 32;
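
The new prefetch sizing above starts from the flex-group size, clips it to the distance from the current group to the next flex-group boundary, then caps it at s_mb_prefetch. A small worked example (not part of the patch; the values are made up):

#include <stdio.h>

int main(void)
{
        unsigned int log_groups_per_flex = 4;   /* 16 groups per flex bg */
        unsigned int s_mb_prefetch = 8;         /* global prefetch cap */
        unsigned int group = 21;                /* current group */
        unsigned int nr;

        nr = 1U << log_groups_per_flex;         /* 16 */
        nr -= group & (nr - 1);                 /* 16 - (21 mod 16) = 11 */
        if (nr > s_mb_prefetch)
                nr = s_mb_prefetch;             /* min(11, 8) = 8 */

        printf("prefetch %u groups starting at group %u\n", nr, group);
        return 0;
}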


@@ -4186,19 +4186,26 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
          */
         sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
-        blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+        if (le32_to_cpu(es->s_log_block_size) >
+            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+                ext4_msg(sb, KERN_ERR,
+                         "Invalid log block size: %u",
+                         le32_to_cpu(es->s_log_block_size));
+                goto failed_mount;
+        }
+        if (le32_to_cpu(es->s_log_cluster_size) >
+            (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+                ext4_msg(sb, KERN_ERR,
+                         "Invalid log cluster size: %u",
+                         le32_to_cpu(es->s_log_cluster_size));
+                goto failed_mount;
+        }
+
+        blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
 
         if (blocksize == PAGE_SIZE)
                 set_opt(sb, DIOREAD_NOLOCK);
 
-        if (blocksize < EXT4_MIN_BLOCK_SIZE ||
-            blocksize > EXT4_MAX_BLOCK_SIZE) {
-                ext4_msg(sb, KERN_ERR,
-                       "Unsupported filesystem blocksize %d (%d log_block_size)",
-                         blocksize, le32_to_cpu(es->s_log_block_size));
-                goto failed_mount;
-        }
-
         if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
                 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
                 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
@@ -4410,21 +4417,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
                 goto failed_mount;
 
-        if (le32_to_cpu(es->s_log_block_size) >
-            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-                ext4_msg(sb, KERN_ERR,
-                         "Invalid log block size: %u",
-                         le32_to_cpu(es->s_log_block_size));
-                goto failed_mount;
-        }
-        if (le32_to_cpu(es->s_log_cluster_size) >
-            (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-                ext4_msg(sb, KERN_ERR,
-                         "Invalid log cluster size: %u",
-                         le32_to_cpu(es->s_log_cluster_size));
-                goto failed_mount;
-        }
-
         if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                 ext4_msg(sb, KERN_ERR,
                          "Number of reserved GDT blocks insanely large: %d",


@@ -781,9 +781,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
 {
         struct task_struct *p;
         enum pid_type type;
+        unsigned long flags;
         struct pid *pid;
 
-        read_lock(&fown->lock);
+        read_lock_irqsave(&fown->lock, flags);
 
         type = fown->pid_type;
         pid = fown->pid;
@@ -804,7 +805,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
                 read_unlock(&tasklist_lock);
         }
 out_unlock_fown:
-        read_unlock(&fown->lock);
+        read_unlock_irqrestore(&fown->lock, flags);
 }
 
 static void send_sigurg_to_task(struct task_struct *p,
@@ -819,9 +820,10 @@ int send_sigurg(struct fown_struct *fown)
         struct task_struct *p;
         enum pid_type type;
         struct pid *pid;
+        unsigned long flags;
         int ret = 0;
 
-        read_lock(&fown->lock);
+        read_lock_irqsave(&fown->lock, flags);
         type = fown->pid_type;
         pid = fown->pid;
@@ -844,7 +846,7 @@ int send_sigurg(struct fown_struct *fown)
                 read_unlock(&tasklist_lock);
         }
 out_unlock_fown:
-        read_unlock(&fown->lock);
+        read_unlock_irqrestore(&fown->lock, flags);
 
         return ret;
 }
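
The fcntl change above matters because fown->lock can also be taken from interrupt context; a holder that leaves interrupts enabled can deadlock against itself, which is what read_lock_irqsave() prevents. A minimal userspace sketch of the same discipline (not part of the patch; names are hypothetical), using signal blocking as a stand-in for local_irq_save():

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t fown_lock = PTHREAD_MUTEX_INITIALIZER;

static void with_signals_blocked(void (*fn)(void))
{
        sigset_t all, old;

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &old);   /* ~ local_irq_save() */
        fn();
        pthread_sigmask(SIG_SETMASK, &old, NULL); /* ~ local_irq_restore() */
}

static void critical_section(void)
{
        pthread_mutex_lock(&fown_lock);
        /* a signal handler that also took fown_lock would self-deadlock
         * here if signals were not blocked around this section */
        pthread_mutex_unlock(&fown_lock);
}

int main(void)
{
        with_signals_blocked(critical_section);
        puts("no deadlock");
        return 0;
}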


@@ -941,6 +941,10 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
+static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+			struct io_ring_ctx *ctx);
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 			     struct io_comp_state *cs);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
@@ -1369,6 +1373,13 @@ static bool io_grab_identity(struct io_kiocb *req)
 		spin_unlock_irq(&ctx->inflight_lock);
 		req->work.flags |= IO_WQ_WORK_FILES;
 	}
+	if (!(req->work.flags & IO_WQ_WORK_MM) &&
+	    (def->work_flags & IO_WQ_WORK_MM)) {
+		if (id->mm != current->mm)
+			return false;
+		mmgrab(id->mm);
+		req->work.flags |= IO_WQ_WORK_MM;
+	}
 
 	return true;
 }
@@ -1393,13 +1404,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 		req->work.flags |= IO_WQ_WORK_UNBOUND;
 	}
 
-	/* ->mm can never change on us */
-	if (!(req->work.flags & IO_WQ_WORK_MM) &&
-	    (def->work_flags & IO_WQ_WORK_MM)) {
-		mmgrab(id->mm);
-		req->work.flags |= IO_WQ_WORK_MM;
-	}
-
 	/* if we fail grabbing identity, we must COW, regrab, and retry */
 	if (io_grab_identity(req))
 		return;
@@ -1632,8 +1636,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	LIST_HEAD(list);
 
 	if (!force) {
-		if (list_empty_careful(&ctx->cq_overflow_list))
-			return true;
 		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
 		    rings->cq_ring_entries))
 			return false;
@@ -5861,15 +5863,15 @@ static void io_req_drop_files(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->inflight_lock, flags);
-	list_del(&req->inflight_entry);
-	if (waitqueue_active(&ctx->inflight_wait))
-		wake_up(&ctx->inflight_wait);
-	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-	req->flags &= ~REQ_F_INFLIGHT;
 	put_files_struct(req->work.identity->files);
 	put_nsproxy(req->work.identity->nsproxy);
+	spin_lock_irqsave(&ctx->inflight_lock, flags);
+	list_del(&req->inflight_entry);
+	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+	req->flags &= ~REQ_F_INFLIGHT;
 	req->work.flags &= ~IO_WQ_WORK_FILES;
+	if (waitqueue_active(&ctx->inflight_wait))
+		wake_up(&ctx->inflight_wait);
 }
 
 static void __io_clean_op(struct io_kiocb *req)
@@ -6575,8 +6577,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	/* if we have a backlog and couldn't flush it all, return BUSY */
 	if (test_bit(0, &ctx->sq_check_overflow)) {
-		if (!list_empty(&ctx->cq_overflow_list) &&
-		    !io_cqring_overflow_flush(ctx, false, NULL, NULL))
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
 			return -EBUSY;
 	}
 
@@ -6798,8 +6799,16 @@ static int io_sq_thread(void *data)
 		 * kthread parking. This synchronizes the thread vs users,
 		 * the users are synchronized on the sqd->ctx_lock.
 		 */
-		if (kthread_should_park())
+		if (kthread_should_park()) {
 			kthread_parkme();
+			/*
+			 * When sq thread is unparked, in case the previous park operation
+			 * comes from io_put_sq_data(), which means that sq thread is going
+			 * to be stopped, so here needs to have a check.
+			 */
+			if (kthread_should_stop())
+				break;
+		}
 
 		if (unlikely(!list_empty(&sqd->ctx_new_list)))
 			io_sqd_init_new(sqd);
@@ -6991,18 +7000,32 @@ static void io_file_ref_kill(struct percpu_ref *ref)
 	complete(&data->done);
 }
 
+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+				  struct fixed_file_ref_node *ref_node)
+{
+	spin_lock_bh(&file_data->lock);
+	file_data->node = ref_node;
+	list_add_tail(&ref_node->node, &file_data->ref_list);
+	spin_unlock_bh(&file_data->lock);
+	percpu_ref_get(&file_data->refs);
+}
+
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
 	struct fixed_file_data *data = ctx->file_data;
-	struct fixed_file_ref_node *ref_node = NULL;
+	struct fixed_file_ref_node *backup_node, *ref_node = NULL;
 	unsigned nr_tables, i;
+	int ret;
 
 	if (!data)
 		return -ENXIO;
+	backup_node = alloc_fixed_file_ref_node(ctx);
+	if (!backup_node)
+		return -ENOMEM;
 
-	spin_lock(&data->lock);
+	spin_lock_bh(&data->lock);
 	ref_node = data->node;
-	spin_unlock(&data->lock);
+	spin_unlock_bh(&data->lock);
 	if (ref_node)
 		percpu_ref_kill(&ref_node->refs);
@@ -7010,7 +7033,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	/* wait for all refs nodes to complete */
 	flush_delayed_work(&ctx->file_put_work);
-	wait_for_completion(&data->done);
+	do {
+		ret = wait_for_completion_interruptible(&data->done);
+		if (!ret)
+			break;
+		ret = io_run_task_work_sig();
+		if (ret < 0) {
+			percpu_ref_resurrect(&data->refs);
+			reinit_completion(&data->done);
+			io_sqe_files_set_node(data, backup_node);
+			return ret;
+		}
+	} while (1);
 
 	__io_sqe_files_unregister(ctx);
 	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
@@ -7021,6 +7055,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	kfree(data);
 	ctx->file_data = NULL;
 	ctx->nr_user_files = 0;
+	destroy_fixed_file_ref_node(backup_node);
 	return 0;
 }
@@ -7385,7 +7420,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 	data = ref_node->file_data;
 	ctx = data->ctx;
 
-	spin_lock(&data->lock);
+	spin_lock_bh(&data->lock);
 	ref_node->done = true;
 
 	while (!list_empty(&data->ref_list)) {
@@ -7397,7 +7432,7 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 		list_del(&ref_node->node);
 		first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
 	}
-	spin_unlock(&data->lock);
+	spin_unlock_bh(&data->lock);
 
 	if (percpu_ref_is_dying(&data->refs))
 		delay = 0;
@@ -7519,11 +7554,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return PTR_ERR(ref_node);
 	}
 
-	file_data->node = ref_node;
-	spin_lock(&file_data->lock);
-	list_add_tail(&ref_node->node, &file_data->ref_list);
-	spin_unlock(&file_data->lock);
-	percpu_ref_get(&file_data->refs);
+	io_sqe_files_set_node(file_data, ref_node);
 	return ret;
 out_fput:
 	for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7679,11 +7710,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 	if (needs_switch) {
 		percpu_ref_kill(&data->node->refs);
-		spin_lock(&data->lock);
-		list_add_tail(&ref_node->node, &data->ref_list);
-		data->node = ref_node;
-		spin_unlock(&data->lock);
-		percpu_ref_get(&ctx->file_data->refs);
+		io_sqe_files_set_node(data, ref_node);
 	} else
 		destroy_fixed_file_ref_node(ref_node);

--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h

@@ -38,6 +38,7 @@ struct jffs2_mount_opts {
 	 * users. This is implemented simply by means of not allowing the
 	 * latter users to write to the file system if the amount if the
 	 * available space is less then 'rp_size'. */
+	bool set_rp_size;
 	unsigned int rp_size;
 };

--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c

@@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
 	if (opts->override_compr)
 		seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
-	if (opts->rp_size)
+	if (opts->set_rp_size)
 		seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
 
 	return 0;
@@ -202,11 +202,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_rp_size:
 		if (result.uint_32 > UINT_MAX / 1024)
 			return invalf(fc, "jffs2: rp_size unrepresentable");
-		opt = result.uint_32 * 1024;
-		if (opt > c->mtd->size)
-			return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
-				      c->mtd->size / 1024);
-		c->mount_opts.rp_size = opt;
+		c->mount_opts.rp_size = result.uint_32 * 1024;
+		c->mount_opts.set_rp_size = true;
 		break;
 	default:
 		return -EINVAL;
@@ -225,8 +222,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
 		c->mount_opts.override_compr = new_c->mount_opts.override_compr;
 		c->mount_opts.compr = new_c->mount_opts.compr;
 	}
-	if (new_c->mount_opts.rp_size)
+	if (new_c->mount_opts.set_rp_size) {
+		c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
 		c->mount_opts.rp_size = new_c->mount_opts.rp_size;
+	}
 	mutex_unlock(&c->alloc_sem);
 }
@@ -266,6 +265,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	c->mtd = sb->s_mtd;
 	c->os_priv = sb;
 
+	if (c->mount_opts.rp_size > c->mtd->size)
+		return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
+			      c->mtd->size / 1024);
+
 	/* Initialize JFFS2 superblock locks, the further initialization will
 	 * be done later */
 	mutex_init(&c->alloc_sem);

--- a/fs/namespace.c
+++ b/fs/namespace.c

@@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n)
 /*
  * vfsmount lock must be held for write
  */
-unsigned int mnt_get_count(struct mount *mnt)
+int mnt_get_count(struct mount *mnt)
 {
 #ifdef CONFIG_SMP
-	unsigned int count = 0;
+	int count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -1139,6 +1139,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
 	LIST_HEAD(list);
+	int count;
 
 	rcu_read_lock();
 	if (likely(READ_ONCE(mnt->mnt_ns))) {
@@ -1162,7 +1163,9 @@ static void mntput_no_expire(struct mount *mnt)
 	 */
 	smp_mb();
 	mnt_add_count(mnt, -1);
-	if (mnt_get_count(mnt)) {
+	count = mnt_get_count(mnt);
+	if (count != 0) {
+		WARN_ON(count < 0);
 		rcu_read_unlock();
 		unlock_mount_hash();
 		return;
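Note on the fs/namespace.c change above: the counter becomes signed so that the new WARN_ON() can actually fire; with an unsigned type an underflowed mnt_count shows up as a huge positive value and "count < 0" can never be true. A two-line illustration with hypothetical values:

    unsigned int u = 0;
    int s = 0;

    u--;    /* wraps to 4294967295; (u < 0) is always false */
    s--;    /* becomes -1, so WARN_ON(s < 0) can catch the underflow */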

--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c

@@ -1019,29 +1019,24 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
 	return decode_op_hdr(xdr, OP_DEALLOCATE);
 }
 
-static int decode_read_plus_data(struct xdr_stream *xdr, struct nfs_pgio_res *res,
-				 uint32_t *eof)
+static int decode_read_plus_data(struct xdr_stream *xdr,
+				 struct nfs_pgio_res *res)
 {
 	uint32_t count, recvd;
 	uint64_t offset;
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 8 + 4);
-	if (unlikely(!p))
-		return -EIO;
+	if (!p)
+		return 1;
 
 	p = xdr_decode_hyper(p, &offset);
 	count = be32_to_cpup(p);
 	recvd = xdr_align_data(xdr, res->count, count);
 	res->count += recvd;
 
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-				"count %u > recvd %u\n", count, recvd);
-		*eof = 0;
+	if (count > recvd)
 		return 1;
-	}
 
 	return 0;
 }
@@ -1052,18 +1047,16 @@ static int decode_read_plus_hole(struct xdr_stream *xdr, struct nfs_pgio_res *re
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 8 + 8);
-	if (unlikely(!p))
-		return -EIO;
+	if (!p)
+		return 1;
 
 	p = xdr_decode_hyper(p, &offset);
 	p = xdr_decode_hyper(p, &length);
 	recvd = xdr_expand_hole(xdr, res->count, length);
 	res->count += recvd;
 
-	if (recvd < length) {
-		*eof = 0;
+	if (recvd < length)
 		return 1;
-	}
 
 	return 0;
 }
@@ -1088,12 +1081,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
 	for (i = 0; i < segments; i++) {
 		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			return -EIO;
+		if (!p)
+			goto early_out;
 
 		type = be32_to_cpup(p++);
 		if (type == NFS4_CONTENT_DATA)
-			status = decode_read_plus_data(xdr, res, &eof);
+			status = decode_read_plus_data(xdr, res);
 		else if (type == NFS4_CONTENT_HOLE)
 			status = decode_read_plus_hole(xdr, res, &eof);
 		else
@@ -1102,12 +1095,17 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
 		if (status < 0)
 			return status;
 		if (status > 0)
-			break;
+			goto early_out;
 	}
 
 out:
 	res->eof = eof;
 	return 0;
+early_out:
+	if (unlikely(!i))
+		return -EIO;
+	res->eof = 0;
+	return 0;
 }
 
 static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)

--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c

@@ -67,7 +67,7 @@ static void nfs4_evict_inode(struct inode *inode)
 	nfs_inode_evict_delegation(inode);
 	/* Note that above delegreturn would trigger pnfs return-on-close */
 	pnfs_return_layout(inode);
-	pnfs_destroy_layout(NFS_I(inode));
+	pnfs_destroy_layout_final(NFS_I(inode));
 	/* First call standard NFS clear_inode() code */
 	nfs_clear_inode(inode);
 	nfs4_xattr_cache_zap(inode);

--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c

@@ -294,6 +294,7 @@ void
 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
 	struct inode *inode;
+	unsigned long i_state;
 
 	if (!lo)
 		return;
@@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 		if (!list_empty(&lo->plh_segs))
 			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
 		pnfs_detach_layout_hdr(lo);
+		i_state = inode->i_state;
 		spin_unlock(&inode->i_lock);
 		pnfs_free_layout_hdr(lo);
+		/* Notify pnfs_destroy_layout_final() that we're done */
+		if (i_state & (I_FREEING | I_CLEAR))
+			wake_up_var(lo);
 	}
 }
@@ -734,8 +739,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
 	}
 }
 
-void
-pnfs_destroy_layout(struct nfs_inode *nfsi)
+static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
 {
 	struct pnfs_layout_hdr *lo;
 	LIST_HEAD(tmp_list);
@@ -753,9 +757,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
 		pnfs_put_layout_hdr(lo);
 	} else
 		spin_unlock(&nfsi->vfs_inode.i_lock);
+	return lo;
+}
+
+void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+	__pnfs_destroy_layout(nfsi);
 }
 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
 
+static bool pnfs_layout_removed(struct nfs_inode *nfsi,
+				struct pnfs_layout_hdr *lo)
+{
+	bool ret;
+
+	spin_lock(&nfsi->vfs_inode.i_lock);
+	ret = nfsi->layout != lo;
+	spin_unlock(&nfsi->vfs_inode.i_lock);
+	return ret;
+}
+
+void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+	struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
+
+	if (lo)
+		wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
+}
+
 static bool
 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
 		struct list_head *layout_list)
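Note on the pnfs change above: pnfs_destroy_layout_final() is built on the generic wait_var_event()/wake_up_var() primitive, where one side sleeps keyed on an address (here the layout header pointer) until the other side signals on the same address. A self-contained sketch of that primitive with hypothetical demo_* names, not code from the patch:

    #include <linux/wait_bit.h>

    static bool demo_done;
    static int demo_token;  /* any address can serve as the wait key */

    static void demo_waiter(void)
    {
            /* sleeps until demo_waker() signals on &demo_token */
            wait_var_event(&demo_token, READ_ONCE(demo_done));
    }

    static void demo_waker(void)
    {
            WRITE_ONCE(demo_done, true);
            wake_up_var(&demo_token);
    }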

--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h

@@ -266,6 +266,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
 void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
 void pnfs_free_lseg_list(struct list_head *tmp_list);
 void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_layout_final(struct nfs_inode *);
 void pnfs_destroy_all_layouts(struct nfs_client *);
 int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
 		struct nfs_fsid *fsid,
@@ -710,6 +711,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
 {
 }
 
+static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+}
+
 static inline struct pnfs_layout_segment *
 pnfs_get_lseg(struct pnfs_layout_segment *lseg)
 {

--- a/fs/pnode.h
+++ b/fs/pnode.h

@@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int);
 void propagate_mount_unlock(struct mount *);
 void mnt_release_group_id(struct mount *);
 int get_dominating_id(struct mount *mnt, const struct path *root);
-unsigned int mnt_get_count(struct mount *mnt);
+int mnt_get_count(struct mount *mnt);
 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
 			struct mount *);
 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,

--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c

@@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 	memset(buf, 0, info->dqi_usable_bs);
 	return sb->s_op->quota_read(sb, info->dqi_type, buf,
-	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
 }
 
 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
@@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 	ssize_t ret;
 
 	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
-	      info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+	      info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
 	if (ret != info->dqi_usable_bs) {
 		quota_error(sb, "dquota write failed");
 		if (ret >= 0)
@@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
 			    blk);
 		goto out_buf;
 	}
-	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
+	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
 			sizeof(struct qt_disk_dqdbheader) +
 			i * info->dqi_entry_size;
 	kfree(buf);
@@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
 		ret = -EIO;
 		goto out_buf;
 	} else {
-		ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
+		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
 		      qt_disk_dqdbheader) + i * info->dqi_entry_size;
 	}
 out_buf:
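Note on the quota_tree.c change above: 'blk' is a 32-bit uint, so without the cast the shift is performed in 32 bits and wraps before the result is widened to loff_t; casting first makes the whole shift 64-bit. A sketch of the failure mode with hypothetical values:

    unsigned int blk = 1U << 22;            /* block number 4194304 */
    unsigned int shift = 10;                /* 1 KiB quota blocks */

    loff_t bad  = blk << shift;             /* 32-bit shift wraps to 0 */
    loff_t good = (loff_t)blk << shift;     /* 64-bit shift: 4 GiB */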

--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c

@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 					 "(second one): %h", ih);
 			return 0;
 		}
+		if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
+			reiserfs_warning(NULL, "reiserfs-5093",
+					 "item entry count seems wrong %h",
+					 ih);
+			return 0;
+		}
 		prev_location = ih_location(ih);
 	}

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -2440,8 +2440,9 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-		enum meminit_context, struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+		unsigned long, unsigned long, enum meminit_context,
+		struct vmem_altmap *, int migratetype);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);

--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h

@@ -28,4 +28,9 @@
 #define _BITUL(x)	(_UL(1) << (x))
 #define _BITULL(x)	(_ULL(1) << (x))
 
+#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
 #endif /* _UAPI_LINUX_CONST_H */

--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h

@@ -14,7 +14,7 @@
 #ifndef _UAPI_LINUX_ETHTOOL_H
 #define _UAPI_LINUX_ETHTOOL_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/if_ether.h>

--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h

@@ -3,13 +3,6 @@
 #define _UAPI_LINUX_KERNEL_H
 
 #include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+#include <linux/const.h>
 
 #endif /* _UAPI_LINUX_KERNEL_H */
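Note on the uapi header changes above and below: with the helpers moved into <linux/const.h>, the exported headers that follow can swap <linux/kernel.h> for <linux/const.h> and stop dragging in <linux/sysinfo.h>, while userspace keeps the same macros. A quick userspace check, assuming only what the new const.h above provides:

    #include <linux/const.h>
    #include <stdio.h>

    int main(void)
    {
            /* both helpers now come from <linux/const.h> alone */
            printf("%d\n",  __KERNEL_DIV_ROUND_UP(10, 4));  /* prints 3 */
            printf("%lu\n", __ALIGN_KERNEL(100UL, 64UL));   /* prints 128 */
            return 0;
    }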

--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h

@@ -21,7 +21,7 @@
 #define _UAPI_LINUX_LIGHTNVM_H
 
 #ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/ioctl.h>
 #else /* __KERNEL__ */
 #include <stdio.h>

--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h

@@ -2,7 +2,7 @@
 #ifndef _UAPI__LINUX_MROUTE6_H
 #define _UAPI__LINUX_MROUTE6_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/sockios.h>
 #include <linux/in6.h>		/* For struct sockaddr_in6. */

--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _UAPI_X_TABLES_H
 #define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 
 #define XT_FUNCTION_MAXNAMELEN 30

--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h

@@ -2,7 +2,7 @@
 #ifndef _UAPI__LINUX_NETLINK_H
 #define _UAPI__LINUX_NETLINK_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/socket.h> /* for __kernel_sa_family_t */
 #include <linux/types.h>

--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h

@@ -23,7 +23,7 @@
 #ifndef _UAPI_LINUX_SYSCTL_H
 #define _UAPI_LINUX_SYSCTL_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>

--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c

@@ -909,6 +909,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
 	if (opt == -ENOPARAM) {
 		if (strcmp(param->key, "source") == 0) {
+			if (fc->source)
+				return invalf(fc, "Multiple sources not supported");
 			fc->source = param->string;
 			param->string = NULL;
 			return 0;

--- a/kernel/module.c
+++ b/kernel/module.c

@@ -1895,7 +1895,6 @@ static int mod_sysfs_init(struct module *mod)
 	if (err)
 		mod_kobject_put(mod);
 
-	/* delay uevent until full sysfs population */
 out:
 	return err;
 }
@@ -1932,7 +1931,6 @@ static int mod_sysfs_setup(struct module *mod,
 	add_sect_attrs(mod, info);
 	add_notes_attrs(mod, info);
 
-	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
 	return 0;
 
 out_unreg_modinfo_attrs:
@@ -3639,6 +3637,9 @@ static noinline int do_init_module(struct module *mod)
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_LIVE, mod);
 
+	/* Delay uevent until module has finished its init routine */
+	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
 	/*
 	 * We need to finish all async code before the module init sequence
 	 * is done. This has potential to deadlock. For example, a newly
@@ -3991,6 +3992,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 					     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
  bug_cleanup:
+	mod->state = MODULE_STATE_GOING;
 	/* module_bug_cleanup needs module_mutex protection */
 	mutex_lock(&module_mutex);
 	module_bug_cleanup(mod);

--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c

@@ -944,13 +944,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 		 */
 		if (tick_do_timer_cpu == cpu)
 			return false;
-		/*
-		 * Boot safety: make sure the timekeeping duty has been
-		 * assigned before entering dyntick-idle mode,
-		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
-		 */
-		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
-			return false;
 
 		/* Should not happen for nohz-full */
 		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))

--- a/lib/zlib_dfltcc/Makefile
+++ b/lib/zlib_dfltcc/Makefile

@@ -8,4 +8,4 @@
 
 obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
 
-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o

--- a/lib/zlib_dfltcc/dfltcc.c
+++ b/lib/zlib_dfltcc/dfltcc.c

@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: Zlib
 /* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
 
-#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
 #include "dfltcc_util.h"
 #include "dfltcc.h"
@@ -53,3 +54,6 @@ void dfltcc_reset(
     dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
     dfltcc_state->param.ribm = DFLTCC_RIBM;
 }
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");

--- a/lib/zlib_dfltcc/dfltcc_deflate.c
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c

@@ -4,6 +4,7 @@
 #include "dfltcc_util.h"
 #include "dfltcc.h"
 #include <asm/setup.h>
+#include <linux/export.h>
 #include <linux/zutil.h>
 
 /*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
 
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_can_deflate);
 
 static void dfltcc_gdht(
     z_streamp strm
@@ -277,3 +279,4 @@ again:
         goto again; /* deflate() must use all input or all output */
     return 1;
 }
+EXPORT_SYMBOL(dfltcc_deflate);

--- a/lib/zlib_dfltcc/dfltcc_inflate.c
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c

@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
     param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
     if (param->hl)
         param->nt = 0; /* Honor history for the first block */
-    param->cv = state->flags ? REVERSE(state->check) : state->check;
+    param->cv = state->check;
 
     /* Inflate */
     do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
     state->bits = param->sbb;
     state->whave = param->hl;
     state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
-    state->check = state->flags ? REVERSE(param->cv) : param->cv;
+    state->check = param->cv;
     if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
         /* Report an error if stream is corrupted */
         state->mode = BAD;

--- a/lib/zlib_dfltcc/dfltcc_syms.c
+++ /dev/null

@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- *  Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -4106,10 +4106,30 @@ retry_avoidcopy:
 	 * may get SIGKILLed if it later faults.
 	 */
 	if (outside_reserve) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		pgoff_t idx;
+		u32 hash;
+
 		put_page(old_page);
 		BUG_ON(huge_pte_none(pte));
+		/*
+		 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+		 * unmapping. unmapping needs to hold i_mmap_rwsem
+		 * in write mode. Dropping i_mmap_rwsem in read mode
+		 * here is OK as COW mappings do not interact with
+		 * PMD sharing.
+		 *
+		 * Reacquire both after unmap operation.
+		 */
+		idx = vma_hugecache_offset(h, vma, haddr);
+		hash = hugetlb_fault_mutex_hash(mapping, idx);
+		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
+
 		unmap_ref_private(mm, vma, old_page, haddr);
-		BUG_ON(huge_pte_none(pte));
+
+		i_mmap_lock_read(mapping);
+		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		spin_lock(ptl);
 		ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 		if (likely(ptep &&

--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c

@@ -714,7 +714,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	 * expects the zone spans the pfn range. All the pages in the range
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
 			 MEMINIT_HOTPLUG, altmap, migratetype);
 
 	set_zone_contiguous(zone);

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -460,6 +460,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 		return false;
 
+	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+		return true;
+
 	/*
 	 * We start only with one section of pages, more pages are added as
 	 * needed until the rest of deferred pages are initialized.
@@ -6109,7 +6111,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn,
+		unsigned long start_pfn, unsigned long zone_end_pfn,
 		enum meminit_context context,
 		struct vmem_altmap *altmap, int migratetype)
 {
@@ -6145,7 +6147,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context == MEMINIT_EARLY) {
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
-			if (defer_init(nid, pfn, end_pfn))
+			if (defer_init(nid, pfn, zone_end_pfn))
 				break;
 		}
@@ -6259,7 +6261,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 		if (end_pfn > start_pfn) {
 			size = end_pfn - start_pfn;
-			memmap_init_zone(size, nid, zone, start_pfn,
+			memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
 					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 		}
 	}

--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c

@@ -194,8 +194,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
 	if (netif_is_rxfh_configured(dev) &&
 	    !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
 	    (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+		ret = -EINVAL;
 		GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
-		return -EINVAL;
+		goto out_ops;
 	}
 
 	/* Disabling channels, query zero-copy AF_XDP sockets */
@@ -203,8 +204,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
 		       min(channels.rx_count, channels.tx_count);
 	for (i = from_channel; i < old_total; i++)
 		if (xsk_get_pool_from_qid(dev, i)) {
+			ret = -EINVAL;
 			GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
-			return -EINVAL;
+			goto out_ops;
 		}
 
 	ret = dev->ethtool_ops->set_channels(dev, &channels);

--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c

@@ -182,7 +182,7 @@ static int strset_parse_request(struct ethnl_req_info *req_base,
 		ret = strset_get_id(attr, &id, extack);
 		if (ret < 0)
 			return ret;
-		if (ret >= ETH_SS_COUNT) {
+		if (id >= ETH_SS_COUNT) {
 			NL_SET_ERR_MSG_ATTR(extack, attr,
 					    "unknown string set id");
 			return -EOPNOTSUPP;

--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c

@@ -2081,6 +2081,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 	sock_reset_flag(nsk, SOCK_RCU_FREE);
 	/* will be fully established after successful MPC subflow creation */
 	inet_sk_state_store(nsk, TCP_SYN_RECV);
+
+	security_inet_csk_clone(nsk, req);
 	bh_unlock_sock(nsk);
 
 	/* keep a single reference */

--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c

@@ -1596,6 +1596,21 @@ free_sched:
 	return err;
 }
 
+static void taprio_reset(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	int i;
+
+	hrtimer_cancel(&q->advance_timer);
+	if (q->qdiscs) {
+		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+			qdisc_reset(q->qdiscs[i]);
+	}
+	sch->qstats.backlog = 0;
+	sch->q.qlen = 0;
+}
+
 static void taprio_destroy(struct Qdisc *sch)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
@@ -1606,7 +1621,6 @@ static void taprio_destroy(struct Qdisc *sch)
 	list_del(&q->taprio_list);
 	spin_unlock(&taprio_list_lock);
 
-	hrtimer_cancel(&q->advance_timer);
 	taprio_disable_offload(dev, q, NULL);
@@ -1953,6 +1967,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
 	.init		= taprio_init,
 	.change		= taprio_change,
 	.destroy	= taprio_destroy,
+	.reset		= taprio_reset,
 	.peek		= taprio_peek,
 	.dequeue	= taprio_dequeue,
 	.enqueue	= taprio_enqueue,

--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c

@@ -755,8 +755,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 		runtime->boundary *= 2;
 
 	/* clear the buffer for avoiding possible kernel info leaks */
-	if (runtime->dma_area && !substream->ops->copy_user)
-		memset(runtime->dma_area, 0, runtime->dma_bytes);
+	if (runtime->dma_area && !substream->ops->copy_user) {
+		size_t size = runtime->dma_bytes;
+
+		if (runtime->info & SNDRV_PCM_INFO_MMAP)
+			size = PAGE_ALIGN(size);
+		memset(runtime->dma_area, 0, size);
+	}
 
 	snd_pcm_timer_resolution_change(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);

--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c

@@ -95,11 +95,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
 	}
 }
 
-static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
+static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
+{
+	return runtime->avail >= runtime->avail_min;
+}
+
+static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
 {
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long flags;
+	bool ready;
 
-	return runtime->avail >= runtime->avail_min;
+	spin_lock_irqsave(&runtime->lock, flags);
+	ready = __snd_rawmidi_ready(runtime);
+	spin_unlock_irqrestore(&runtime->lock, flags);
+	return ready;
 }
 
 static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
@@ -1019,7 +1029,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
 	if (result > 0) {
 		if (runtime->event)
 			schedule_work(&runtime->event_work);
-		else if (snd_rawmidi_ready(substream))
+		else if (__snd_rawmidi_ready(runtime))
 			wake_up(&runtime->sleep);
 	}
 	spin_unlock_irqrestore(&runtime->lock, flags);
@@ -1098,7 +1108,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 	result = 0;
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
-		while (!snd_rawmidi_ready(substream)) {
+		while (!__snd_rawmidi_ready(runtime)) {
 			wait_queue_entry_t wait;
 
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
@@ -1115,9 +1125,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 				return -ENODEV;
 			if (signal_pending(current))
 				return result > 0 ? result : -ERESTARTSYS;
-			if (!runtime->avail)
-				return result > 0 ? result : -EIO;
 			spin_lock_irq(&runtime->lock);
+			if (!runtime->avail) {
+				spin_unlock_irq(&runtime->lock);
+				return result > 0 ? result : -EIO;
+			}
 		}
 		spin_unlock_irq(&runtime->lock);
 		count1 = snd_rawmidi_kernel_read1(substream,
@@ -1255,7 +1267,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
 	runtime->avail += count;
 	substream->bytes += count;
 	if (count > 0) {
-		if (runtime->drain || snd_rawmidi_ready(substream))
+		if (runtime->drain || __snd_rawmidi_ready(runtime))
 			wake_up(&runtime->sleep);
 	}
 	return count;
@@ -1444,9 +1456,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 				return -ENODEV;
 			if (signal_pending(current))
 				return result > 0 ? result : -ERESTARTSYS;
-			if (!runtime->avail && !timeout)
-				return result > 0 ? result : -EIO;
 			spin_lock_irq(&runtime->lock);
+			if (!runtime->avail && !timeout) {
+				spin_unlock_irq(&runtime->lock);
+				return result > 0 ? result : -EIO;
+			}
 		}
 		spin_unlock_irq(&runtime->lock);
 		count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
@@ -1526,6 +1540,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_substream *substream;
 	struct snd_rawmidi_runtime *runtime;
+	unsigned long buffer_size, avail, xruns;
 
 	rmidi = entry->private_data;
 	snd_iprintf(buffer, "%s\n\n", rmidi->name);
@@ -1544,13 +1559,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 				    "  Owner PID    : %d\n",
 				    pid_vnr(substream->pid));
 			runtime = substream->runtime;
+			spin_lock_irq(&runtime->lock);
+			buffer_size = runtime->buffer_size;
+			avail = runtime->avail;
+			spin_unlock_irq(&runtime->lock);
 			snd_iprintf(buffer,
 				    "  Mode         : %s\n"
 				    "  Buffer size  : %lu\n"
 				    "  Avail        : %lu\n",
 				    runtime->oss ? "OSS compatible" : "native",
-				    (unsigned long) runtime->buffer_size,
-				    (unsigned long) runtime->avail);
+				    buffer_size, avail);
 		}
 	}
 }
@@ -1568,13 +1586,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
 				    "  Owner PID    : %d\n",
 				    pid_vnr(substream->pid));
 			runtime = substream->runtime;
+			spin_lock_irq(&runtime->lock);
+			buffer_size = runtime->buffer_size;
+			avail = runtime->avail;
+			xruns = runtime->xruns;
+			spin_unlock_irq(&runtime->lock);
 			snd_iprintf(buffer,
 				    "  Buffer size  : %lu\n"
 				    "  Avail        : %lu\n"
 				    "  Overruns     : %lu\n",
-				    (unsigned long) runtime->buffer_size,
-				    (unsigned long) runtime->avail,
-				    (unsigned long) runtime->xruns);
+				    buffer_size, avail, xruns);
 		}
 	}
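Note on the rawmidi change above: it follows the usual double-underscore locking convention: __snd_rawmidi_ready() assumes the caller already holds runtime->lock, while snd_rawmidi_ready() is the lock-taking wrapper, so runtime->avail is never read outside the spinlock. A sketch of the pattern with a hypothetical struct demo, not the driver's real types:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo {
            spinlock_t lock;
            unsigned long avail, avail_min;
    };

    /* caller must hold d->lock */
    static bool __demo_ready(struct demo *d)
    {
            return d->avail >= d->avail_min;
    }

    /* lock-taking wrapper for callers outside the locked region */
    static bool demo_ready(struct demo *d)
    {
            unsigned long flags;
            bool ready;

            spin_lock_irqsave(&d->lock, flags);
            ready = __demo_ready(d);
            spin_unlock_irqrestore(&d->lock, flags);
            return ready;
    }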

--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h

@@ -26,10 +26,10 @@ struct snd_seq_queue {
 	struct snd_seq_timer *timer;	/* time keeper for this queue */
 	int	owner;		/* client that 'owns' the timer */
-	unsigned int	locked:1,	/* timer is only accesibble by owner if set */
-		klocked:1,	/* kernel lock (after START) */
-		check_again:1,
-		check_blocked:1;
+	bool	locked;		/* timer is only accesibble by owner if set */
+	bool	klocked;	/* kernel lock (after START) */
+	bool	check_again;	/* concurrent access happened during check */
+	bool	check_blocked;	/* queue being checked */
 
 	unsigned int	flags;		/* status flags */
 	unsigned int	info_flags;	/* info for sync */

--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h

@@ -28,4 +28,9 @@
 #define _BITUL(x)	(_UL(1) << (x))
 #define _BITULL(x)	(_ULL(1) << (x))
 
+#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
 #endif /* _UAPI_LINUX_CONST_H */