Merge 5.10.165 into android12-5.10-lts

Changes in 5.10.165
	btrfs: fix trace event name typo for FLUSH_DELAYED_REFS
	pNFS/filelayout: Fix coalescing test for single DS
	selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID
	tools/virtio: initialize spinlocks in vring_test.c
	net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats
	RDMA/srp: Move large values to a new enum for gcc13
	btrfs: always report error in run_one_delayed_ref()
	x86/asm: Fix an assembler warning with current binutils
	f2fs: let's avoid panic if extent_tree is not created
	wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices
	wifi: mac80211: sdata can be NULL during AMPDU start
	Add exception protection processing for vd in axi_chan_handle_err function
	zonefs: Detect append writes at invalid locations
	nilfs2: fix general protection fault in nilfs_btree_insert()
	efi: fix userspace infinite retry read efivars after EFI runtime services page fault
	ALSA: hda/realtek - Turn on power early
	drm/i915/gt: Reset twice
	Bluetooth: hci_qca: Wait for timeout during suspend
	Bluetooth: hci_qca: Fix driver shutdown on closed serdev
	io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
	io_uring: improve send/recv error handling
	io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly
	io_uring: add flag for disabling provided buffer recycling
	io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
	io_uring: allow re-poll if we made progress
	io_uring: fix async accept on O_NONBLOCK sockets
	io_uring: check for valid register opcode earlier
	io_uring: lock overflowing for IOPOLL
	io_uring: fix CQ waiting timeout handling
	io_uring: ensure that cached task references are always put on exit
	io_uring: remove duplicated calls to io_kiocb_ppos
	io_uring: update kiocb->ki_pos at execution time
	io_uring: do not recalculate ppos unnecessarily
	io_uring/rw: defer fsnotify calls to task context
	xhci-pci: set the dma max_seg_size
	usb: xhci: Check endpoint is valid before dereferencing it
	xhci: Fix null pointer dereference when host dies
	xhci: Add update_hub_device override for PCI xHCI hosts
	xhci: Add a flag to disable USB3 lpm on a xhci root port level.
	usb: acpi: add helper to check port lpm capability using acpi _DSM
	xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables
	prlimit: do_prlimit needs to have a speculation check
	USB: serial: option: add Quectel EM05-G (GR) modem
	USB: serial: option: add Quectel EM05-G (CS) modem
	USB: serial: option: add Quectel EM05-G (RS) modem
	USB: serial: option: add Quectel EC200U modem
	USB: serial: option: add Quectel EM05CN (SG) modem
	USB: serial: option: add Quectel EM05CN modem
	staging: vchiq_arm: fix enum vchiq_status return types
	USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100
	misc: fastrpc: Don't remove map on creater_process and device_release
	misc: fastrpc: Fix use-after-free race condition for maps
	usb: core: hub: disable autosuspend for TI TUSB8041
	comedi: adv_pci1760: Fix PWM instruction handling
	mmc: sunxi-mmc: Fix clock refcount imbalance during unbind
	mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting
	btrfs: fix race between quota rescan and disable leading to NULL pointer deref
	cifs: do not include page data when checking signature
	thunderbolt: Use correct function to calculate maximum USB3 link rate
	tty: serial: qcom-geni-serial: fix slab-out-of-bounds on RX FIFO buffer
	USB: gadgetfs: Fix race between mounting and unmounting
	USB: serial: cp210x: add SCALANCE LPE-9000 device id
	usb: host: ehci-fsl: Fix module alias
	usb: typec: altmodes/displayport: Add pin assignment helper
	usb: typec: altmodes/displayport: Fix pin assignment calculation
	usb: gadget: g_webcam: Send color matching descriptor per frame
	usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate()
	usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210
	dt-bindings: phy: g12a-usb2-phy: fix compatible string documentation
	dt-bindings: phy: g12a-usb3-pcie-phy: fix compatible string documentation
	serial: pch_uart: Pass correct sg to dma_unmap_sg()
	dmaengine: tegra210-adma: fix global intr clear
	serial: atmel: fix incorrect baudrate setup
	gsmi: fix null-deref in gsmi_get_variable
	mei: me: add meteor lake point M DID
	drm/i915: re-disable RC6p on Sandy Bridge
	drm/amd/display: Fix set scaling doesn's work
	drm/amd/display: Calculate output_color_space after pixel encoding adjustment
	drm/amd/display: Fix COLOR_SPACE_YCBCR2020_TYPE matrix
	arm64: efi: Execute runtime services from a dedicated stack
	efi: rt-wrapper: Add missing include
	Revert "drm/amdgpu: make display pinning more flexible (v2)"
	x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
	tracing: Use alignof__(struct {type b;}) instead of offsetof()
	io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
	io_uring/net: fix fast_iov assignment in io_setup_async_msg()
	net/ulp: use consistent error code when blocking ULP
	net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
	Revert "wifi: mac80211: fix memory leak in ieee80211_if_add()"
	soc: qcom: apr: Make qcom,protection-domain optional again
	Bluetooth: hci_qca: Wait for SSR completion during suspend
	Bluetooth: hci_qca: check for SSR triggered flag while suspend
	Bluetooth: hci_qca: Fixed issue during suspend
	mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma
	io_uring: Clean up a false-positive warning from GCC 9.3.0
	io_uring: fix double poll leak on repolling
	io_uring/rw: ensure kiocb_end_write() is always called
	io_uring/rw: remove leftover debug statement
	Linux 5.10.165

Change-Id: Icb91157d9fa1b56cd79eedb8a9cc6118d0705244
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
 72 files changed, 811 insertions(+), 221 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -588,6 +588,7 @@ struct io_sr_msg {
 	int				msg_flags;
 	int				bgid;
 	size_t				len;
+	size_t				done_io;
 	struct io_buffer		*kbuf;
 };
@@ -749,6 +750,7 @@ enum {
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
 	REQ_F_ARM_LTIMEOUT_BIT,
+	REQ_F_PARTIAL_IO_BIT,
 	/* keep async read/write and isreg together and in order */
 	REQ_F_NOWAIT_READ_BIT,
 	REQ_F_NOWAIT_WRITE_BIT,
@@ -804,6 +806,8 @@ enum {
 	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
 	/* there is a linked timeout that has to be armed */
 	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
+	/* request has already done partial IO */
+	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
 };
 
 struct async_poll {
@@ -2488,12 +2492,26 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	io_init_req_batch(&rb);
 	while (!list_empty(done)) {
+		struct io_uring_cqe *cqe;
+		unsigned cflags;
+
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
-
-		io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req));
+		cflags = io_put_rw_kbuf(req);
 		(*nr_events)++;
 
+		cqe = io_get_cqe(ctx);
+		if (cqe) {
+			WRITE_ONCE(cqe->user_data, req->user_data);
+			WRITE_ONCE(cqe->res, req->result);
+			WRITE_ONCE(cqe->flags, cflags);
+		} else {
+			spin_lock(&ctx->completion_lock);
+			io_cqring_event_overflow(ctx, req->user_data,
+						 req->result, cflags);
+			spin_unlock(&ctx->completion_lock);
+		}
+
 		if (req_ref_put_and_test(req))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
@@ -2692,17 +2710,32 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 }
 #endif
 
-static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+/*
+ * Trigger the notifications after having done some IO, and finish the write
+ * accounting, if any.
+ */
+static void io_req_io_end(struct io_kiocb *req)
 {
-	if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
+	struct io_rw *rw = &req->rw;
+
+	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 		kiocb_end_write(req);
 		fsnotify_modify(req->file);
 	} else {
 		fsnotify_access(req->file);
 	}
+}
+
+static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+{
 	if (res != req->result) {
 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 		    io_rw_should_reissue(req)) {
+			/*
+			 * Reissue will start accounting again, finish the
+			 * current cycle.
+			 */
+			io_req_io_end(req);
 			req->flags |= REQ_F_REISSUE;
 			return true;
 		}
@@ -2744,12 +2777,10 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 	}
 }
 
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
-			     unsigned int issue_flags)
+static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 {
-	if (__io_complete_rw_common(req, res))
-		return;
-	__io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req));
+	io_req_io_end(req);
+	io_req_task_complete(req, locked);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
@@ -2759,7 +2790,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 	if (__io_complete_rw_common(req, res))
 		return;
 	req->result = io_fixup_rw_res(req, res);
-	req->io_task_work.func = io_req_task_complete;
+	req->io_task_work.func = io_req_rw_complete;
 	io_req_task_work_add(req);
 }
@@ -2911,14 +2942,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		req->flags |= REQ_F_ISREG;
 
 	kiocb->ki_pos = READ_ONCE(sqe->off);
-	if (kiocb->ki_pos == -1) {
-		if (!(file->f_mode & FMODE_STREAM)) {
-			req->flags |= REQ_F_CUR_POS;
-			kiocb->ki_pos = file->f_pos;
-		} else {
-			kiocb->ki_pos = 0;
-		}
-	}
 	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
 	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
 	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
@@ -3000,6 +3023,23 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 	}
 }
 
+static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
+{
+	struct kiocb *kiocb = &req->rw.kiocb;
+
+	if (kiocb->ki_pos != -1)
+		return &kiocb->ki_pos;
+
+	if (!(req->file->f_mode & FMODE_STREAM)) {
+		req->flags |= REQ_F_CUR_POS;
+		kiocb->ki_pos = req->file->f_pos;
+		return &kiocb->ki_pos;
+	}
+
+	kiocb->ki_pos = 0;
+	return NULL;
+}
+
 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		       unsigned int issue_flags)
 {
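The new helper resolves the offset once at execution time: an explicit offset is used as-is, -1 selects (and advances) the file's f_pos for non-stream files, and streams get a NULL ppos. Sampling f_pos when the request actually runs, rather than in io_prep_rw(), avoids re-reading it for a request that bounces through the async path. From userspace, the -1 convention looks like this minimal sketch (assumes liburing, built with -luring; error handling trimmed):

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);
	sqe = io_uring_get_sqe(&ring);
	/* offset -1: read at (and advance) the file's current position */
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), (__u64) -1);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}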
@@ -3007,10 +3047,20 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
-	if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
-		__io_complete_rw(req, ret, 0, issue_flags);
-	else
+	if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) {
+		if (!__io_complete_rw_common(req, ret)) {
+			/*
+			 * Safe to call io_end from here as we're inline
+			 * from the submission path.
+			 */
+			io_req_io_end(req);
+			__io_req_complete(req, issue_flags,
+					  io_fixup_rw_res(req, ret),
+					  io_put_rw_kbuf(req));
+		}
+	} else {
 		io_rw_done(kiocb, ret);
+	}
 
 	if (req->flags & REQ_F_REISSUE) {
 		req->flags &= ~REQ_F_REISSUE;
@@ -3292,6 +3342,7 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
 	struct kiocb *kiocb = &req->rw.kiocb;
 	struct file *file = req->file;
 	ssize_t ret = 0;
+	loff_t *ppos;
 
 	/*
 	 * Don't support polled IO through this interface, and we can't
@@ -3303,6 +3354,8 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
 	if (kiocb->ki_flags & IOCB_NOWAIT)
 		return -EAGAIN;
 
+	ppos = io_kiocb_ppos(kiocb);
+
 	while (iov_iter_count(iter)) {
 		struct iovec iovec;
 		ssize_t nr;
@@ -3316,10 +3369,10 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
 		if (rw == READ) {
 			nr = file->f_op->read(file, iovec.iov_base,
-					      iovec.iov_len, io_kiocb_ppos(kiocb));
+					      iovec.iov_len, ppos);
 		} else {
 			nr = file->f_op->write(file, iovec.iov_base,
-					       iovec.iov_len, io_kiocb_ppos(kiocb));
+					       iovec.iov_len, ppos);
 		}
 
 		if (nr < 0) {
@@ -3520,6 +3573,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	struct iov_iter_state __state, *state;
 	ssize_t ret, ret2;
+	loff_t *ppos;
 
 	if (rw) {
 		iter = &rw->iter;
@@ -3552,7 +3606,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 		return ret ?: -EAGAIN;
 	}
 
-	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
+	ppos = io_kiocb_update_pos(req);
+
+	ret = rw_verify_area(READ, req->file, ppos, req->result);
 	if (unlikely(ret)) {
 		kfree(iovec);
 		return ret;
@@ -3656,6 +3712,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	struct iov_iter_state __state, *state;
 	ssize_t ret, ret2;
+	loff_t *ppos;
 
 	if (rw) {
 		iter = &rw->iter;
@@ -3686,7 +3743,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	    (req->flags & REQ_F_ISREG))
 		goto copy_iov;
 
-	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
+	ppos = io_kiocb_update_pos(req);
+
+	ret = rw_verify_area(WRITE, req->file, ppos, req->result);
 	if (unlikely(ret))
 		goto out_free;
@@ -4623,6 +4682,13 @@ static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 }
 
 #if defined(CONFIG_NET)
+static bool io_net_retry(struct socket *sock, int flags)
+{
+	if (!(flags & MSG_WAITALL))
+		return false;
+	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
+}
+
 static int io_setup_async_msg(struct io_kiocb *req,
 			      struct io_async_msghdr *kmsg)
 {
@@ -4640,8 +4706,10 @@ static int io_setup_async_msg(struct io_kiocb *req,
 	if (async_msg->msg.msg_name)
 		async_msg->msg.msg_name = &async_msg->addr;
 	/* if were using fast_iov, set it to the new one */
-	if (!async_msg->free_iov)
-		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
+	if (!kmsg->free_iov) {
+		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
+		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
+	}
 
 	return -EAGAIN;
 }
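The old assignment always pointed the copied iterator at &fast_iov[0], discarding however far the iterator had already advanced into the submission-side array, so a retry could replay iovecs that were already consumed. The fix rebases by element index. The same aliasing rule, reduced to a stand-alone illustration (the msg_state/clone_state names are hypothetical, not kernel API):

#include <assert.h>
#include <stddef.h>
#include <sys/uio.h>

#define FAST_IOV 8

struct msg_state {
	struct iovec fast_iov[FAST_IOV];
	const struct iovec *cur;	/* may point into fast_iov */
};

/* When duplicating a struct whose embedded array is aliased by a
 * pointer, rebase the pointer by index instead of resetting it to
 * element 0, or the copy loses the progress the original had made. */
static void clone_state(struct msg_state *dst, const struct msg_state *src)
{
	*dst = *src;
	if (src->cur >= src->fast_iov && src->cur < src->fast_iov + FAST_IOV) {
		size_t idx = src->cur - src->fast_iov;
		dst->cur = &dst->fast_iov[idx];
	}
}

int main(void)
{
	struct msg_state a = { .cur = NULL }, b;

	a.cur = &a.fast_iov[3];			/* iterator has advanced to element 3 */
	clone_state(&b, &a);
	assert(b.cur == &b.fast_iov[3]);	/* progress preserved in the copy */
	return 0;
}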
@@ -4686,12 +4754,14 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
+	sr->done_io = 0;
 	return 0;
 }
 
 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr iomsg, *kmsg;
+	struct io_sr_msg *sr = &req->sr_msg;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -4716,17 +4786,27 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
-	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
-		return io_setup_async_msg(req, kmsg);
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
+	if (ret < min_ret) {
+		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+			return io_setup_async_msg(req, kmsg);
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return io_setup_async_msg(req, kmsg);
+		}
+		req_set_fail(req);
+	}
 	/* fast path, check for non-NULL to avoid function call */
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < min_ret)
-		req_set_fail(req);
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
 }
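Net effect of the MSG_WAITALL changes: a short transfer on a retryable socket (per io_net_retry() above: stream or seqpacket, and only when MSG_WAITALL was requested) is requeued with done_io recording the bytes already moved, and the eventual CQE reports the accumulated total; if a later retry fails after partial progress, the CQE reports done_io rather than the error. A minimal userspace sketch (assumes liburing, -luring; error handling trimmed):

#include <liburing.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int sv[2];
	char buf[256] = "payload";

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	io_uring_queue_init(8, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_send(sqe, sv[0], buf, sizeof(buf), MSG_WAITALL);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	/* res == sizeof(buf) on success; after a failed retry with
	 * partial progress, res holds the bytes already sent (done_io). */
	printf("sent %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}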
@@ -4762,13 +4842,24 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &msg);
-	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
-		return -EAGAIN;
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
-
-	if (ret < min_ret)
+	if (ret < min_ret) {
+		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+			return -EAGAIN;
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->len -= ret;
+			sr->buf += ret;
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return -EAGAIN;
+		}
 		req_set_fail(req);
+	}
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
 }
@@ -4912,12 +5003,14 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
+	sr->done_io = 0;
 	return 0;
 }
 
 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr iomsg, *kmsg;
+	struct io_sr_msg *sr = &req->sr_msg;
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
@@ -4955,10 +5048,20 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
 				 kmsg->uaddr, flags);
-	if (force_nonblock && ret == -EAGAIN)
-		return io_setup_async_msg(req, kmsg);
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
+	if (ret < min_ret) {
+		if (ret == -EAGAIN && force_nonblock)
+			return io_setup_async_msg(req, kmsg);
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return io_setup_async_msg(req, kmsg);
+		}
+		req_set_fail(req);
+	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+		req_set_fail(req);
+	}
 
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
@@ -4966,8 +5069,10 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-		req_set_fail(req);
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
 }
@@ -5014,15 +5119,29 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		min_ret = iov_iter_count(&msg.msg_iter);
 
 	ret = sock_recvmsg(sock, &msg, flags);
-	if (force_nonblock && ret == -EAGAIN)
-		return -EAGAIN;
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
+	if (ret < min_ret) {
+		if (ret == -EAGAIN && force_nonblock)
+			return -EAGAIN;
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->len -= ret;
+			sr->buf += ret;
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return -EAGAIN;
+		}
+		req_set_fail(req);
+	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
+		req_set_fail(req);
+	}
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
-	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
-		req_set_fail(req);
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
 }
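The plain-recv side mirrors the send path and additionally advances sr->buf and sr->len, so a retry resumes mid-buffer. The observable behavior, again as a hedged liburing sketch: even when the peer's data arrives in two chunks, MSG_WAITALL yields one CQE for the full buffer:

#include <liburing.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int sv[2];
	char buf[8];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	io_uring_queue_init(8, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv(sqe, sv[0], buf, sizeof(buf), MSG_WAITALL);
	io_uring_submit(&ring);

	write(sv[1], "half", 4);	/* first partial chunk */
	write(sv[1], "moon", 4);	/* completes the 8 bytes */

	io_uring_wait_cqe(&ring, &cqe);
	printf("received %d bytes\n", cqe->res);	/* 8, not 4 */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}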
@@ -5060,9 +5179,6 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *file;
 	int ret, fd;
 
-	if (req->file->f_flags & O_NONBLOCK)
-		req->flags |= REQ_F_NOWAIT;
-
 	if (!fixed) {
 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
 		if (unlikely(fd < 0))
@@ -5075,6 +5191,8 @@ static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 		if (!fixed)
 			put_unused_fd(fd);
 		ret = PTR_ERR(file);
+		/* safe to retry */
+		req->flags |= REQ_F_PARTIAL_IO;
 		if (ret == -EAGAIN && force_nonblock)
 			return -EAGAIN;
 		if (ret == -ERESTARTSYS)
@@ -5642,7 +5760,7 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 	if (!req->file || !file_can_poll(req->file))
 		return IO_APOLL_ABORTED;
-	if (req->flags & REQ_F_POLLED)
+	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
 		return IO_APOLL_ABORTED;
 	if (!def->pollin && !def->pollout)
 		return IO_APOLL_ABORTED;
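The widened test admits exactly one case the old code rejected: a request that was polled before but has made partial progress since (REQ_F_PARTIAL_IO) may arm poll again. Spelled out, the mask comparison reads as below (stand-alone form with illustrative bit values, not the kernel's):

#include <assert.h>
#include <stdbool.h>

#define REQ_F_POLLED		(1u << 0)	/* illustrative bit values */
#define REQ_F_PARTIAL_IO	(1u << 1)

/* Spelled-out equivalent of:
 *   (flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED  -> abort
 */
static bool may_repoll(unsigned int flags)
{
	bool polled = flags & REQ_F_POLLED;
	bool progressed = flags & REQ_F_PARTIAL_IO;

	return !polled || progressed;	/* abort only if polled && !progressed */
}

int main(void)
{
	assert(may_repoll(0));					/* never polled */
	assert(!may_repoll(REQ_F_POLLED));			/* polled, no progress */
	assert(may_repoll(REQ_F_POLLED | REQ_F_PARTIAL_IO));	/* made progress */
	return 0;
}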
@@ -5658,7 +5776,12 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 		mask |= POLLOUT | POLLWRNORM;
 	}
 
-	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+	if (req->flags & REQ_F_POLLED) {
+		apoll = req->apoll;
+		kfree(apoll->double_poll);
+	} else {
+		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+	}
 	if (unlikely(!apoll))
 		return IO_APOLL_ABORTED;
 	apoll->double_poll = NULL;
@@ -7450,7 +7573,7 @@ static int io_run_task_work_sig(void)
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
-					  ktime_t timeout)
+					  ktime_t *timeout)
 {
 	int ret;
@@ -7462,7 +7585,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	if (test_bit(0, &ctx->check_cq_overflow))
 		return 1;
-	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
 	return 1;
 }
@@ -7525,7 +7648,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		}
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 					  TASK_INTERRUPTIBLE);
-		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
 		finish_wait(&ctx->cq_wait, &iowq.wq);
 		cond_resched();
 	} while (ret > 0);
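Per the upstream fix this backports, passing the ktime by value meant the timeout was re-armed from scratch each time the wait loop re-entered io_cqring_wait_schedule() (for example after running task_work), so waiting for several CQEs could take far longer than the requested timeout. Passing a pointer keeps one deadline for the whole loop. The same single-deadline discipline in plain pthreads, offered as an analogy only (the wait_ready helper is hypothetical, not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;	/* never set here: the wait below times out */

static int wait_ready(long timeout_ms)
{
	struct timespec deadline;

	/* One absolute deadline, computed once... */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	int ret = 0;
	pthread_mutex_lock(&lock);
	/* ...reused unchanged on every wakeup, so spurious wakeups and
	 * retries never extend the total wait. */
	while (!ready && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	return ret;	/* 0, or ETIMEDOUT */
}

int main(void)
{
	return wait_ready(100) == ETIMEDOUT ? 0 : 1;
}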
@@ -9445,6 +9568,10 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	/* if we failed setting up the ctx, we might not have any rings */
 	io_iopoll_try_reap_events(ctx);
 
+	/* drop cached put refs after potentially doing completions */
+	if (current->io_uring)
+		io_uring_drop_tctx_refs(current);
+
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
 	/*
 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
@@ -10751,8 +10878,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		return -ENXIO;
 
 	if (ctx->restricted) {
-		if (opcode >= IORING_REGISTER_LAST)
-			return -EINVAL;
 		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
 		if (!test_bit(opcode, ctx->restrictions.register_op))
 			return -EACCES;
@@ -10884,6 +11009,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 	long ret = -EBADF;
 	struct fd f;
 
+	if (opcode >= IORING_REGISTER_LAST)
+		return -EINVAL;
+
 	f = fdget(fd);
 	if (!f.file)
 		return -EBADF;
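With the bounds check hoisted to the syscall entry, an out-of-range opcode now fails with -EINVAL before fdget() and regardless of whether the ring is restricted, while the restricted path keeps clamping the validated opcode with array_index_nospec() before using it as an index. The check-then-clamp pattern in stand-alone form (the dispatch/op_table names are hypothetical):

#include <stdio.h>

#define NR_OPS 2

static int op_ping(void) { return 0; }
static int op_pong(void) { return 1; }

static int (*op_table[NR_OPS])(void) = { op_ping, op_pong };

static int dispatch(unsigned int op)
{
	/* Validate the untrusted index up front... */
	if (op >= NR_OPS)
		return -1;
	/* ...then, in the kernel, clamp it before the table access:
	 *   op = array_index_nospec(op, NR_OPS);
	 * so a mispredicted branch cannot index out of bounds. */
	return op_table[op]();
}

int main(void)
{
	printf("%d %d %d\n", dispatch(0), dispatch(1), dispatch(7));	/* 0 1 -1 */
	return 0;
}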