Merge 5.10.12 into android12-5.10
Changes in 5.10.12
	gpio: mvebu: fix pwm .get_state period calculation
	Revert "mm/slub: fix a memory leak in sysfs_slab_add()"
	futex: Ensure the correct return value from futex_lock_pi()
	futex: Replace pointless printk in fixup_owner()
	futex: Provide and use pi_state_update_owner()
	rtmutex: Remove unused argument from rt_mutex_proxy_unlock()
	futex: Use pi_state_update_owner() in put_pi_state()
	futex: Simplify fixup_pi_state_owner()
	futex: Handle faults correctly for PI futexes
	HID: wacom: Correct NULL dereference on AES pen proximity
	HID: multitouch: Apply MT_QUIRK_CONFIDENCE quirk for multi-input devices
	media: Revert "media: videobuf2: Fix length check for single plane dmabuf queueing"
	media: v4l2-subdev.h: BIT() is not available in userspace
	RDMA/vmw_pvrdma: Fix network_hdr_type reported in WC
	iwlwifi: dbg: Don't touch the tlv data
	kernel/io_uring: cancel io_uring before task works
	io_uring: inline io_uring_attempt_task_drop()
	io_uring: add warn_once for io_uring_flush()
	io_uring: stop SQPOLL submit on creator's death
	io_uring: fix null-deref in io_disable_sqo_submit
	io_uring: do sqo disable on install_fd error
	io_uring: fix false positive sqo warning on flush
	io_uring: fix uring_flush in exit_files() warning
	io_uring: fix skipping disabling sqo on exec
	io_uring: dont kill fasync under completion_lock
	io_uring: fix sleeping under spin in __io_clean_op
	objtool: Don't fail on missing symbol table
	mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
	mm: fix a race on nr_swap_pages
	tools: Factor HOSTCC, HOSTLD, HOSTAR definitions
	printk: fix buffer overflow potential for print_text()
	printk: fix string termination for record_print_text()
	Linux 5.10.12

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6d96ec78494ebbc0daf4fdecfc13e522c6bd6b42
Makefile | 2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 11
+SUBLEVEL = 12
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -660,9 +660,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
 
 	spin_lock_irqsave(&mvpwm->lock, flags);
 
-	val = (unsigned long long)
-		readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
-	val *= NSEC_PER_SEC;
+	u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+	val = (unsigned long long) u * NSEC_PER_SEC;
 	do_div(val, mvpwm->clk_rate);
 	if (val > UINT_MAX)
 		state->duty_cycle = UINT_MAX;
@@ -671,21 +670,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
 	else
 		state->duty_cycle = 1;
 
-	val = (unsigned long long)
-		readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+	val = (unsigned long long) u; /* on duration */
+	/* period = on + off duration */
+	val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
 	val *= NSEC_PER_SEC;
 	do_div(val, mvpwm->clk_rate);
-	if (val < state->duty_cycle) {
-		state->period = 1;
-	} else {
-		val -= state->duty_cycle;
 	if (val > UINT_MAX)
 		state->period = UINT_MAX;
 	else if (val)
 		state->period = val;
 	else
 		state->period = 1;
-	}
 
 	regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
 	if (u)
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 			MT_STORE_FIELD(inrange_state);
 			return 1;
 		case HID_DG_CONFIDENCE:
-			if (cls->name == MT_CLS_WIN_8 &&
+			if ((cls->name == MT_CLS_WIN_8 ||
+			     cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
 			    (field->application == HID_DG_TOUCHPAD ||
 			     field->application == HID_DG_TOUCHSCREEN))
 				app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
 	}
 
 	if (flush)
-		wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+		wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
 	else if (insert)
-		wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+		wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
 				       raw_data, report_size);
 
 	return insert && !flush;
@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
 static int wacom_devm_kfifo_alloc(struct wacom *wacom)
 {
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-	struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+	struct kfifo_rec_ptr_2 *pen_fifo;
 	int error;
 
 	pen_fifo = devres_alloc(wacom_devm_kfifo_release,
@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
 	}
 
 	devres_add(&wacom->hdev->dev, pen_fifo);
+	wacom_wac->pen_fifo = pen_fifo;
 
 	return 0;
 }
@@ -342,7 +342,7 @@ struct wacom_wac {
 	struct input_dev *pen_input;
 	struct input_dev *touch_input;
 	struct input_dev *pad_input;
-	struct kfifo_rec_ptr_2 pen_fifo;
+	struct kfifo_rec_ptr_2 *pen_fifo;
 	int pid;
 	int num_contacts_left;
 	u8 bt_features;
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
 	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
 }
 
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+	switch (type) {
+	case PVRDMA_NETWORK_ROCE_V1:
+		return RDMA_NETWORK_ROCE_V1;
+	case PVRDMA_NETWORK_IPV4:
+		return RDMA_NETWORK_IPV4;
+	case PVRDMA_NETWORK_IPV6:
+		return RDMA_NETWORK_IPV6;
+	default:
+		return RDMA_NETWORK_IPV6;
+	}
+}
+
 void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
 			 const struct pvrdma_qp_cap *src);
 void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
@@ -364,7 +364,7 @@ retry:
 	wc->dlid_path_bits = cqe->dlid_path_bits;
 	wc->port_num = cqe->port_num;
 	wc->vendor_err = cqe->vendor_err;
-	wc->network_hdr_type = cqe->network_hdr_type;
+	wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
 
 	/* Update shared ring state */
 	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 				return -EINVAL;
 		}
 	} else {
-		length = (b->memory == VB2_MEMORY_USERPTR ||
-			  b->memory == VB2_MEMORY_DMABUF)
+		length = (b->memory == VB2_MEMORY_USERPTR)
 			? b->length : vb->planes[0].length;
 
 		if (b->bytesused > length)
@@ -237,13 +237,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
 	if (le32_to_cpu(tlv->length) < sizeof(*reg))
 		return -EINVAL;
 
-	/* For safe using a string from FW make sure we have a
-	 * null terminator
-	 */
-	reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
-	IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
 	if (id >= IWL_FW_INI_MAX_REGION_ID) {
 		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
 		return -EINVAL;
@@ -21,7 +21,6 @@
 #include <linux/rcupdate.h>
 #include <linux/close_range.h>
 #include <net/sock.h>
-#include <linux/io_uring.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -453,7 +452,6 @@ void exit_files(struct task_struct *tsk)
 	struct files_struct * files = tsk->files;
 
 	if (files) {
-		io_uring_files_cancel(files);
 		task_lock(tsk);
 		tsk->files = NULL;
 		task_unlock(tsk);
fs/io_uring.c | 119
@@ -260,6 +260,7 @@ struct io_ring_ctx {
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
+		unsigned int		sqo_dead: 1;
 
 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -970,6 +971,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -990,8 +992,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-			  REQ_F_INFLIGHT))
+	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
 		__io_clean_op(req);
 }
 
@@ -1213,11 +1214,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 
 	/* order cqe stores with ring update */
 	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
-	if (wq_has_sleeper(&ctx->cq_wait)) {
-		wake_up_interruptible(&ctx->cq_wait);
-		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-	}
 }
 
 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1260,6 +1256,8 @@ static void io_req_clean_work(struct io_kiocb *req)
 		free_fs_struct(fs);
 		req->work.flags &= ~IO_WQ_WORK_FS;
 	}
+	if (req->flags & REQ_F_INFLIGHT)
+		io_req_drop_files(req);
 
 	io_put_identity(req->task->io_uring, req);
 }
@@ -1603,6 +1601,10 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
 
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
+	if (wq_has_sleeper(&ctx->cq_wait)) {
+		wake_up_interruptible(&ctx->cq_wait);
+		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+	}
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
@@ -2083,11 +2085,9 @@ static void io_req_task_cancel(struct callback_head *cb)
 static void __io_req_task_submit(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	bool fail;
 
-	fail = __io_sq_thread_acquire_mm(ctx);
 	mutex_lock(&ctx->uring_lock);
-	if (!fail)
+	if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
 		__io_queue_sqe(req, NULL);
 	else
 		__io_req_task_cancel(req, -EFAULT);
@@ -5962,9 +5962,6 @@ static void __io_clean_op(struct io_kiocb *req)
 		}
 		req->flags &= ~REQ_F_NEED_CLEANUP;
 	}
-
-	if (req->flags & REQ_F_INFLIGHT)
-		io_req_drop_files(req);
 }
 
 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6796,7 +6793,7 @@ again:
 			to_submit = 8;
 
 		mutex_lock(&ctx->uring_lock);
-		if (likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 
@@ -8487,6 +8484,10 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
 	/* if force is set, the ring is going away. always drop after that */
+
+	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+		ctx->sqo_dead = 1;
+
 	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true, NULL, NULL);
@@ -8698,6 +8699,8 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 			break;
 		/* cancel this request, or head link requests */
 		io_attempt_cancel(ctx, cancel_req);
+		io_cqring_overflow_flush(ctx, true, task, files);
+
 		io_put_req(cancel_req);
 		/* cancellations _may_ trigger task work */
 		io_run_task_work();
@@ -8745,6 +8748,17 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+	mutex_lock(&ctx->uring_lock);
+	ctx->sqo_dead = 1;
+	mutex_unlock(&ctx->uring_lock);
+
+	/* make sure callers enter the ring to get error */
+	if (ctx->rings)
+		io_ring_set_wakeup_flag(ctx);
+}
+
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8756,6 +8770,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	struct task_struct *task = current;
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+		/* for SQPOLL only sqo_task has task notes */
+		WARN_ON_ONCE(ctx->sqo_task != current);
+		io_disable_sqo_submit(ctx);
 		task = ctx->sq_data->thread;
 		atomic_inc(&task->io_uring->in_idle);
 		io_sq_thread_park(ctx->sq_data);
@@ -8835,23 +8852,6 @@ static void io_uring_del_task_file(struct file *file)
 		fput(file);
 }
 
-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
-{
-	if (!current->io_uring)
-		return;
-	/*
-	 * fput() is pending, will be 2 if the only other ref is our potential
-	 * task file note. If the task is exiting, drop regardless of count.
-	 */
-	if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
-	    atomic_long_read(&file->f_count) == 2)
-		io_uring_del_task_file(file);
-}
-
 static void io_uring_remove_task_files(struct io_uring_task *tctx)
 {
 	struct file *file;
@@ -8917,6 +8917,10 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
 
+	/* trigger io_disable_sqo_submit() */
+	if (tctx->sqpoll)
+		__io_uring_files_cancel(NULL);
+
 	do {
 		/* read completions before cancelations */
 		inflight = tctx_inflight(tctx);
@@ -8943,7 +8947,36 @@ void __io_uring_task_cancel(void)
|
|||||||
|
|
||||||
static int io_uring_flush(struct file *file, void *data)
|
static int io_uring_flush(struct file *file, void *data)
|
||||||
{
|
{
|
||||||
io_uring_attempt_task_drop(file);
|
struct io_uring_task *tctx = current->io_uring;
|
||||||
|
struct io_ring_ctx *ctx = file->private_data;
|
||||||
|
|
||||||
|
if (!tctx)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* we should have cancelled and erased it before PF_EXITING */
|
||||||
|
WARN_ON_ONCE((current->flags & PF_EXITING) &&
|
||||||
|
xa_load(&tctx->xa, (unsigned long)file));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* fput() is pending, will be 2 if the only other ref is our potential
|
||||||
|
* task file note. If the task is exiting, drop regardless of count.
|
||||||
|
*/
|
||||||
|
if (atomic_long_read(&file->f_count) != 2)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (ctx->flags & IORING_SETUP_SQPOLL) {
|
||||||
|
/* there is only one file note, which is owned by sqo_task */
|
||||||
|
WARN_ON_ONCE(ctx->sqo_task != current &&
|
||||||
|
xa_load(&tctx->xa, (unsigned long)file));
|
||||||
|
/* sqo_dead check is for when this happens after cancellation */
|
||||||
|
WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
|
||||||
|
!xa_load(&tctx->xa, (unsigned long)file));
|
||||||
|
|
||||||
|
io_disable_sqo_submit(ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
|
||||||
|
io_uring_del_task_file(file);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -9017,8 +9050,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 #endif /* !CONFIG_MMU */
 
-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
+	int ret = 0;
 	DEFINE_WAIT(wait);
 
 	do {
@@ -9027,6 +9061,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 
 		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
+		if (unlikely(ctx->sqo_dead)) {
+			ret = -EOWNERDEAD;
+			goto out;
+		}
+
 		if (!io_sqring_full(ctx))
 			break;
 
@@ -9034,6 +9073,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 	} while (!signal_pending(current));
 
 	finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+	return ret;
 }
 
 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
@@ -9077,10 +9118,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
 
+		ret = -EOWNERDEAD;
+		if (unlikely(ctx->sqo_dead))
+			goto out;
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
-		if (flags & IORING_ENTER_SQ_WAIT)
-			io_sqpoll_wait_sq(ctx);
+		if (flags & IORING_ENTER_SQ_WAIT) {
+			ret = io_sqpoll_wait_sq(ctx);
+			if (ret)
+				goto out;
+		}
 		submitted = to_submit;
 	} else if (to_submit) {
 		ret = io_uring_add_task_file(ctx, f.file);
@@ -9491,6 +9538,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
+		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;
@@ -9499,6 +9547,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 	return ret;
 err:
+	io_disable_sqo_submit(ctx);
 	io_ring_ctx_wait_and_kill(ctx);
 	return ret;
 }
@@ -176,7 +176,7 @@ struct v4l2_subdev_capability {
 };
 
 /* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
 
 /* Backwards compatibility define --- to be removed */
 #define v4l2_subdev_edid v4l2_edid
|
@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
|
|||||||
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
|
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum pvrdma_network_type {
|
||||||
|
PVRDMA_NETWORK_IB,
|
||||||
|
PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
|
||||||
|
PVRDMA_NETWORK_IPV4,
|
||||||
|
PVRDMA_NETWORK_IPV6
|
||||||
|
};
|
||||||
|
|
||||||
struct pvrdma_alloc_ucontext_resp {
|
struct pvrdma_alloc_ucontext_resp {
|
||||||
__u32 qp_tab_size;
|
__u32 qp_tab_size;
|
||||||
__u32 reserved;
|
__u32 reserved;
|
||||||
|
@@ -63,6 +63,7 @@
|
|||||||
#include <linux/random.h>
|
#include <linux/random.h>
|
||||||
#include <linux/rcuwait.h>
|
#include <linux/rcuwait.h>
|
||||||
#include <linux/compat.h>
|
#include <linux/compat.h>
|
||||||
|
#include <linux/io_uring.h>
|
||||||
|
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <asm/unistd.h>
|
#include <asm/unistd.h>
|
||||||
@@ -762,6 +763,7 @@ void __noreturn do_exit(long code)
|
|||||||
schedule();
|
schedule();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
io_uring_files_cancel(tsk->files);
|
||||||
exit_signals(tsk); /* sets PF_EXITING */
|
exit_signals(tsk); /* sets PF_EXITING */
|
||||||
|
|
||||||
/* sync mm's RSS info before statistics gathering */
|
/* sync mm's RSS info before statistics gathering */
|
||||||
|
kernel/futex.c | 211
@@ -766,6 +766,29 @@ static struct futex_pi_state *alloc_pi_state(void)
 	return pi_state;
 }
 
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+				  struct task_struct *new_owner)
+{
+	struct task_struct *old_owner = pi_state->owner;
+
+	lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+	if (old_owner) {
+		raw_spin_lock(&old_owner->pi_lock);
+		WARN_ON(list_empty(&pi_state->list));
+		list_del_init(&pi_state->list);
+		raw_spin_unlock(&old_owner->pi_lock);
+	}
+
+	if (new_owner) {
+		raw_spin_lock(&new_owner->pi_lock);
+		WARN_ON(!list_empty(&pi_state->list));
+		list_add(&pi_state->list, &new_owner->pi_state_list);
+		pi_state->owner = new_owner;
+		raw_spin_unlock(&new_owner->pi_lock);
+	}
+}
+
 static void get_pi_state(struct futex_pi_state *pi_state)
 {
 	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
@@ -788,17 +811,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
 	 * and has cleaned up the pi_state already
 	 */
 	if (pi_state->owner) {
-		struct task_struct *owner;
 		unsigned long flags;
 
 		raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
-		owner = pi_state->owner;
-		if (owner) {
-			raw_spin_lock(&owner->pi_lock);
-			list_del_init(&pi_state->list);
-			raw_spin_unlock(&owner->pi_lock);
-		}
-		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+		pi_state_update_owner(pi_state, NULL);
+		rt_mutex_proxy_unlock(&pi_state->pi_mutex);
 		raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
 	}
 
@@ -944,7 +961,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
  *	FUTEX_OWNER_DIED bit. See [4]
  *
  * [10] There is no transient state which leaves owner and user space
- *	TID out of sync.
+ *	TID out of sync. Except one error case where the kernel is denied
+ *	write access to the user address, see fixup_pi_state_owner().
  *
  *
  * Serialization and lifetime rules:
@@ -1524,26 +1542,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 		ret = -EINVAL;
 	}
 
-	if (ret)
-		goto out_unlock;
-
-	/*
-	 * This is a point of no return; once we modify the uval there is no
-	 * going back and subsequent operations must not fail.
-	 */
-
-	raw_spin_lock(&pi_state->owner->pi_lock);
-	WARN_ON(list_empty(&pi_state->list));
-	list_del_init(&pi_state->list);
-	raw_spin_unlock(&pi_state->owner->pi_lock);
-
-	raw_spin_lock(&new_owner->pi_lock);
-	WARN_ON(!list_empty(&pi_state->list));
-	list_add(&pi_state->list, &new_owner->pi_state_list);
-	pi_state->owner = new_owner;
-	raw_spin_unlock(&new_owner->pi_lock);
-
-	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+	if (!ret) {
+		/*
+		 * This is a point of no return; once we modified the uval
+		 * there is no going back and subsequent operations must
+		 * not fail.
+		 */
+		pi_state_update_owner(pi_state, new_owner);
+		postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+	}
 
 out_unlock:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -2329,18 +2336,13 @@ static void unqueue_me_pi(struct futex_q *q)
 	spin_unlock(q->lock_ptr);
 }
 
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+				  struct task_struct *argowner)
 {
 	struct futex_pi_state *pi_state = q->pi_state;
-	u32 uval, curval, newval;
 	struct task_struct *oldowner, *newowner;
-	u32 newtid;
-	int ret, err = 0;
-
-	lockdep_assert_held(q->lock_ptr);
-
-	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+	u32 uval, curval, newval, newtid;
+	int err = 0;
 
 	oldowner = pi_state->owner;
 
@@ -2374,14 +2376,12 @@ retry:
 			 * We raced against a concurrent self; things are
 			 * already fixed up. Nothing to do.
 			 */
-			ret = 0;
-			goto out_unlock;
+			return 0;
 		}
 
 		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
-			/* We got the lock after all, nothing to fix. */
-			ret = 0;
-			goto out_unlock;
+			/* We got the lock. pi_state is correct. Tell caller. */
+			return 1;
 		}
 
 		/*
@@ -2408,8 +2408,7 @@ retry:
 			 * We raced against a concurrent self; things are
 			 * already fixed up. Nothing to do.
 			 */
-			ret = 0;
-			goto out_unlock;
+			return 1;
 		}
 		newowner = argowner;
 	}
@@ -2439,22 +2438,9 @@ retry:
 	 * We fixed up user space. Now we need to fix the pi_state
 	 * itself.
 	 */
-	if (pi_state->owner != NULL) {
-		raw_spin_lock(&pi_state->owner->pi_lock);
-		WARN_ON(list_empty(&pi_state->list));
-		list_del_init(&pi_state->list);
-		raw_spin_unlock(&pi_state->owner->pi_lock);
-	}
+	pi_state_update_owner(pi_state, newowner);
 
-	pi_state->owner = newowner;
-
-	raw_spin_lock(&newowner->pi_lock);
-	WARN_ON(!list_empty(&pi_state->list));
-	list_add(&pi_state->list, &newowner->pi_state_list);
-	raw_spin_unlock(&newowner->pi_lock);
-	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
-	return 0;
+	return argowner == current;
 
 	/*
 	 * In order to reschedule or handle a page fault, we need to drop the
@@ -2475,17 +2461,16 @@ handle_err:
 
 	switch (err) {
 	case -EFAULT:
-		ret = fault_in_user_writeable(uaddr);
+		err = fault_in_user_writeable(uaddr);
 		break;
 
 	case -EAGAIN:
 		cond_resched();
-		ret = 0;
+		err = 0;
 		break;
 
 	default:
 		WARN_ON_ONCE(1);
-		ret = err;
 		break;
 	}
 
@@ -2495,17 +2480,44 @@ handle_err:
 	/*
 	 * Check if someone else fixed it for us:
 	 */
-	if (pi_state->owner != oldowner) {
-		ret = 0;
-		goto out_unlock;
-	}
+	if (pi_state->owner != oldowner)
+		return argowner == current;
 
-	if (ret)
-		goto out_unlock;
-
+	/* Retry if err was -EAGAIN or the fault in succeeded */
+	if (!err)
 		goto retry;
 
-out_unlock:
+	/*
+	 * fault_in_user_writeable() failed so user state is immutable. At
+	 * best we can make the kernel state consistent but user state will
+	 * be most likely hosed and any subsequent unlock operation will be
+	 * rejected due to PI futex rule [10].
+	 *
+	 * Ensure that the rtmutex owner is also the pi_state owner despite
+	 * the user space value claiming something different. There is no
+	 * point in unlocking the rtmutex if current is the owner as it
+	 * would need to wait until the next waiter has taken the rtmutex
+	 * to guarantee consistent state. Keep it simple. Userspace asked
+	 * for this wreckaged state.
+	 *
+	 * The rtmutex has an owner - either current or some other
+	 * task. See the EAGAIN loop above.
+	 */
+	pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
+
+	return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+				struct task_struct *argowner)
+{
+	struct futex_pi_state *pi_state = q->pi_state;
+	int ret;
+
+	lockdep_assert_held(q->lock_ptr);
+
+	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+	ret = __fixup_pi_state_owner(uaddr, q, argowner);
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 	return ret;
 }
@@ -2529,8 +2541,6 @@ static long futex_wait_restart(struct restart_block *restart);
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
-	int ret = 0;
-
 	if (locked) {
 		/*
 		 * Got the lock. We might not be the anticipated owner if we
@@ -2541,8 +2551,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 		 * stable state, anything else needs more attention.
 		 */
 		if (q->pi_state->owner != current)
-			ret = fixup_pi_state_owner(uaddr, q, current);
-		return ret ? ret : locked;
+			return fixup_pi_state_owner(uaddr, q, current);
+		return 1;
 	}
 
 	/*
@@ -2553,23 +2563,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 	 * Another speculative read; pi_state->owner == current is unstable
 	 * but needs our attention.
 	 */
-	if (q->pi_state->owner == current) {
-		ret = fixup_pi_state_owner(uaddr, q, NULL);
-		return ret;
-	}
+	if (q->pi_state->owner == current)
+		return fixup_pi_state_owner(uaddr, q, NULL);
 
 	/*
 	 * Paranoia check. If we did not take the lock, then we should not be
-	 * the owner of the rt_mutex.
+	 * the owner of the rt_mutex. Warn and establish consistent state.
 	 */
-	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
-		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
-			"pi-state %p\n", ret,
-			q->pi_state->pi_mutex.owner,
-			q->pi_state->owner);
-	}
+	if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+		return fixup_pi_state_owner(uaddr, q, current);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -2777,7 +2781,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 			 ktime_t *time, int trylock)
 {
 	struct hrtimer_sleeper timeout, *to;
-	struct futex_pi_state *pi_state = NULL;
 	struct task_struct *exiting = NULL;
 	struct rt_mutex_waiter rt_waiter;
 	struct futex_hash_bucket *hb;
@@ -2913,23 +2916,8 @@ no_block:
 	if (res)
 		ret = (res < 0) ? res : 0;
 
-	/*
-	 * If fixup_owner() faulted and was unable to handle the fault, unlock
-	 * it and return the fault to userspace.
-	 */
-	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
-		pi_state = q.pi_state;
-		get_pi_state(pi_state);
-	}
-
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
-
-	if (pi_state) {
-		rt_mutex_futex_unlock(&pi_state->pi_mutex);
-		put_pi_state(pi_state);
-	}
-
 	goto out;
 
 out_unlock_put_key:
@@ -3189,7 +3177,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 				 u32 __user *uaddr2)
 {
 	struct hrtimer_sleeper timeout, *to;
-	struct futex_pi_state *pi_state = NULL;
 	struct rt_mutex_waiter rt_waiter;
 	struct futex_hash_bucket *hb;
 	union futex_key key2 = FUTEX_KEY_INIT;
@@ -3267,16 +3254,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
-			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-				pi_state = q.pi_state;
-				get_pi_state(pi_state);
-			}
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
 			 */
 			put_pi_state(q.pi_state);
 			spin_unlock(q.lock_ptr);
+			/*
+			 * Adjust the return value. It's either -EFAULT or
+			 * success (1) but the caller expects 0 for success.
+			 */
+			ret = ret < 0 ? ret : 0;
 		}
 	} else {
 		struct rt_mutex *pi_mutex;
@@ -3307,25 +3295,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		if (res)
 			ret = (res < 0) ? res : 0;
 
-		/*
-		 * If fixup_pi_state_owner() faulted and was unable to handle
-		 * the fault, unlock the rt_mutex and return the fault to
-		 * userspace.
-		 */
-		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-			pi_state = q.pi_state;
-			get_pi_state(pi_state);
-		}
-
 		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);
 	}
 
-	if (pi_state) {
-		rt_mutex_futex_unlock(&pi_state->pi_mutex);
-		put_pi_state(pi_state);
-	}
-
 	if (ret == -EINTR) {
 		/*
 		 * We've already been requeued, but cannot restart by calling
@@ -1716,8 +1716,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  * possible because it belongs to the pi_state which is about to be freed
  * and it is not longer visible to other tasks.
  */
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-			   struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
 	rt_mutex_set_owner(lock, NULL);
@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-				  struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
 extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				       struct rt_mutex_waiter *waiter,
@@ -1342,11 +1342,16 @@ static size_t info_print_prefix(const struct printk_info *info, bool syslog,
  * done:
  *
  *   - Add prefix for each line.
+ *   - Drop truncated lines that no longer fit into the buffer.
  *   - Add the trailing newline that has been removed in vprintk_store().
- *   - Drop truncated lines that do not longer fit into the buffer.
+ *   - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1;
  *
  * Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
  */
 static size_t record_print_text(struct printk_record *r, bool syslog,
 				bool time)
@@ -1389,26 +1394,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
 
 		/*
 		 * Truncate the text if there is not enough space to add the
-		 * prefix and a trailing newline.
+		 * prefix and a trailing newline and a terminator.
 		 */
-		if (len + prefix_len + text_len + 1 > buf_size) {
+		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
 			/* Drop even the current line if no space. */
-			if (len + prefix_len + line_len + 1 > buf_size)
+			if (len + prefix_len + line_len + 1 + 1 > buf_size)
 				break;
 
-			text_len = buf_size - len - prefix_len - 1;
+			text_len = buf_size - len - prefix_len - 1 - 1;
 			truncated = true;
 		}
 
 		memmove(text + prefix_len, text, text_len);
 		memcpy(text, prefix, prefix_len);
 
+		/*
+		 * Increment the prepared length to include the text and
+		 * prefix that were just moved+copied. Also increment for the
+		 * newline at the end of this line. If this is the last line,
+		 * there is no newline, but it will be added immediately below.
+		 */
 		len += prefix_len + line_len + 1;
 
 		if (text_len == line_len) {
 			/*
-			 * Add the trailing newline removed in
-			 * vprintk_store().
+			 * This is the last line. Add the trailing newline
+			 * removed in vprintk_store().
 			 */
 			text[prefix_len + line_len] = '\n';
 			break;
@@ -1433,6 +1443,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
 		text_len -= line_len + 1;
 	}
 
+	/*
+	 * If a buffer was provided, it will be terminated. Space for the
+	 * string terminator is guaranteed to be available. The terminator is
+	 * not counted in the return value.
+	 */
+	if (buf_size > 0)
+		r->text_buf[len] = 0;
+
 	return len;
 }
 
@@ -5624,10 +5624,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 	s->kobj.kset = kset;
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-	if (err) {
-		kobject_put(&s->kobj);
+	if (err)
 		goto out;
-	}
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
 	if (err)
@@ -1045,16 +1045,18 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 	/* Only single cluster request supported */
 	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
+	spin_lock(&swap_avail_lock);
+
 	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
-	if (avail_pgs <= 0)
+	if (avail_pgs <= 0) {
+		spin_unlock(&swap_avail_lock);
 		goto noswap;
+	}
 
 	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
 
 	atomic_long_sub(n_goal * size, &nr_swap_pages);
 
-	spin_lock(&swap_avail_lock);
-
 start_over:
 	node = numa_node_id();
 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
@@ -1128,14 +1130,13 @@ swp_entry_t get_swap_page_of_type(int type)
 
 	spin_lock(&si->lock);
 	if (si->flags & SWP_WRITEOK) {
-		atomic_long_dec(&nr_swap_pages);
 		/* This is called for allocating swap entry, not cache */
 		offset = scan_swap_map(si, 1);
 		if (offset) {
+			atomic_long_dec(&nr_swap_pages);
 			spin_unlock(&si->lock);
 			return swp_entry(type, offset);
 		}
-		atomic_long_inc(&nr_swap_pages);
 	}
 	spin_unlock(&si->lock);
 fail:
@@ -354,8 +354,11 @@ static int read_symbols(struct elf *elf)
 
 	symtab = find_section_by_name(elf, ".symtab");
 	if (!symtab) {
-		WARN("missing symbol table");
-		return -1;
+		/*
+		 * A missing symbol table is actually possible if it's an empty
+		 * .o file.  This can happen for thunk_64.o.
+		 */
+		return 0;
 	}
 
 	symtab_shndx = find_section_by_name(elf, ".symtab_shndx");