Merge branch 'for-3.15/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
 "This is the pull request for the core block IO bits for the 3.15
  kernel.  It's a smaller round this time, it contains:

   - Various little blk-mq fixes and additions from Christoph and
     myself.

   - Cleanup of the IPI usage from the block layer, and associated
     helper code.  From Frederic Weisbecker and Jan Kara.

   - Duplicate code cleanup in bio-integrity from Gu Zheng.  This will
     give you a merge conflict, but that should be easy to resolve.

   - blk-mq notify spinlock fix for RT from Mike Galbraith.

   - A blktrace partial accounting bug fix from Roman Pen.

   - Missing REQ_SYNC detection fix for blk-mq from Shaohua Li"

* 'for-3.15/core' of git://git.kernel.dk/linux-block: (25 commits)
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  blk-mq: don't dump CPU -> hw queue map on driver load
  blk-mq: fix wrong usage of hctx->state vs hctx->flags
  blk-mq: allow blk_mq_init_commands() to return failure
  block: remove old blk_iopoll_enabled variable
  blktrace: fix accounting of partially completed requests
  smp: Rename __smp_call_function_single() to smp_call_function_single_async()
  smp: Remove wait argument from __smp_call_function_single()
  watchdog: Simplify a little the IPI call
  smp: Move __smp_call_function_single() below its safe version
  smp: Consolidate the various smp_call_function_single() declensions
  smp: Teach __smp_call_function_single() to check for offline cpus
  smp: Remove unused list_head from csd
  smp: Iterate functions through llist_for_each_entry_safe()
  block: Stop abusing rq->csd.list in blk-softirq
  block: Remove useless IPI struct initialization
  ...
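The core API change in the IPI series, for orientation: __smp_call_function_single(cpu, csd, 0) becomes smp_call_function_single_async(cpu, csd), and the wait argument disappears (a waiting call is just smp_call_function_single()). A minimal sketch of the new calling convention, using a hypothetical my_dev structure with an embedded csd; the pending flag mirrors the rq->hrtick_csd_pending pattern in the scheduler hunk below:

#include <linux/smp.h>

/* Hypothetical driver object embedding its own call_single_data. */
struct my_dev {
	struct call_single_data csd;
	int csd_pending;
};

/* Runs in interrupt context on the target CPU. */
static void my_dev_remote_work(void *info)
{
	struct my_dev *dev = info;

	dev->csd_pending = 0;
}

static void my_dev_kick(struct my_dev *dev, int cpu)
{
	if (dev->csd_pending)
		return;		/* previous IPI on this csd still in flight */

	dev->csd.func = my_dev_remote_work;
	dev->csd.info = dev;
	dev->csd_pending = 1;

	/* was: __smp_call_function_single(cpu, &dev->csd, 0); */
	smp_call_function_single_async(cpu, &dev->csd);
}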
kernel/sched/core.c

@@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 	if (rq == this_rq()) {
 		__hrtick_restart(rq);
 	} else if (!rq->hrtick_csd_pending) {
-		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 		rq->hrtick_csd_pending = 1;
 	}
 }

kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
 	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
  * Insert a previously allocated call_single_data element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+			       smp_call_func_t func, void *info, int wait)
 {
+	struct call_single_data csd_stack = { .flags = 0 };
+	unsigned long flags;
+
+	if (cpu == smp_processor_id()) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+		return 0;
+	}
+
+	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+		return -ENXIO;
+
+	if (!csd) {
+		csd = &csd_stack;
+		if (!wait)
+			csd = &__get_cpu_var(csd_data);
+	}
+
+	csd_lock(csd);
+
+	csd->func = func;
+	csd->info = info;
+
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 
 	if (wait)
 		csd_lock_wait(csd);
+
+	return 0;
 }
 
 /*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct llist_node *entry, *next;
+	struct llist_node *entry;
+	struct call_single_data *csd, *csd_next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
 	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (entry) {
-		struct call_single_data *csd;
-
-		next = entry->next;
-
-		csd = llist_entry(entry, struct call_single_data, llist);
+	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		csd->func(csd->info);
 		csd_unlock(csd);
-
-		entry = next;
 	}
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
-	struct call_single_data d = {
-		.flags = 0,
-	};
-	unsigned long flags;
 	int this_cpu;
-	int err = 0;
+	int err;
 
 	/*
 	 * prevent preemption and reschedule on another processor,
@@ -209,26 +229,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
 
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
-	} else {
-		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *csd = &d;
-
-			if (!wait)
-				csd = &__get_cpu_var(csd_data);
-
-			csd_lock(csd);
-
-			csd->func = func;
-			csd->info = info;
-			generic_exec_single(cpu, csd, wait);
-		} else {
-			err = -ENXIO;	/* CPU not online */
-		}
-	}
+	err = generic_exec_single(cpu, NULL, func, info, wait);
 
 	put_cpu();
@@ -236,6 +237,34 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_single_async(): Run an asynchronous function on a
+ *                                   specific CPU.
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but the call is asynchronous and
+ * can thus be done from contexts with disabled interrupts.
+ *
+ * The caller passes his own pre-allocated data structure
+ * (ie: embedded in an object) and is responsible for synchronizing it
+ * such that the IPIs performed on the @csd are strictly serialized.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging facility to
+ * validate the correctness of this serialization.
+ */
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+{
+	int err = 0;
+
+	preempt_disable();
+	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+	preempt_enable();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_single_async);
+
 /*
  * smp_call_function_any - Run a function on any of the given cpus
  * @mask: The mask of cpus it can run on.
@@ -279,44 +308,6 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
-{
-	unsigned int this_cpu;
-	unsigned long flags;
-
-	this_cpu = get_cpu();
-	/*
-	 * Can deadlock when called with interrupts disabled.
-	 * We allow cpu's that are not yet online though, as no one else can
-	 * send smp call function interrupt to this cpu and as such deadlocks
-	 * can't happen.
-	 */
-	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
-		     && !oops_in_progress);
-
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		csd->func(csd->info);
-		local_irq_restore(flags);
-	} else {
-		csd_lock(csd);
-		generic_exec_single(cpu, csd, wait);
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL_GPL(__smp_call_function_single);
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
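With the consolidation above, every single-CPU call flavor funnels into generic_exec_single(); they differ only in where the call_single_data lives. A sketch of the three entry points from the caller's side (do_work and demo are hypothetical names):

#include <linux/smp.h>

static void do_work(void *info)
{
	/* runs on the target CPU, in interrupt context */
}

static void demo(int cpu, struct call_single_data *embedded_csd)
{
	/*
	 * Synchronous: generic_exec_single() services this from the
	 * on-stack csd_stack, since the caller waits anyway.
	 */
	smp_call_function_single(cpu, do_work, NULL, 1);

	/*
	 * Fire-and-forget without a caller csd: the per-cpu csd_data
	 * slot is borrowed, and csd_lock() throttles its reuse.
	 */
	smp_call_function_single(cpu, do_work, NULL, 0);

	/*
	 * Async on a caller-owned csd: legal with interrupts disabled,
	 * but serializing back-to-back calls is the caller's job.
	 */
	embedded_csd->func = do_work;
	embedded_csd->info = NULL;
	smp_call_function_single_async(cpu, embedded_csd);
}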

kernel/sysctl.c

@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
 #ifndef CONFIG_MMU
 extern int sysctl_nr_trim_pages;
 #endif
-#ifdef CONFIG_BLOCK
-extern int blk_iopoll_enabled;
-#endif
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -1086,15 +1083,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_BLOCK
-	{
-		.procname	= "blk_iopoll",
-		.data		= &blk_iopoll_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 	{ }
 };

kernel/trace/blktrace.c

@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
  * blk_add_trace_rq - Add a trace for a request oriented action
  * @q: queue the io is for
  * @rq: the source request
+ * @nr_bytes: number of completed bytes
  * @what: the action
  *
  * Description:
@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
  *
  **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-			     u32 what)
+			     unsigned int nr_bytes, u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
 
@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
 				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
 				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
 }
@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 static void blk_add_trace_rq_abort(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
 				    struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
 }
 
 static void blk_add_trace_rq_issue(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
 }
 
 static void blk_add_trace_rq_requeue(void *ignore,
 				     struct request_queue *q,
 				     struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
 }
 
 static void blk_add_trace_rq_complete(void *ignore,
 				      struct request_queue *q,
-				      struct request *rq)
+				      struct request *rq,
+				      unsigned int nr_bytes)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
 }
 
 /**
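For context on where @nr_bytes originates: the complete tracepoint is fed from blk_update_request() in block/blk-core.c, which this series touches but which is not shown here. A rough sketch of that caller-side contract, not a verbatim excerpt:

/*
 * Rough sketch (block/blk-core.c, not part of the hunks above):
 * blk_update_request() completes nr_bytes of the request and hands
 * exactly that count to the tracepoint, so repeated partial
 * completions of one request each trace their own slice instead of
 * the request's full remaining size (what blk_rq_bytes() reports).
 */
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	trace_block_rq_complete(req->q, req, nr_bytes);

	/* ... complete nr_bytes worth of bios, shrink req accordingly ... */

	return req->bio != NULL;	/* true while the request has data left */
}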

kernel/up.c

@@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	csd->func(csd->info);
 	local_irq_restore(flags);
+	return 0;
 }
-EXPORT_SYMBOL(__smp_call_function_single);
+EXPORT_SYMBOL(smp_call_function_single_async);
 
 int on_each_cpu(smp_call_func_t func, void *info, int wait)
 {

kernel/watchdog.c

@@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info)
 
 static void update_timers(int cpu)
 {
-	struct call_single_data data = {.func = restart_watchdog_hrtimer};
 	/*
 	 * Make sure that perf event counter will adopt to a new
 	 * sampling period. Updating the sampling period directly would
@@ -515,7 +514,7 @@ static void update_timers(int cpu)
 	 * might be late already so we have to restart the timer as well.
 	 */
 	watchdog_nmi_disable(cpu);
-	__smp_call_function_single(cpu, &data, 1);
+	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
 	watchdog_nmi_enable(cpu);
 }
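The watchdog hunk is the inverse simplification: a wait=1 trip through the csd machinery is just a synchronous cross call, so the on-stack call_single_data bought nothing. The equivalence it relies on, sketched with hypothetical before/after helpers:

/* Before: a hand-rolled csd for what is really a synchronous call. */
static void update_timers_old_style(int cpu)
{
	struct call_single_data data = { .func = restart_watchdog_hrtimer };

	__smp_call_function_single(cpu, &data, 1);	/* pre-3.15 API */
}

/* After: let smp_call_function_single() manage the csd internally. */
static void update_timers_new_style(int cpu)
{
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
}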