sched/membarrier: reduce the ability to hammer on sys_membarrier
commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.

On some systems, sys_membarrier can be very expensive, causing overall slowdowns for everything. So put a lock on the path in order to serialize the accesses to prevent the ability for this to be called at too high of a frequency and saturate the machine.

Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Borislav Petkov <bp@alien8.de>
Fixes: 22e4ebb975 ("membarrier: Provide expedited private command")
Fixes: c5f58bd58f ("membarrier: Provide GLOBAL_EXPEDITED command")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[ converted to explicit mutex_*() calls - cleanup.h is not in this stable branch - gregkh ]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:

committed by
Greg Kroah-Hartman

parent
8f8f185643
commit
db896bbe4a
@@ -34,6 +34,8 @@
|
|||||||
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
|
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
|
||||||
| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
|
| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
|
||||||
|
|
||||||
|
static DEFINE_MUTEX(membarrier_ipi_mutex);
|
||||||
|
|
||||||
static void ipi_mb(void *info)
|
static void ipi_mb(void *info)
|
||||||
{
|
{
|
||||||
smp_mb(); /* IPIs should be serializing but paranoid. */
|
smp_mb(); /* IPIs should be serializing but paranoid. */
|
||||||
@@ -119,6 +121,7 @@ static int membarrier_global_expedited(void)
|
|||||||
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
mutex_lock(&membarrier_ipi_mutex);
|
||||||
cpus_read_lock();
|
cpus_read_lock();
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
for_each_online_cpu(cpu) {
|
for_each_online_cpu(cpu) {
|
||||||
@@ -165,6 +168,8 @@ static int membarrier_global_expedited(void)
|
|||||||
* rq->curr modification in scheduler.
|
* rq->curr modification in scheduler.
|
||||||
*/
|
*/
|
||||||
smp_mb(); /* exit from system call is not a mb */
|
smp_mb(); /* exit from system call is not a mb */
|
||||||
|
mutex_unlock(&membarrier_ipi_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -208,6 +213,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
|
|||||||
if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
mutex_lock(&membarrier_ipi_mutex);
|
||||||
cpus_read_lock();
|
cpus_read_lock();
|
||||||
|
|
||||||
if (cpu_id >= 0) {
|
if (cpu_id >= 0) {
|
||||||
@@ -280,6 +286,7 @@ out:
|
|||||||
* rq->curr modification in scheduler.
|
* rq->curr modification in scheduler.
|
||||||
*/
|
*/
|
||||||
smp_mb(); /* exit from system call is not a mb */
|
smp_mb(); /* exit from system call is not a mb */
|
||||||
|
mutex_unlock(&membarrier_ipi_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -321,6 +328,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
|
|||||||
* between threads which are users of @mm has its membarrier state
|
* between threads which are users of @mm has its membarrier state
|
||||||
* updated.
|
* updated.
|
||||||
*/
|
*/
|
||||||
|
mutex_lock(&membarrier_ipi_mutex);
|
||||||
cpus_read_lock();
|
cpus_read_lock();
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
for_each_online_cpu(cpu) {
|
for_each_online_cpu(cpu) {
|
||||||
@@ -337,6 +345,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
|
|||||||
|
|
||||||
free_cpumask_var(tmpmask);
|
free_cpumask_var(tmpmask);
|
||||||
cpus_read_unlock();
|
cpus_read_unlock();
|
||||||
|
mutex_unlock(&membarrier_ipi_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user