Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The biggest part of this tree is the new auto-generated atomics API
  wrappers by Mark Rutland.

  The primary motivation was to allow instrumentation without uglifying
  the primary source code.

  The linecount increase comes from adding the auto-generated files to
  the Git space as well:

    include/asm-generic/atomic-instrumented.h | 1689 ++++++++++++++++--
    include/asm-generic/atomic-long.h         | 1174 ++++++++++---
    include/linux/atomic-fallback.h           | 2295 +++++++++++++++++++++++++
    include/linux/atomic.h                    | 1241 +------------

  I preferred this approach, so that the full call stack of the (already
  complex) locking APIs is still fully visible in 'git grep'. But if this
  is excessive we could certainly hide them.

  There's a separate build-time mechanism to determine whether the
  headers are out of date (they should never be stale if we do our job
  right).

  Anyway, nothing from this should be visible to regular kernel
  developers.

  Other changes:

   - Add support for dynamic keys, which removes a source of false
     positives in the workqueue code, among other things (Bart Van
     Assche)

   - Updates to tools/memory-model (Andrea Parri, Paul E. McKenney)

   - qspinlock, wake_q and lockdep micro-optimizations (Waiman Long)

   - misc other updates and enhancements"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  locking/lockdep: Shrink struct lock_class_key
  locking/lockdep: Add module_param to enable consistency checks
  lockdep/lib/tests: Test dynamic key registration
  lockdep/lib/tests: Fix run_tests.sh
  kernel/workqueue: Use dynamic lockdep keys for workqueues
  locking/lockdep: Add support for dynamic keys
  locking/lockdep: Verify whether lock objects are small enough to be used as class keys
  locking/lockdep: Check data structure consistency
  locking/lockdep: Reuse lock chains that have been freed
  locking/lockdep: Fix a comment in add_chain_cache()
  locking/lockdep: Introduce lockdep_next_lockchain() and lock_chain_count()
  locking/lockdep: Reuse list entries that are no longer in use
  locking/lockdep: Free lock classes that are no longer in use
  locking/lockdep: Update two outdated comments
  locking/lockdep: Make it easy to detect whether or not inside a selftest
  locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock()
  locking/lockdep: Initialize the locks_before and locks_after lists earlier
  locking/lockdep: Make zap_class() remove all matching lock order entries
  locking/lockdep: Reorder struct lock_class members
  locking/lockdep: Avoid that add_chain_cache() adds an invalid chain to the cache
  ...
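For context on the dynamic-keys item above, here is a minimal sketch of how the new lockdep_register_key()/lockdep_unregister_key() API (used by the workqueue conversion further down) is intended to be used for a lock that lives in dynamically allocated memory. The struct and helper names are illustrative only, not part of this series:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object whose lock class must not be tied to a static key. */
struct my_object {
        spinlock_t              lock;
        struct lock_class_key   key;            /* dynamically registered key */
        struct lockdep_map      dep_map;
};

static struct my_object *my_object_create(void)
{
        struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        spin_lock_init(&obj->lock);
        lockdep_register_key(&obj->key);        /* the key may live in heap memory */
        lockdep_init_map(&obj->dep_map, "my_object", &obj->key, 0);
        return obj;
}

static void my_object_destroy(struct my_object *obj)
{
        lockdep_unregister_key(&obj->key);      /* zap lock classes using this key */
        kfree(obj);
}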
kernel/cpu.c
@@ -313,6 +313,15 @@ void cpus_write_unlock(void)
 
 void lockdep_assert_cpus_held(void)
 {
+	/*
+	 * We can't have hotplug operations before userspace starts running,
+	 * and some init codepaths will knowingly not take the hotplug lock.
+	 * This is all valid, so mute lockdep until it makes sense to report
+	 * unheld locks.
+	 */
+	if (system_state < SYSTEM_RUNNING)
+		return;
+
 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
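A minimal usage sketch of the assertion above (the helpers below are illustrative, not from the patch): callers that require CPU hotplug to be excluded take cpus_read_lock(), and the callee documents that requirement with lockdep_assert_cpus_held().

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical helper that must run with CPU hotplug excluded. */
static void visit_online_cpus(void)
{
        int cpu;

        lockdep_assert_cpus_held();     /* caller must hold cpus_read_lock() */

        for_each_online_cpu(cpu) {
                /* ... per-CPU work that must not race with CPU hotplug ... */
        }
}

static void visit_online_cpus_locked(void)
{
        cpus_read_lock();
        visit_online_cpus();
        cpus_read_unlock();
}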
kernel/futex.c
@@ -68,6 +68,7 @@
 #include <linux/freezer.h>
 #include <linux/memblock.h>
 #include <linux/fault-inject.h>
+#include <linux/refcount.h>
 
 #include <asm/futex.h>
 
@@ -212,7 +213,7 @@ struct futex_pi_state {
 	struct rt_mutex pi_mutex;
 
 	struct task_struct *owner;
-	atomic_t refcount;
+	refcount_t refcount;
 
 	union futex_key key;
 } __randomize_layout;
@@ -321,12 +322,8 @@ static int __init fail_futex_debugfs(void)
 	if (IS_ERR(dir))
 		return PTR_ERR(dir);
 
-	if (!debugfs_create_bool("ignore-private", mode, dir,
-				 &fail_futex.ignore_private)) {
-		debugfs_remove_recursive(dir);
-		return -ENOMEM;
-	}
-
+	debugfs_create_bool("ignore-private", mode, dir,
+			    &fail_futex.ignore_private);
 	return 0;
 }
 
@@ -803,7 +800,7 @@ static int refill_pi_state_cache(void)
 	INIT_LIST_HEAD(&pi_state->list);
 	/* pi_mutex gets initialized later */
 	pi_state->owner = NULL;
-	atomic_set(&pi_state->refcount, 1);
+	refcount_set(&pi_state->refcount, 1);
 	pi_state->key = FUTEX_KEY_INIT;
 
 	current->pi_state_cache = pi_state;
@@ -823,7 +820,7 @@ static struct futex_pi_state *alloc_pi_state(void)
 
 static void get_pi_state(struct futex_pi_state *pi_state)
 {
-	WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
+	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
 }
 
 /*
@@ -835,7 +832,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
 	if (!pi_state)
 		return;
 
-	if (!atomic_dec_and_test(&pi_state->refcount))
+	if (!refcount_dec_and_test(&pi_state->refcount))
 		return;
 
 	/*
@@ -865,7 +862,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
 		 * refcount is at 0 - put it back to 1.
 		 */
 		pi_state->owner = NULL;
-		atomic_set(&pi_state->refcount, 1);
+		refcount_set(&pi_state->refcount, 1);
 		current->pi_state_cache = pi_state;
 	}
 }
@@ -908,7 +905,7 @@ void exit_pi_state_list(struct task_struct *curr)
 		 * In that case; drop the locks to let put_pi_state() make
 		 * progress and retry the loop.
 		 */
-		if (!atomic_inc_not_zero(&pi_state->refcount)) {
+		if (!refcount_inc_not_zero(&pi_state->refcount)) {
 			raw_spin_unlock_irq(&curr->pi_lock);
 			cpu_relax();
 			raw_spin_lock_irq(&curr->pi_lock);
@@ -1064,7 +1061,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
 	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
 	 * free pi_state before we can take a reference ourselves.
 	 */
-	WARN_ON(!atomic_read(&pi_state->refcount));
+	WARN_ON(!refcount_read(&pi_state->refcount));
 
 	/*
 	 * Now that we have a pi_state, we can acquire wait_lock
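The futex conversion above follows the standard refcount_t pattern from <linux/refcount.h>. A self-contained sketch of the same lifetime rules on a hypothetical object (not part of the patch):

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_state {                               /* hypothetical object */
        refcount_t refcount;
};

static struct my_state *my_state_alloc(void)
{
        struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (s)
                refcount_set(&s->refcount, 1);  /* initial reference */
        return s;
}

static bool my_state_tryget(struct my_state *s)
{
        /* Fails once the count has dropped to zero, unlike a bare atomic_inc(). */
        return refcount_inc_not_zero(&s->refcount);
}

static void my_state_put(struct my_state *s)
{
        if (refcount_dec_and_test(&s->refcount))
                kfree(s);
}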
@@ -1467,8 +1464,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 	 * Queue the task for later wakeup for after we've released
 	 * the hb->lock. wake_q_add() grabs reference to p.
 	 */
-	wake_q_add(wake_q, p);
-	put_task_struct(p);
+	wake_q_add_safe(wake_q, p);
 }
 
 /*
kernel/locking/lockdep.c: diff too large to display (not shown here)
kernel/locking/lockdep_internals.h
@@ -22,6 +22,10 @@ enum lock_usage_bit {
 	LOCK_USAGE_STATES
 };
 
+#define LOCK_USAGE_READ_MASK 1
+#define LOCK_USAGE_DIR_MASK 2
+#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+
 /*
  * Usage-state bitmasks:
  */
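Illustration only (helper names are made up): under the masks added above, bit 0 of a lock_usage_bit value selects the *_READ variant, bit 1 distinguishes LOCK_USED_IN_* from LOCK_ENABLED_*, and the remaining bits identify the IRQ-context state.

/* Not part of the patch: how a usage bit decomposes under the three masks. */
static inline int usage_is_read(enum lock_usage_bit bit)
{
        return bit & LOCK_USAGE_READ_MASK;      /* the *_READ variants */
}

static inline int usage_is_enabled_dir(enum lock_usage_bit bit)
{
        return bit & LOCK_USAGE_DIR_MASK;       /* LOCK_ENABLED_* vs LOCK_USED_IN_* */
}

static inline int usage_state_bits(enum lock_usage_bit bit)
{
        return bit & LOCK_USAGE_STATE_MASK;     /* which irq-context state */
}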
@@ -96,7 +100,8 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
 
 extern unsigned long nr_lock_classes;
 extern unsigned long nr_list_entries;
-extern unsigned long nr_lock_chains;
+long lockdep_next_lockchain(long i);
+unsigned long lock_chain_count(void);
 extern int nr_chain_hlocks;
 extern unsigned long nr_stack_trace_entries;
 
kernel/locking/lockdep_proc.c
@@ -104,18 +104,18 @@ static const struct seq_operations lockdep_ops = {
 #ifdef CONFIG_PROVE_LOCKING
 static void *lc_start(struct seq_file *m, loff_t *pos)
 {
+	if (*pos < 0)
+		return NULL;
+
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	if (*pos - 1 < nr_lock_chains)
-		return lock_chains + (*pos - 1);
-
-	return NULL;
+	return lock_chains + (*pos - 1);
 }
 
 static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	(*pos)++;
+	*pos = lockdep_next_lockchain(*pos - 1) + 1;
 	return lc_start(m, pos);
 }
 
@@ -268,7 +268,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
 #ifdef CONFIG_PROVE_LOCKING
 	seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
-			nr_lock_chains, MAX_LOCKDEP_CHAINS);
+			lock_chain_count(), MAX_LOCKDEP_CHAINS);
 	seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
 			nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
 #endif
kernel/locking/qspinlock.c
@@ -124,9 +124,6 @@ static inline __pure u32 encode_tail(int cpu, int idx)
 {
 	u32 tail;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(idx > 3);
-#endif
 	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
 	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
 
@@ -412,12 +409,28 @@ pv_queue:
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
 
+	/*
+	 * 4 nodes are allocated based on the assumption that there will
+	 * not be nested NMIs taking spinlocks. That may not be true in
+	 * some architectures even though the chance of needing more than
+	 * 4 nodes will still be extremely unlikely. When that happens,
+	 * we fall back to spinning on the lock directly without using
+	 * any MCS node. This is not the most elegant solution, but is
+	 * simple enough.
+	 */
+	if (unlikely(idx >= MAX_NODES)) {
+		qstat_inc(qstat_lock_no_node, true);
+		while (!queued_spin_trylock(lock))
+			cpu_relax();
+		goto release;
+	}
+
 	node = grab_mcs_node(node, idx);
 
 	/*
 	 * Keep counts of non-zero index values:
 	 */
-	qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+	qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
 
 	/*
 	 * Ensure that we increment the head node->count before initialising
kernel/locking/qspinlock_stat.h
@@ -30,6 +30,13 @@
  *   pv_wait_node   - # of vCPU wait's at a non-head queue node
  *   lock_pending   - # of locking operations via pending code
  *   lock_slowpath  - # of locking operations via MCS lock queue
+ *   lock_use_node2 - # of locking operations that use 2nd per-CPU node
+ *   lock_use_node3 - # of locking operations that use 3rd per-CPU node
+ *   lock_use_node4 - # of locking operations that use 4th per-CPU node
+ *   lock_no_node   - # of locking operations without using per-CPU node
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
  *
  * Writing to the "reset_counters" file will reset all the above counter
  * values.
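A worked example of the subtraction described in the comment above, with made-up counter values:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values read from the per-counter stat files: */
        unsigned long lock_slowpath  = 120000;
        unsigned long lock_use_node2 = 300, lock_use_node3 = 5, lock_use_node4 = 0;

        /* Slowpath acquisitions that needed only the first per-CPU MCS node: */
        unsigned long lock_use_node1 = lock_slowpath -
                (lock_use_node2 + lock_use_node3 + lock_use_node4);

        printf("lock_use_node1 = %lu\n", lock_use_node1);      /* prints 119695 */
        return 0;
}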
@@ -55,9 +62,10 @@ enum qlock_stats {
 	qstat_pv_wait_node,
 	qstat_lock_pending,
 	qstat_lock_slowpath,
-	qstat_lock_idx1,
-	qstat_lock_idx2,
-	qstat_lock_idx3,
+	qstat_lock_use_node2,
+	qstat_lock_use_node3,
+	qstat_lock_use_node4,
+	qstat_lock_no_node,
 	qstat_num,	/* Total number of statistical counters */
 	qstat_reset_cnts = qstat_num,
 };
@@ -85,9 +93,10 @@ static const char * const qstat_names[qstat_num + 1] = {
 	[qstat_pv_wait_node]  = "pv_wait_node",
 	[qstat_lock_pending]  = "lock_pending",
 	[qstat_lock_slowpath] = "lock_slowpath",
-	[qstat_lock_idx1]     = "lock_index1",
-	[qstat_lock_idx2]     = "lock_index2",
-	[qstat_lock_idx3]     = "lock_index3",
+	[qstat_lock_use_node2] = "lock_use_node2",
+	[qstat_lock_use_node3] = "lock_use_node3",
+	[qstat_lock_use_node4] = "lock_use_node4",
+	[qstat_lock_no_node]  = "lock_no_node",
 	[qstat_reset_cnts]    = "reset_counters",
 };
 
kernel/locking/rwsem-xadd.c
@@ -211,9 +211,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		 * Ensure issuing the wakeup (either by us or someone else)
 		 * after setting the reader waiter to nil.
 		 */
-		wake_q_add(wake_q, tsk);
-		/* wake_q_add() already take the task ref */
-		put_task_struct(tsk);
+		wake_q_add_safe(wake_q, tsk);
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
kernel/sched/core.c
@@ -396,6 +396,30 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+	struct wake_q_node *node = &task->wake_q;
+
+	/*
+	 * Atomically grab the task, if ->wake_q is !nil already it means
+	 * its already queued (either by us or someone else) and will get the
+	 * wakeup due to that.
+	 *
+	 * In order to ensure that a pending wakeup will observe our pending
+	 * state, even in the failed case, an explicit smp_mb() must be used.
+	 */
+	smp_mb__before_atomic();
+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+		return false;
+
+	/*
+	 * The head is context local, there can be no concurrency.
+	 */
+	*head->lastp = node;
+	head->lastp = &node->next;
+	return true;
+}
+
 /**
  * wake_q_add() - queue a wakeup for 'later' waking.
  * @head: the wake_q_head to add @task to
@@ -410,27 +434,31 @@ static bool set_nr_if_polling(struct task_struct *p)
  */
 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 {
-	struct wake_q_node *node = &task->wake_q;
-
-	/*
-	 * Atomically grab the task, if ->wake_q is !nil already it means
-	 * its already queued (either by us or someone else) and will get the
-	 * wakeup due to that.
-	 *
-	 * In order to ensure that a pending wakeup will observe our pending
-	 * state, even in the failed case, an explicit smp_mb() must be used.
-	 */
-	smp_mb__before_atomic();
-	if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
-		return;
-
-	get_task_struct(task);
-
-	/*
-	 * The head is context local, there can be no concurrency.
-	 */
-	*head->lastp = node;
-	head->lastp = &node->next;
+	if (__wake_q_add(head, task))
+		get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+	if (!__wake_q_add(head, task))
+		put_task_struct(task);
 }
 
 void wake_up_q(struct wake_q_head *head)
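A usage sketch of the resulting API (the waiter structure and list below are hypothetical): callers that already hold a reference on the task hand it over with wake_q_add_safe() instead of pairing wake_q_add() with put_task_struct().

#include <linux/list.h>
#include <linux/sched/task.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct my_waiter {                      /* hypothetical waiter record */
        struct list_head        node;
        struct task_struct      *task;
};

static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
{
        DEFINE_WAKE_Q(wake_q);
        struct my_waiter *w, *tmp;

        spin_lock(lock);
        list_for_each_entry_safe(w, tmp, waiters, node) {
                list_del_init(&w->node);
                get_task_struct(w->task);
                /* Our reference is transferred to (or dropped by) the wake_q. */
                wake_q_add_safe(&wake_q, w->task);
        }
        spin_unlock(lock);

        wake_up_q(&wake_q);     /* issue the wakeups after dropping the lock */
}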
@@ -5866,14 +5894,11 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
-	 * but there won't be any contention on it.
+	 * happen.
 	 */
-	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
-	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
kernel/workqueue.c
@@ -259,6 +259,8 @@ struct workqueue_struct {
 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
 #endif
 #ifdef CONFIG_LOCKDEP
+	char			*lock_name;
+	struct lock_class_key	key;
 	struct lockdep_map	lockdep_map;
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
@@ -3337,11 +3339,49 @@ static int init_worker_pool(struct worker_pool *pool)
 	return 0;
 }
 
+#ifdef CONFIG_LOCKDEP
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+	char *lock_name;
+
+	lockdep_register_key(&wq->key);
+	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
+	if (!lock_name)
+		lock_name = wq->name;
+	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+	lockdep_unregister_key(&wq->key);
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+	if (wq->lock_name != wq->name)
+		kfree(wq->lock_name);
+}
+#else
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+}
+#endif
+
 static void rcu_free_wq(struct rcu_head *rcu)
 {
 	struct workqueue_struct *wq =
 		container_of(rcu, struct workqueue_struct, rcu);
 
+	wq_free_lockdep(wq);
+
 	if (!(wq->flags & WQ_UNBOUND))
 		free_percpu(wq->cpu_pwqs);
 	else
@@ -3532,8 +3572,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * If we're the last pwq going away, @wq is already dead and no one
 	 * is gonna access it anymore. Schedule RCU free.
 	 */
-	if (is_last)
+	if (is_last) {
+		wq_unregister_lockdep(wq);
 		call_rcu(&wq->rcu, rcu_free_wq);
+	}
 }
 
 /**
@@ -4067,11 +4109,9 @@ static int init_rescuer(struct workqueue_struct *wq)
 	return 0;
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
-					       unsigned int flags,
-					       int max_active,
-					       struct lock_class_key *key,
-					       const char *lock_name, ...)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...)
 {
 	size_t tbl_size = 0;
 	va_list args;
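With the lockdep key now registered inside alloc_workqueue() itself, callers simply allocate and destroy the workqueue; a minimal sketch (the names and flags below are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;  /* hypothetical module-local workqueue */

static int my_driver_init(void)
{
        my_wq = alloc_workqueue("my_driver_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;
        return 0;
}

static void my_driver_exit(void)
{
        /* The workqueue's dynamic lockdep key is unregistered on destruction. */
        destroy_workqueue(my_wq);
}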
@@ -4106,7 +4146,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		goto err_free_wq;
 	}
 
-	va_start(args, lock_name);
+	va_start(args, max_active);
 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
 	va_end(args);
 
@@ -4123,7 +4163,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	INIT_LIST_HEAD(&wq->flusher_overflow);
 	INIT_LIST_HEAD(&wq->maydays);
 
-	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+	wq_init_lockdep(wq);
 	INIT_LIST_HEAD(&wq->list);
 
 	if (alloc_and_link_pwqs(wq) < 0)
@@ -4161,7 +4201,7 @@ err_destroy:
 	destroy_workqueue(wq);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
+EXPORT_SYMBOL_GPL(alloc_workqueue);
 
 /**
  * destroy_workqueue - safely terminate a workqueue
@@ -4214,6 +4254,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		kthread_stop(wq->rescuer->task);
 
 	if (!(wq->flags & WQ_UNBOUND)) {
+		wq_unregister_lockdep(wq);
 		/*
 		 * The base ref is never dropped on per-cpu pwqs. Directly
 		 * schedule RCU free.