Merge branch 'linus' into timers/urgent
Merge in Linus's branch which already has timers/core merged.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
@@ -2040,8 +2041,15 @@ static int kdb_env(int argc, const char **argv)
*/
static int kdb_dmesg(int argc, const char **argv)
{
char *syslog_data[4], *start, *end, c = '\0', *p;
int diag, logging, logsize, lines = 0, adjust = 0, n;
int diag;
int logging;
int lines = 0;
int adjust = 0;
int n = 0;
int skip = 0;
struct kmsg_dumper dumper = { .active = 1 };
size_t len;
char buf[201];

if (argc > 2)
return KDB_ARGCOUNT;
@@ -2064,22 +2072,10 @@ static int kdb_dmesg(int argc, const char **argv)
kdb_set(2, setargs);
}

/* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
* logical start, end+1. */
kdb_syslog_data(syslog_data);
if (syslog_data[2] == syslog_data[3])
return 0;
logsize = syslog_data[1] - syslog_data[0];
start = syslog_data[2];
end = syslog_data[3];
#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
for (n = 0, p = start; p < end; ++p) {
c = *KDB_WRAP(p);
if (c == '\n')
++n;
}
if (c != '\n')
++n;
kmsg_dump_rewind_nolock(&dumper);
while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
n++;

if (lines < 0) {
if (adjust >= n)
kdb_printf("buffer only contains %d lines, nothing "
@@ -2087,21 +2083,11 @@ static int kdb_dmesg(int argc, const char **argv)
else if (adjust - lines >= n)
kdb_printf("buffer only contains %d lines, last %d "
"lines printed\n", n, n - adjust);
if (adjust) {
for (; start < end && adjust; ++start) {
if (*KDB_WRAP(start) == '\n')
--adjust;
}
if (start < end)
++start;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
++lines;
}
end = p;
skip = adjust;
lines = abs(lines);
} else if (lines > 0) {
int skip = n - (adjust + lines);
skip = n - lines - adjust;
lines = abs(lines);
if (adjust >= n) {
kdb_printf("buffer only contains %d lines, "
"nothing printed\n", n);
@@ -2112,35 +2098,24 @@ static int kdb_dmesg(int argc, const char **argv)
kdb_printf("buffer only contains %d lines, first "
"%d lines printed\n", n, lines);
}
for (; start < end && skip; ++start) {
if (*KDB_WRAP(start) == '\n')
--skip;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
--lines;
}
end = p;
} else {
lines = n;
}
/* Do a line at a time (max 200 chars) to reduce protocol overhead */
c = '\n';
while (start != end) {
char buf[201];
p = buf;
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
while (start < end && (c = *KDB_WRAP(start)) &&
(p - buf) < sizeof(buf)-1) {
++start;
*p++ = c;
if (c == '\n')
break;

if (skip >= n || skip < 0)
return 0;

kmsg_dump_rewind_nolock(&dumper);
while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
if (skip) {
skip--;
continue;
}
*p = '\0';
kdb_printf("%s", buf);
if (!lines--)
break;

kdb_printf("%.*s\n", (int)len - 1, buf);
}
if (c != '\n')
kdb_printf("\n");

return 0;
}

@@ -205,7 +205,6 @@ extern char kdb_grep_string[];
extern int kdb_grep_leading;
extern int kdb_grep_trailing;
extern char *kdb_cmds[];
extern void kdb_syslog_data(char *syslog_data[]);
extern unsigned long kdb_task_state_string(const char *);
extern char kdb_task_state_char (const struct task_struct *);
extern unsigned long kdb_task_state(const struct task_struct *p,

@@ -1645,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
lockdep_assert_held(&ctx->mutex);

event->ctx = ctx;
if (event->cpu != -1)
event->cpu = cpu;

if (!task) {
/*
@@ -6252,6 +6254,8 @@ SYSCALL_DEFINE5(perf_event_open,
}
}

get_online_cpus();

event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
if (IS_ERR(event)) {
@@ -6304,7 +6308,7 @@ SYSCALL_DEFINE5(perf_event_open,
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, cpu);
ctx = find_get_context(pmu, task, event->cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
@@ -6377,20 +6381,23 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_lock(&ctx->mutex);

if (move_group) {
perf_install_in_context(ctx, group_leader, cpu);
synchronize_rcu();
perf_install_in_context(ctx, group_leader, event->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, cpu);
perf_install_in_context(ctx, sibling, event->cpu);
get_ctx(ctx);
}
}

perf_install_in_context(ctx, event, cpu);
perf_install_in_context(ctx, event, event->cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);

put_online_cpus();

event->owner = current;

mutex_lock(&current->perf_event_mutex);
@@ -6419,6 +6426,7 @@ err_context:
err_alloc:
free_event(event);
err_task:
put_online_cpus();
if (task)
put_task_struct(task);
err_group_fd:
@@ -6479,6 +6487,39 @@ err:
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
struct perf_event_context *src_ctx;
struct perf_event_context *dst_ctx;
struct perf_event *event, *tmp;
LIST_HEAD(events);

src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

mutex_lock(&src_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event);
put_ctx(src_ctx);
list_add(&event->event_entry, &events);
}
mutex_unlock(&src_ctx->mutex);

synchronize_rcu();

mutex_lock(&dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &events, event_entry) {
list_del(&event->event_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);

static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{

@@ -38,13 +38,29 @@
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE

static struct srcu_struct uprobes_srcu;
static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */

#define UPROBES_HASH_SZ 13

/*
* We need separate register/unregister and mmap/munmap lock hashes because
* of mmap_sem nesting.
*
* uprobe_register() needs to install probes on (potentially) all processes
* and thus needs to acquire multiple mmap_sems (consequtively, not
* concurrently), whereas uprobe_mmap() is called while holding mmap_sem
* for the particular process doing the mmap.
*
* uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
* because of lock order against i_mmap_mutex. This means there's a hole in
* the register vma iteration where a mmap() can happen.
*
* Thus uprobe_register() can race with uprobe_mmap() and we can try and
* install a probe where one is already installed.
*/

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

@@ -61,17 +77,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
*/
static atomic_t uprobe_events = ATOMIC_INIT(0);

/*
* Maintain a temporary per vma info that can be used to search if a vma
* has already been handled. This structure is introduced since extending
* vm_area_struct wasnt recommended.
*/
struct vma_info {
struct list_head probe_list;
struct mm_struct *mm;
loff_t vaddr;
};

struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
atomic_t ref;
@@ -100,7 +105,8 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
if (!is_register)
return true;

if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
== (VM_READ|VM_EXEC))
return true;

return false;
@@ -129,33 +135,17 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
spinlock_t *ptl;
unsigned long addr;
int err = -EFAULT;
spinlock_t *ptl;
pte_t *ptep;

addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
goto out;
return -EFAULT;

pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
goto out;

pud = pud_offset(pgd, addr);
if (!pud_present(*pud))
goto out;

pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
goto out;

ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
ptep = page_check_address(page, mm, addr, &ptl, 0);
if (!ptep)
goto out;
return -EAGAIN;

get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr);
@@ -174,10 +164,8 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
try_to_free_swap(page);
put_page(page);
pte_unmap_unlock(ptep, ptl);
err = 0;

out:
return err;
return 0;
}

/**
@@ -222,9 +210,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
void *vaddr_old, *vaddr_new;
struct vm_area_struct *vma;
struct uprobe *uprobe;
loff_t addr;
int ret;

retry:
/* Read the page with vaddr into memory */
ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
if (ret <= 0)
@@ -246,10 +233,6 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (mapping != vma->vm_file->f_mapping)
goto put_out;

addr = vma_address(vma, uprobe->offset);
if (vaddr != (unsigned long)addr)
goto put_out;

ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
@@ -267,11 +250,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
vaddr_new = kmap_atomic(new_page);

memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

/* poke the new insn in, ASSUMES we don't cross page boundary */
vaddr &= ~PAGE_MASK;
BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

kunmap_atomic(vaddr_new);
kunmap_atomic(vaddr_old);
@@ -291,6 +270,8 @@ unlock_out:
put_out:
put_page(old_page);

if (unlikely(ret == -EAGAIN))
goto retry;
return ret;
}

@@ -312,7 +293,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
void *vaddr_new;
int ret;

ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
if (ret <= 0)
return ret;

@@ -333,10 +314,20 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
uprobe_opcode_t opcode;
int result;

if (current->mm == mm) {
pagefault_disable();
result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
sizeof(opcode));
pagefault_enable();

if (likely(result == 0))
goto out;
}

result = read_opcode(mm, vaddr, &opcode);
if (result)
return result;

out:
if (is_swbp_insn(&opcode))
return 1;

@@ -355,7 +346,9 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
int result;

/*
* See the comment near uprobes_hash().
*/
result = is_swbp_at_addr(mm, vaddr);
if (result == 1)
return -EEXIST;
@@ -520,7 +513,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
uprobe->inode = igrab(inode);
uprobe->offset = offset;
init_rwsem(&uprobe->consumer_rwsem);
INIT_LIST_HEAD(&uprobe->pending_list);

/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
@@ -588,20 +580,22 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
}

static int
__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
unsigned long nbytes, unsigned long offset)
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
unsigned long nbytes, loff_t offset)
{
struct file *filp = vma->vm_file;
struct page *page;
void *vaddr;
unsigned long off1;
unsigned long idx;
unsigned long off;
pgoff_t idx;

if (!filp)
return -EINVAL;

idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
off1 = offset &= ~PAGE_MASK;
if (!mapping->a_ops->readpage)
return -EIO;

idx = offset >> PAGE_CACHE_SHIFT;
off = offset & ~PAGE_MASK;

/*
* Ensure that the page that has the original instruction is
@@ -612,22 +606,20 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *ins
return PTR_ERR(page);

vaddr = kmap_atomic(page);
memcpy(insn, vaddr + off1, nbytes);
memcpy(insn, vaddr + off, nbytes);
kunmap_atomic(vaddr);
page_cache_release(page);

return 0;
}

static int
copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping;
unsigned long nbytes;
int bytes;

addr &= ~PAGE_MASK;
nbytes = PAGE_SIZE - addr;
nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
mapping = uprobe->inode->i_mapping;

/* Instruction at end of binary; copy only available bytes */
@@ -638,13 +630,13 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)

/* Instruction at the page-boundary; copy bytes in second page */
if (nbytes < bytes) {
if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
bytes - nbytes, uprobe->offset + nbytes))
return -ENOMEM;

int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
bytes - nbytes, uprobe->offset + nbytes);
if (err)
return err;
bytes = nbytes;
}
return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}

/*
@@ -672,9 +664,8 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
*/
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
struct vm_area_struct *vma, loff_t vaddr)
struct vm_area_struct *vma, unsigned long vaddr)
{
unsigned long addr;
int ret;

/*
@@ -687,20 +678,22 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
if (!uprobe->consumers)
return -EEXIST;

addr = (unsigned long)vaddr;

if (!(uprobe->flags & UPROBE_COPY_INSN)) {
ret = copy_insn(uprobe, vma, addr);
ret = copy_insn(uprobe, vma->vm_file);
if (ret)
return ret;

if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
return -EEXIST;
return -ENOTSUPP;

ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
if (ret)
return ret;

/* write_opcode() assumes we don't cross page boundary */
BUG_ON((uprobe->offset & ~PAGE_MASK) +
UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

uprobe->flags |= UPROBE_COPY_INSN;
}

@@ -713,7 +706,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
* Hence increment before and decrement on failure.
*/
atomic_inc(&mm->uprobes_state.count);
ret = set_swbp(&uprobe->arch, mm, addr);
ret = set_swbp(&uprobe->arch, mm, vaddr);
if (ret)
atomic_dec(&mm->uprobes_state.count);

@@ -721,27 +714,21 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
atomic_dec(&mm->uprobes_state.count);
}

/*
* There could be threads that have hit the breakpoint and are entering the
* notifier code and trying to acquire the uprobes_treelock. The thread
* calling delete_uprobe() that is removing the uprobe from the rb_tree can
* race with these threads and might acquire the uprobes_treelock compared
* to some of the breakpoint hit threads. In such a case, the breakpoint
* hit threads will not find the uprobe. The current unregistering thread
* waits till all other threads have hit a breakpoint, to acquire the
* uprobes_treelock before the uprobe is removed from the rbtree.
* There could be threads that have already hit the breakpoint. They
* will recheck the current insn and restart if find_uprobe() fails.
* See find_active_uprobe().
*/
static void delete_uprobe(struct uprobe *uprobe)
{
unsigned long flags;

synchronize_srcu(&uprobes_srcu);
spin_lock_irqsave(&uprobes_treelock, flags);
rb_erase(&uprobe->rb_node, &uprobes_tree);
spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -750,139 +737,135 @@ static void delete_uprobe(struct uprobe *uprobe)
atomic_dec(&uprobe_events);
}

static struct vma_info *
__find_next_vma_info(struct address_space *mapping, struct list_head *head,
struct vma_info *vi, loff_t offset, bool is_register)
struct map_info {
struct map_info *next;
struct mm_struct *mm;
unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
struct map_info *next = info->next;
kfree(info);
return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
unsigned long pgoff = offset >> PAGE_SHIFT;
struct prio_tree_iter iter;
struct vm_area_struct *vma;
struct vma_info *tmpvi;
unsigned long pgoff;
int existing_vma;
loff_t vaddr;

pgoff = offset >> PAGE_SHIFT;
struct map_info *curr = NULL;
struct map_info *prev = NULL;
struct map_info *info;
int more = 0;

again:
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if (!valid_vma(vma, is_register))
continue;

existing_vma = 0;
vaddr = vma_address(vma, offset);

list_for_each_entry(tmpvi, head, probe_list) {
if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
existing_vma = 1;
break;
}
if (!prev && !more) {
/*
* Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
* reclaim. This is optimistic, no harm done if it fails.
*/
prev = kmalloc(sizeof(struct map_info),
GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (prev)
prev->next = NULL;
}
if (!prev) {
more++;
continue;
}

/*
* Another vma needs a probe to be installed. However skip
* installing the probe if the vma is about to be unlinked.
*/
if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
vi->mm = vma->vm_mm;
vi->vaddr = vaddr;
list_add(&vi->probe_list, head);
if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
continue;

return vi;
}
info = prev;
prev = prev->next;
info->next = curr;
curr = info;

info->mm = vma->vm_mm;
info->vaddr = vma_address(vma, offset);
}

return NULL;
}

/*
* Iterate in the rmap prio tree and find a vma where a probe has not
* yet been inserted.
*/
static struct vma_info *
find_next_vma_info(struct address_space *mapping, struct list_head *head,
loff_t offset, bool is_register)
{
struct vma_info *vi, *retvi;

vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
if (!vi)
return ERR_PTR(-ENOMEM);

mutex_lock(&mapping->i_mmap_mutex);
retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
mutex_unlock(&mapping->i_mmap_mutex);

if (!retvi)
kfree(vi);
if (!more)
goto out;

return retvi;
prev = curr;
while (curr) {
mmput(curr->mm);
curr = curr->next;
}

do {
info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
if (!info) {
curr = ERR_PTR(-ENOMEM);
goto out;
}
info->next = prev;
prev = info;
} while (--more);

goto again;
out:
while (prev)
prev = free_map_info(prev);
return curr;
}

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
struct list_head try_list;
struct vm_area_struct *vma;
struct address_space *mapping;
struct vma_info *vi, *tmpvi;
struct mm_struct *mm;
loff_t vaddr;
int ret;
struct map_info *info;
int err = 0;

mapping = uprobe->inode->i_mapping;
INIT_LIST_HEAD(&try_list);
info = build_map_info(uprobe->inode->i_mapping,
uprobe->offset, is_register);
if (IS_ERR(info))
return PTR_ERR(info);

ret = 0;
while (info) {
struct mm_struct *mm = info->mm;
struct vm_area_struct *vma;

for (;;) {
vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
if (!vi)
break;
if (err)
goto free;

if (IS_ERR(vi)) {
ret = PTR_ERR(vi);
break;
}
down_write(&mm->mmap_sem);
vma = find_vma(mm, (unsigned long)info->vaddr);
if (!vma || !valid_vma(vma, is_register))
goto unlock;

mm = vi->mm;
down_read(&mm->mmap_sem);
vma = find_vma(mm, (unsigned long)vi->vaddr);
if (!vma || !valid_vma(vma, is_register)) {
list_del(&vi->probe_list);
kfree(vi);
up_read(&mm->mmap_sem);
mmput(mm);
continue;
}
vaddr = vma_address(vma, uprobe->offset);
if (vma->vm_file->f_mapping->host != uprobe->inode ||
vaddr != vi->vaddr) {
list_del(&vi->probe_list);
kfree(vi);
up_read(&mm->mmap_sem);
mmput(mm);
continue;
}
vma_address(vma, uprobe->offset) != info->vaddr)
goto unlock;

if (is_register)
ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
else
remove_breakpoint(uprobe, mm, vi->vaddr);

up_read(&mm->mmap_sem);
mmput(mm);
if (is_register) {
if (ret && ret == -EEXIST)
ret = 0;
if (ret)
break;
err = install_breakpoint(uprobe, mm, vma, info->vaddr);
/*
* We can race against uprobe_mmap(), see the
* comment near uprobe_hash().
*/
if (err == -EEXIST)
err = 0;
} else {
remove_breakpoint(uprobe, mm, info->vaddr);
}
unlock:
up_write(&mm->mmap_sem);
free:
mmput(mm);
info = free_map_info(info);
}

list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
list_del(&vi->probe_list);
kfree(vi);
}

return ret;
return err;
}

static int __uprobe_register(struct uprobe *uprobe)
@@ -1048,7 +1031,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct uprobe *uprobe;
struct inode *inode;
int ret, count;

@@ -1066,12 +1049,9 @@ int uprobe_mmap(struct vm_area_struct *vma)
ret = 0;
count = 0;

list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
loff_t vaddr;

list_del(&uprobe->pending_list);
list_for_each_entry(uprobe, &tmp_list, pending_list) {
if (!ret) {
vaddr = vma_address(vma, uprobe->offset);
loff_t vaddr = vma_address(vma, uprobe->offset);

if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
put_uprobe(uprobe);
@@ -1079,8 +1059,10 @@ int uprobe_mmap(struct vm_area_struct *vma)
}

ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);

/* Ignore double add: */
/*
* We can race against uprobe_register(), see the
* comment near uprobe_hash().
*/
if (ret == -EEXIST) {
ret = 0;

@@ -1115,7 +1097,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct uprobe *uprobe;
struct inode *inode;

if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
@@ -1132,11 +1114,8 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, &tmp_list);

list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
loff_t vaddr;

list_del(&uprobe->pending_list);
vaddr = vma_address(vma, uprobe->offset);
list_for_each_entry(uprobe, &tmp_list, pending_list) {
loff_t vaddr = vma_address(vma, uprobe->offset);

if (vaddr >= start && vaddr < end) {
/*
@@ -1378,9 +1357,6 @@ void uprobe_free_utask(struct task_struct *t)
{
struct uprobe_task *utask = t->utask;

if (t->uprobe_srcu_id != -1)
srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);

if (!utask)
return;

@@ -1398,7 +1374,6 @@ void uprobe_free_utask(struct task_struct *t)
void uprobe_copy_process(struct task_struct *t)
{
t->utask = NULL;
t->uprobe_srcu_id = -1;
}

/*
@@ -1417,7 +1392,6 @@ static struct uprobe_task *add_utask(void)
if (unlikely(!utask))
return NULL;

utask->active_uprobe = NULL;
current->utask = utask;
return utask;
}
@@ -1479,41 +1453,64 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
return false;
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;

down_read(&mm->mmap_sem);
vma = find_vma(mm, bp_vaddr);
if (vma && vma->vm_start <= bp_vaddr) {
if (valid_vma(vma, false)) {
struct inode *inode;
loff_t offset;

inode = vma->vm_file->f_mapping->host;
offset = bp_vaddr - vma->vm_start;
offset += (vma->vm_pgoff << PAGE_SHIFT);
uprobe = find_uprobe(inode, offset);
}

if (!uprobe)
*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
} else {
*is_swbp = -EFAULT;
}
up_read(&mm->mmap_sem);

return uprobe;
}

/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
*/
static void handle_swbp(struct pt_regs *regs)
{
struct vm_area_struct *vma;
struct uprobe_task *utask;
struct uprobe *uprobe;
struct mm_struct *mm;
unsigned long bp_vaddr;
int uninitialized_var(is_swbp);

uprobe = NULL;
bp_vaddr = uprobe_get_swbp_addr(regs);
mm = current->mm;
down_read(&mm->mmap_sem);
vma = find_vma(mm, bp_vaddr);

if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
struct inode *inode;
loff_t offset;

inode = vma->vm_file->f_mapping->host;
offset = bp_vaddr - vma->vm_start;
offset += (vma->vm_pgoff << PAGE_SHIFT);
uprobe = find_uprobe(inode, offset);
}

srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
current->uprobe_srcu_id = -1;
up_read(&mm->mmap_sem);
uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

if (!uprobe) {
/* No matching uprobe; signal SIGTRAP. */
send_sig(SIGTRAP, current, 0);
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
send_sig(SIGTRAP, current, 0);
} else {
/*
* Either we raced with uprobe_unregister() or we can't
* access this memory. The latter is only possible if
* another thread plays with our ->mm. In both cases
* we can simply restart. If this vma was unmapped we
* can pretend this insn was not executed yet and get
* the (correct) SIGSEGV after restart.
*/
instruction_pointer_set(regs, bp_vaddr);
}
return;
}

@@ -1620,7 +1617,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
utask->state = UTASK_BP_HIT;

set_thread_flag(TIF_UPROBE);
current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);

return 1;
}
@@ -1655,7 +1651,6 @@ static int __init init_uprobes(void)
mutex_init(&uprobes_mutex[i]);
mutex_init(&uprobes_mmap_mutex[i]);
}
init_srcu_struct(&uprobes_srcu);

return register_die_notifier(&uprobe_exception_nb);
}

@@ -27,7 +27,6 @@
#include <linux/syscore_ops.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
#include <scsi/scsi_scan.h>

#include "power.h"

@@ -748,13 +747,6 @@ static int software_resume(void)
async_synchronize_full();
}

/*
* We can't depend on SCSI devices being available after loading
* one of their modules until scsi_complete_async_scans() is
* called and the resume device usually is a SCSI one.
*/
scsi_complete_async_scans();

swsusp_resume_device = name_to_dev_t(resume_file);
if (!swsusp_resume_device) {
error = -ENODEV;

@@ -24,7 +24,6 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <scsi/scsi_scan.h>

#include <asm/uaccess.h>

@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
* appear.
*/
wait_for_device_probe();
scsi_complete_async_scans();

data->swap = -1;
data->mode = O_WRONLY;

kernel/printk.c
@@ -1192,21 +1192,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}

#ifdef CONFIG_KGDB_KDB
/* kdb dmesg command needs access to the syslog buffer. do_syslog()
* uses locks so it cannot be used during debugging. Just tell kdb
* where the start and end of the physical and logical logs are. This
* is equivalent to do_syslog(3).
*/
void kdb_syslog_data(char *syslog_data[4])
{
syslog_data[0] = log_buf;
syslog_data[1] = log_buf + log_buf_len;
syslog_data[2] = log_buf + log_first_idx;
syslog_data[3] = log_buf + log_next_idx;
}
#endif /* CONFIG_KGDB_KDB */

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
@@ -2524,6 +2509,57 @@ void kmsg_dump(enum kmsg_dump_reason reason)
rcu_read_unlock();
}

/**
* kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
* @dumper: registered kmsg dumper
* @syslog: include the "<4>" prefixes
* @line: buffer to copy the line to
* @size: maximum size of the buffer
* @len: length of line placed into buffer
*
* Start at the beginning of the kmsg buffer, with the oldest kmsg
* record, and copy one record into the provided buffer.
*
* Consecutive calls will return the next available record moving
* towards the end of the buffer with the youngest messages.
*
* A return value of FALSE indicates that there are no more records to
* read.
*
* The function is similar to kmsg_dump_get_line(), but grabs no locks.
*/
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
struct log *msg;
size_t l = 0;
bool ret = false;

if (!dumper->active)
goto out;

if (dumper->cur_seq < log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = log_first_seq;
dumper->cur_idx = log_first_idx;
}

/* last entry */
if (dumper->cur_seq >= log_next_seq)
goto out;

msg = log_from_idx(dumper->cur_idx);
l = msg_print_text(msg, 0, syslog, line, size);

dumper->cur_idx = log_next(dumper->cur_idx);
dumper->cur_seq++;
ret = true;
out:
if (len)
*len = l;
return ret;
}

/**
* kmsg_dump_get_line - retrieve one kmsg log line
* @dumper: registered kmsg dumper
@@ -2545,36 +2581,12 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
unsigned long flags;
struct log *msg;
size_t l = 0;
bool ret = false;

if (!dumper->active)
goto out;
bool ret;

raw_spin_lock_irqsave(&logbuf_lock, flags);
if (dumper->cur_seq < log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = log_first_seq;
dumper->cur_idx = log_first_idx;
}

/* last entry */
if (dumper->cur_seq >= log_next_seq) {
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
goto out;
}

msg = log_from_idx(dumper->cur_idx);
l = msg_print_text(msg, 0, syslog, line, size);

dumper->cur_idx = log_next(dumper->cur_idx);
dumper->cur_seq++;
ret = true;
ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
out:
if (len)
*len = l;

return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
@@ -2678,6 +2690,24 @@ out:
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);

/**
* kmsg_dump_rewind_nolock - reset the interator (unlocked version)
* @dumper: registered kmsg dumper
*
* Reset the dumper's iterator so that kmsg_dump_get_line() and
* kmsg_dump_get_buffer() can be called again and used multiple
* times within the same dumper.dump() callback.
*
* The function is similar to kmsg_dump_rewind(), but grabs no locks.
*/
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
dumper->next_seq = log_next_seq;
dumper->next_idx = log_next_idx;
}

/**
* kmsg_dump_rewind - reset the interator
* @dumper: registered kmsg dumper
@@ -2691,10 +2721,7 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
unsigned long flags;

raw_spin_lock_irqsave(&logbuf_lock, flags);
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
dumper->next_seq = log_next_seq;
dumper->next_idx = log_next_idx;
kmsg_dump_rewind_nolock(dumper);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

@@ -53,6 +53,50 @@

#ifdef CONFIG_PREEMPT_RCU

/*
* Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
* if we block.
*/
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
* Preemptible RCU implementation for rcu_read_unlock().
* Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
* rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
* invoke rcu_read_unlock_special() to clean up after a context switch
* in an RCU read-side critical section and other special cases.
*/
void __rcu_read_unlock(void)
{
struct task_struct *t = current;

if (t->rcu_read_lock_nesting != 1) {
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
t->rcu_read_lock_nesting = INT_MIN;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
#ifdef CONFIG_PROVE_LOCKING
{
int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,

@@ -172,7 +172,7 @@ void rcu_irq_enter(void)
local_irq_restore(flags);
}

#ifdef CONFIG_PROVE_RCU
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
* Test whether RCU thinks that the current CPU is idle.
@@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
* Test whether the current CPU was interrupted from idle. Nested

@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
RCU_TRACE(.rcb.name = "rcu_preempt")
};

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

@@ -351,8 +350,9 @@ static int rcu_initiate_boost(void)
rcu_preempt_ctrlblk.boost_tasks =
rcu_preempt_ctrlblk.gp_tasks;
invoke_rcu_callbacks();
} else
} else {
RCU_TRACE(rcu_initiate_boost_trace());
}
return 1;
}

@@ -526,24 +526,12 @@ void rcu_preempt_note_context_switch(void)
local_irq_restore(flags);
}

/*
* Tiny-preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
* if we block.
*/
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
static noinline void rcu_read_unlock_special(struct task_struct *t)
void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -626,38 +614,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
local_irq_restore(flags);
}

/*
* Tiny-preemptible RCU implementation for rcu_read_unlock().
* Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
* rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
* invoke rcu_read_unlock_special() to clean up after a context switch
* in an RCU read-side critical section and other special cases.
*/
void __rcu_read_unlock(void)
{
struct task_struct *t = current;

barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
if (t->rcu_read_lock_nesting != 1)
--t->rcu_read_lock_nesting;
else {
t->rcu_read_lock_nesting = INT_MIN;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
#ifdef CONFIG_PROVE_LOCKING
{
int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
* Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the rcu_preempt_ctrlblk structure, which is
@@ -823,9 +779,9 @@ void synchronize_rcu_expedited(void)
rpcp->exp_tasks = NULL;

/* Wait for tail of ->blkd_tasks list to drain. */
if (!rcu_preempted_readers_exp())
if (!rcu_preempted_readers_exp()) {
local_irq_restore(flags);
else {
} else {
rcu_initiate_boost();
local_irq_restore(flags);
wait_event(sync_rcu_preempt_exp_wq,
@@ -846,8 +802,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
*/
int rcu_preempt_needs_cpu(void)
{
if (!rcu_preempt_running_reader())
rcu_preempt_cpu_qs();
return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

@@ -49,8 +49,7 @@
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
"Josh Triplett <josh@freedesktop.org>");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4; /* # fake writer threads */
@@ -206,6 +205,7 @@ static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
static bool barrier_phase; /* Test phase. */
static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
@@ -407,8 +407,9 @@ rcu_torture_cb(struct rcu_head *p)
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0;
rcu_torture_free(rp);
} else
} else {
cur_ops->deferred_free(rp);
}
}

static int rcu_no_completed(void)
@@ -635,6 +636,17 @@ static void srcu_torture_synchronize(void)
synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
void (*func)(struct rcu_head *head))
{
call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
srcu_barrier(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
int cnt = 0;
@@ -661,8 +673,8 @@ static struct rcu_torture_ops srcu_ops = {
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.call = NULL,
.cb_barrier = NULL,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
.name = "srcu"
};
@@ -1013,7 +1025,11 @@ rcu_torture_fakewriter(void *arg)
do {
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
cur_ops->sync();
if (cur_ops->cb_barrier != NULL &&
rcu_random(&rand) % (nfakewriters * 8) == 0)
cur_ops->cb_barrier();
else
cur_ops->sync();
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

@@ -1183,27 +1199,27 @@ rcu_torture_printk(char *page)
}
cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld "
"rtbf: %ld rtb: %ld nt: %ld "
"onoff: %ld/%ld:%ld/%ld "
"barrier: %ld/%ld:%ld",
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
atomic_read(&n_rcu_torture_free),
atomic_read(&n_rcu_torture_free));
cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
atomic_read(&n_rcu_torture_mberror),
n_rcu_torture_boost_ktrerror,
n_rcu_torture_boost_rterror,
n_rcu_torture_boost_rterror);
cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
n_rcu_torture_timers,
n_rcu_torture_timers);
cnt += sprintf(&page[cnt], "onoff: %ld/%ld:%ld/%ld ",
n_online_successes,
n_online_attempts,
n_offline_successes,
n_offline_attempts,
n_offline_attempts);
cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
n_barrier_successes,
n_barrier_attempts,
n_rcu_torture_barrier_error);
@@ -1445,8 +1461,7 @@ rcu_torture_shutdown(void *arg)
delta = shutdown_time - jiffies_snap;
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
"rcu_torture_shutdown task: %lu "
"jiffies remaining\n",
"rcu_torture_shutdown task: %lu jiffies remaining\n",
torture_type, delta);
schedule_timeout_interruptible(delta);
jiffies_snap = ACCESS_ONCE(jiffies);
@@ -1498,8 +1513,7 @@ rcu_torture_onoff(void *arg)
if (cpu_down(cpu) == 0) {
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
"rcu_torture_onoff task: "
"offlined %d\n",
"rcu_torture_onoff task: offlined %d\n",
torture_type, cpu);
n_offline_successes++;
}
@@ -1512,8 +1526,7 @@ rcu_torture_onoff(void *arg)
if (cpu_up(cpu) == 0) {
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
"rcu_torture_onoff task: "
"onlined %d\n",
"rcu_torture_onoff task: onlined %d\n",
torture_type, cpu);
n_online_successes++;
}
@@ -1631,6 +1644,7 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu)
static int rcu_torture_barrier_cbs(void *arg)
{
long myid = (long)arg;
bool lastphase = 0;
struct rcu_head rcu;

init_rcu_head_on_stack(&rcu);
@@ -1638,9 +1652,11 @@ static int rcu_torture_barrier_cbs(void *arg)
set_user_nice(current, 19);
do {
wait_event(barrier_cbs_wq[myid],
atomic_read(&barrier_cbs_count) == n_barrier_cbs ||
barrier_phase != lastphase ||
kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP);
lastphase = barrier_phase;
smp_mb(); /* ensure barrier_phase load before ->call(). */
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
break;
cur_ops->call(&rcu, rcu_torture_barrier_cbf);
@@ -1665,7 +1681,8 @@ static int rcu_torture_barrier(void *arg)
do {
atomic_set(&barrier_cbs_invoked, 0);
atomic_set(&barrier_cbs_count, n_barrier_cbs);
/* wake_up() path contains the required barriers. */
smp_mb(); /* Ensure barrier_phase after prior assignments. */
barrier_phase = !barrier_phase;
for (i = 0; i < n_barrier_cbs; i++)
wake_up(&barrier_cbs_wq[i]);
wait_event(barrier_wq,
@@ -1684,7 +1701,7 @@ static int rcu_torture_barrier(void *arg)
schedule_timeout_interruptible(HZ / 10);
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
rcutorture_shutdown_absorb("rcu_torture_barrier");
while (!kthread_should_stop())
schedule_timeout_interruptible(1);
return 0;
@@ -1908,8 +1925,8 @@ rcu_torture_init(void)
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
&srcu_ops, &srcu_sync_ops, &srcu_raw_ops,
&srcu_raw_sync_ops, &srcu_expedited_ops,
&srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
&srcu_raw_ops, &srcu_raw_sync_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };

mutex_lock(&fullstop_mutex);
@@ -1931,8 +1948,7 @@ rcu_torture_init(void)
return -EINVAL;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
"fqs_duration, fqs disabled.\n");
printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
fqs_duration = 0;
}
if (cur_ops->init)

kernel/rcutree.c
@@ -60,36 +60,44 @@

/* Data structures. */

static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];

#define RCU_STATE_INITIALIZER(structname) { \
.level = { &structname##_state.node[0] }, \
.levelcnt = { \
NUM_RCU_LVL_0, /* root of hierarchy. */ \
NUM_RCU_LVL_1, \
NUM_RCU_LVL_2, \
NUM_RCU_LVL_3, \
NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
}, \
#define RCU_STATE_INITIALIZER(sname, cr) { \
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
.orphan_nxttail = &structname##_state.orphan_nxtlist, \
.orphan_donetail = &structname##_state.orphan_donelist, \
.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
.n_force_qs = 0, \
.n_force_qs_ngp = 0, \
.name = #structname, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
.name = #sname, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
struct rcu_state rcu_sched_state =
RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);

/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */
NUM_RCU_LVL_0,
NUM_RCU_LVL_1,
NUM_RCU_LVL_2,
NUM_RCU_LVL_3,
NUM_RCU_LVL_4,
};
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
* The rcu_scheduler_active variable transitions from zero to one just

@@ -147,13 +155,6 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/* State information for rcu_barrier() and friends. */

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
@@ -358,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
struct task_struct *idle = idle_task(smp_processor_id());

trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
ftrace_dump(DUMP_ALL);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
@@ -468,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)

trace_rcu_dyntick("Error on exit: not idle task",
oldval, rdtp->dynticks_nesting);
ftrace_dump(DUMP_ALL);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
@@ -585,8 +586,6 @@ void rcu_nmi_exit(void)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

#ifdef CONFIG_PROVE_RCU

/**
* rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
*
@@ -604,7 +603,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#ifdef CONFIG_HOTPLUG_CPU
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
* Is the current CPU online? Disable preemption to avoid false positives
@@ -645,9 +644,7 @@ bool rcu_lockdep_current_cpu_online(void)
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#endif /* #ifdef CONFIG_PROVE_RCU */
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
* rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
@@ -733,7 +730,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
|
||||
int cpu;
|
||||
long delta;
|
||||
unsigned long flags;
|
||||
int ndetected;
|
||||
int ndetected = 0;
|
||||
struct rcu_node *rnp = rcu_get_root(rsp);
|
||||
|
||||
/* Only let one CPU complain about others per time interval. */
|
||||
@@ -774,7 +771,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
|
||||
*/
|
||||
rnp = rcu_get_root(rsp);
|
||||
raw_spin_lock_irqsave(&rnp->lock, flags);
|
||||
ndetected = rcu_print_task_stall(rnp);
|
||||
ndetected += rcu_print_task_stall(rnp);
|
||||
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
||||
|
||||
print_cpu_stall_info_end();
|
||||
@@ -860,9 +857,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
|
||||
*/
|
||||
void rcu_cpu_stall_reset(void)
|
||||
{
|
||||
rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
|
||||
rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
|
||||
rcu_preempt_stall_reset();
|
||||
struct rcu_state *rsp;
|
||||
|
||||
for_each_rcu_flavor(rsp)
|
||||
rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
|
||||
}
|
||||
|
||||
static struct notifier_block rcu_panic_block = {
|
||||
@@ -894,8 +892,9 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
|
||||
if (rnp->qsmask & rdp->grpmask) {
|
||||
rdp->qs_pending = 1;
|
||||
rdp->passed_quiesce = 0;
|
||||
} else
|
||||
} else {
|
||||
rdp->qs_pending = 0;
|
||||
}
|
||||
zero_cpu_stall_ticks(rdp);
|
||||
}
|
||||
}
|
||||
@@ -936,6 +935,18 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the specified rcu_data structure's callback list to empty.
|
||||
*/
|
||||
static void init_callback_list(struct rcu_data *rdp)
|
||||
{
|
||||
int i;
|
||||
|
||||
rdp->nxtlist = NULL;
|
||||
for (i = 0; i < RCU_NEXT_SIZE; i++)
|
||||
rdp->nxttail[i] = &rdp->nxtlist;
|
||||
}
|
||||
|
||||
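init_callback_list() above resets the segmented callback list: ->nxtlist is the single linked list and every ->nxttail[] entry points at the ->next pointer that ends its segment, so an empty list has all tails pointing back at ->nxtlist itself. A tiny userspace model of that invariant follows; the types and the enqueue() helper are simplified stand-ins, not the kernel structures.

#include <stddef.h>
#include <assert.h>

struct cb { struct cb *next; };

#define NSEG 4				/* stands in for RCU_NEXT_SIZE */

struct cblist {
	struct cb *head;		/* plays the role of ->nxtlist */
	struct cb **tail[NSEG];		/* plays the role of ->nxttail[] */
};

static void init_cblist(struct cblist *l)	/* mirrors init_callback_list() */
{
	int i;

	l->head = NULL;
	for (i = 0; i < NSEG; i++)
		l->tail[i] = &l->head;	/* empty segments all point at the head */
}

static void enqueue(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail[NSEG - 1] = c;		/* append to the last segment */
	l->tail[NSEG - 1] = &c->next;
}

int main(void)
{
	struct cblist l;
	struct cb a, b;

	init_cblist(&l);
	enqueue(&l, &a);
	enqueue(&l, &b);
	assert(l.head == &a && a.next == &b);
	return 0;
}
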
/*
|
||||
* Advance this CPU's callbacks, but only if the current grace period
|
||||
* has ended. This may be called only from the CPU to whom the rdp
|
||||
@@ -1328,8 +1339,6 @@ static void
|
||||
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
||||
struct rcu_node *rnp, struct rcu_data *rdp)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Orphan the callbacks. First adjust the counts. This is safe
|
||||
* because ->onofflock excludes _rcu_barrier()'s adoption of
|
||||
@@ -1340,7 +1349,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
||||
rsp->qlen += rdp->qlen;
|
||||
rdp->n_cbs_orphaned += rdp->qlen;
|
||||
rdp->qlen_lazy = 0;
|
||||
rdp->qlen = 0;
|
||||
ACCESS_ONCE(rdp->qlen) = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1369,9 +1378,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
|
||||
}
|
||||
|
||||
/* Finally, initialize the rcu_data structure's list to empty. */
|
||||
rdp->nxtlist = NULL;
|
||||
for (i = 0; i < RCU_NEXT_SIZE; i++)
|
||||
rdp->nxttail[i] = &rdp->nxtlist;
|
||||
init_callback_list(rdp);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1505,6 +1512,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
|
||||
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
||||
if (need_report & RCU_OFL_TASKS_EXP_GP)
|
||||
rcu_report_exp_rnp(rsp, rnp, true);
|
||||
WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
|
||||
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
|
||||
cpu, rdp->qlen, rdp->nxtlist);
|
||||
}
|
||||
|
||||
#else /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
@@ -1592,7 +1602,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
}
|
||||
smp_mb(); /* List handling before counting for rcu_barrier(). */
|
||||
rdp->qlen_lazy -= count_lazy;
|
||||
rdp->qlen -= count;
|
||||
ACCESS_ONCE(rdp->qlen) -= count;
|
||||
rdp->n_cbs_invoked += count;
|
||||
|
||||
/* Reinstate batch limit if we have worked down the excess. */
|
||||
@@ -1605,6 +1615,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
rdp->n_force_qs_snap = rsp->n_force_qs;
|
||||
} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
|
||||
rdp->qlen_last_fqs_check = rdp->qlen;
|
||||
WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
@@ -1745,8 +1756,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
|
||||
break; /* grace period idle or initializing, ignore. */
|
||||
|
||||
case RCU_SAVE_DYNTICK:
|
||||
if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
|
||||
break; /* So gcc recognizes the dead code. */
|
||||
|
||||
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
|
||||
|
||||
@@ -1788,9 +1797,10 @@ unlock_fqs_ret:
|
||||
* whom the rdp belongs.
|
||||
*/
|
||||
static void
|
||||
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
__rcu_process_callbacks(struct rcu_state *rsp)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
|
||||
|
||||
WARN_ON_ONCE(rdp->beenonline == 0);
|
||||
|
||||
@@ -1826,11 +1836,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
|
||||
*/
|
||||
static void rcu_process_callbacks(struct softirq_action *unused)
|
||||
{
|
||||
struct rcu_state *rsp;
|
||||
|
||||
trace_rcu_utilization("Start RCU core");
|
||||
__rcu_process_callbacks(&rcu_sched_state,
|
||||
&__get_cpu_var(rcu_sched_data));
|
||||
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
|
||||
rcu_preempt_process_callbacks();
|
||||
for_each_rcu_flavor(rsp)
|
||||
__rcu_process_callbacks(rsp);
|
||||
trace_rcu_utilization("End RCU core");
|
||||
}
|
||||
|
||||
@@ -1857,50 +1867,22 @@ static void invoke_rcu_core(void)
|
||||
raise_softirq(RCU_SOFTIRQ);
|
||||
}
|
||||
|
||||
static void
|
||||
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
|
||||
struct rcu_state *rsp, bool lazy)
|
||||
/*
|
||||
* Handle any core-RCU processing required by a call_rcu() invocation.
|
||||
*/
|
||||
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
|
||||
struct rcu_head *head, unsigned long flags)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rcu_data *rdp;
|
||||
|
||||
WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
|
||||
debug_rcu_head_queue(head);
|
||||
head->func = func;
|
||||
head->next = NULL;
|
||||
|
||||
smp_mb(); /* Ensure RCU update seen before callback registry. */
|
||||
|
||||
/*
|
||||
* Opportunistically note grace-period endings and beginnings.
|
||||
* Note that we might see a beginning right after we see an
|
||||
* end, but never vice versa, since this CPU has to pass through
|
||||
* a quiescent state betweentimes.
|
||||
* If called from an extended quiescent state, invoke the RCU
|
||||
* core in order to force a re-evaluation of RCU's idleness.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
rdp = this_cpu_ptr(rsp->rda);
|
||||
if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
|
||||
invoke_rcu_core();
|
||||
|
||||
/* Add the callback to our list. */
|
||||
rdp->qlen++;
|
||||
if (lazy)
|
||||
rdp->qlen_lazy++;
|
||||
else
|
||||
rcu_idle_count_callbacks_posted();
|
||||
smp_mb(); /* Count before adding callback for rcu_barrier(). */
|
||||
*rdp->nxttail[RCU_NEXT_TAIL] = head;
|
||||
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
|
||||
|
||||
if (__is_kfree_rcu_offset((unsigned long)func))
|
||||
trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
|
||||
rdp->qlen_lazy, rdp->qlen);
|
||||
else
|
||||
trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
|
||||
|
||||
/* If interrupts were disabled, don't dive into RCU core. */
|
||||
if (irqs_disabled_flags(flags)) {
|
||||
local_irq_restore(flags);
|
||||
/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
|
||||
if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Force the grace period if too many callbacks or too long waiting.
|
||||
@@ -1933,6 +1915,49 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
|
||||
}
|
||||
} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
|
||||
force_quiescent_state(rsp, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
|
||||
struct rcu_state *rsp, bool lazy)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rcu_data *rdp;
|
||||
|
||||
WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
|
||||
debug_rcu_head_queue(head);
|
||||
head->func = func;
|
||||
head->next = NULL;
|
||||
|
||||
smp_mb(); /* Ensure RCU update seen before callback registry. */
|
||||
|
||||
/*
|
||||
* Opportunistically note grace-period endings and beginnings.
|
||||
* Note that we might see a beginning right after we see an
|
||||
* end, but never vice versa, since this CPU has to pass through
|
||||
* a quiescent state betweentimes.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
rdp = this_cpu_ptr(rsp->rda);
|
||||
|
||||
/* Add the callback to our list. */
|
||||
ACCESS_ONCE(rdp->qlen)++;
|
||||
if (lazy)
|
||||
rdp->qlen_lazy++;
|
||||
else
|
||||
rcu_idle_count_callbacks_posted();
|
||||
smp_mb(); /* Count before adding callback for rcu_barrier(). */
|
||||
*rdp->nxttail[RCU_NEXT_TAIL] = head;
|
||||
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
|
||||
|
||||
if (__is_kfree_rcu_offset((unsigned long)func))
|
||||
trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
|
||||
rdp->qlen_lazy, rdp->qlen);
|
||||
else
|
||||
trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
|
||||
|
||||
/* Go handle any RCU core processing required. */
|
||||
__call_rcu_core(rsp, rdp, head, flags);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
@@ -1962,28 +1987,16 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
|
||||
* occasionally incorrectly indicate that there are multiple CPUs online
|
||||
* when there was in fact only one the whole time, as this just adds
|
||||
* some overhead: RCU still operates correctly.
|
||||
*
|
||||
* Of course, sampling num_online_cpus() with preemption enabled can
|
||||
* give erroneous results if there are concurrent CPU-hotplug operations.
|
||||
* For example, given a demonic sequence of preemptions in num_online_cpus()
|
||||
* and CPU-hotplug operations, there could be two or more CPUs online at
|
||||
* all times, but num_online_cpus() might well return one (or even zero).
|
||||
*
|
||||
* However, all such demonic sequences require at least one CPU-offline
|
||||
* operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
|
||||
* is only a problem if there is an RCU read-side critical section executing
|
||||
* throughout. But RCU-sched and RCU-bh read-side critical sections
|
||||
* disable either preemption or bh, which prevents a CPU from going offline.
|
||||
* Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
|
||||
* that there is only one CPU when in fact there was more than one throughout
|
||||
* is when there were no RCU readers in the system. If there are no
|
||||
* RCU readers, the grace period by definition can be of zero length,
|
||||
* regardless of the number of online CPUs.
|
||||
*/
|
||||
static inline int rcu_blocking_is_gp(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
might_sleep(); /* Check for RCU read-side critical section. */
|
||||
return num_online_cpus() <= 1;
|
||||
preempt_disable();
|
||||
ret = num_online_cpus() <= 1;
|
||||
preempt_enable();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2118,9 +2131,9 @@ void synchronize_sched_expedited(void)
|
||||
put_online_cpus();
|
||||
|
||||
/* No joy, try again later. Or just synchronize_sched(). */
|
||||
if (trycount++ < 10)
|
||||
if (trycount++ < 10) {
|
||||
udelay(trycount * num_online_cpus());
|
||||
else {
|
||||
} else {
|
||||
synchronize_sched();
|
||||
return;
|
||||
}
|
||||
@@ -2241,9 +2254,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static int rcu_pending(int cpu)
{
return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
__rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
rcu_preempt_pending(cpu);
struct rcu_state *rsp;

for_each_rcu_flavor(rsp)
if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
return 1;
return 0;
}
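rcu_pending() above now walks the flavor list instead of naming rcu_sched, rcu_bh and rcu_preempt explicitly. Below is a small self-contained model of that register-then-iterate pattern in plain userspace C; the struct, register_flavor() and for_each_flavor() names are made up for illustration and are not the kernel API.

#include <stdio.h>

struct flavor {
	const char *name;
	struct flavor *next;
};

static struct flavor *flavors;	/* plays the role of rcu_struct_flavors */

#define for_each_flavor(f) for ((f) = flavors; (f); (f) = (f)->next)

/* rcu_init_one() does the equivalent registration with list_add(). */
static void register_flavor(struct flavor *f)
{
	f->next = flavors;
	flavors = f;
}

int main(void)
{
	struct flavor sched = { "rcu_sched", NULL };
	struct flavor bh = { "rcu_bh", NULL };
	struct flavor *f;

	register_flavor(&sched);
	register_flavor(&bh);
	for_each_flavor(f)	/* one loop replaces the per-flavor calls */
		printf("%s\n", f->name);
	return 0;
}
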
|
||||
/*
|
||||
@@ -2253,20 +2269,41 @@ static int rcu_pending(int cpu)
|
||||
*/
|
||||
static int rcu_cpu_has_callbacks(int cpu)
|
||||
{
|
||||
struct rcu_state *rsp;
|
||||
|
||||
/* RCU callbacks either ready or pending? */
|
||||
return per_cpu(rcu_sched_data, cpu).nxtlist ||
|
||||
per_cpu(rcu_bh_data, cpu).nxtlist ||
|
||||
rcu_preempt_cpu_has_callbacks(cpu);
|
||||
for_each_rcu_flavor(rsp)
|
||||
if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper function for _rcu_barrier() tracing. If tracing is disabled,
|
||||
* the compiler is expected to optimize this away.
|
||||
*/
|
||||
static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
|
||||
int cpu, unsigned long done)
|
||||
{
|
||||
trace_rcu_barrier(rsp->name, s, cpu,
|
||||
atomic_read(&rsp->barrier_cpu_count), done);
|
||||
}
|
||||
|
||||
/*
|
||||
* RCU callback function for _rcu_barrier(). If we are last, wake
|
||||
* up the task executing _rcu_barrier().
|
||||
*/
|
||||
static void rcu_barrier_callback(struct rcu_head *notused)
|
||||
static void rcu_barrier_callback(struct rcu_head *rhp)
|
||||
{
|
||||
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
|
||||
complete(&rcu_barrier_completion);
|
||||
struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
|
||||
struct rcu_state *rsp = rdp->rsp;
|
||||
|
||||
if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
|
||||
_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
|
||||
complete(&rsp->barrier_completion);
|
||||
} else {
|
||||
_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2274,35 +2311,63 @@ static void rcu_barrier_callback(struct rcu_head *notused)
|
||||
*/
|
||||
static void rcu_barrier_func(void *type)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
|
||||
void (*call_rcu_func)(struct rcu_head *head,
|
||||
void (*func)(struct rcu_head *head));
|
||||
struct rcu_state *rsp = type;
|
||||
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
|
||||
|
||||
atomic_inc(&rcu_barrier_cpu_count);
|
||||
call_rcu_func = type;
|
||||
call_rcu_func(head, rcu_barrier_callback);
|
||||
_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
|
||||
atomic_inc(&rsp->barrier_cpu_count);
|
||||
rsp->call(&rdp->barrier_head, rcu_barrier_callback);
|
||||
}
|
||||
|
||||
/*
|
||||
* Orchestrate the specified type of RCU barrier, waiting for all
|
||||
* RCU callbacks of the specified type to complete.
|
||||
*/
|
||||
static void _rcu_barrier(struct rcu_state *rsp,
|
||||
void (*call_rcu_func)(struct rcu_head *head,
|
||||
void (*func)(struct rcu_head *head)))
|
||||
static void _rcu_barrier(struct rcu_state *rsp)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long flags;
|
||||
struct rcu_data *rdp;
|
||||
struct rcu_head rh;
|
||||
struct rcu_data rd;
|
||||
unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
|
||||
unsigned long snap_done;
|
||||
|
||||
init_rcu_head_on_stack(&rh);
|
||||
init_rcu_head_on_stack(&rd.barrier_head);
|
||||
_rcu_barrier_trace(rsp, "Begin", -1, snap);
|
||||
|
||||
/* Take mutex to serialize concurrent rcu_barrier() requests. */
|
||||
mutex_lock(&rcu_barrier_mutex);
|
||||
mutex_lock(&rsp->barrier_mutex);
|
||||
|
||||
smp_mb(); /* Prevent any prior operations from leaking in. */
|
||||
/*
|
||||
* Ensure that all prior references, including to ->n_barrier_done,
|
||||
* are ordered before the _rcu_barrier() machinery.
|
||||
*/
|
||||
smp_mb(); /* See above block comment. */
|
||||
|
||||
/*
|
||||
* Recheck ->n_barrier_done to see if others did our work for us.
|
||||
* This means checking ->n_barrier_done for an even-to-odd-to-even
|
||||
* transition. The "if" expression below therefore rounds the old
|
||||
* value up to the next even number and adds two before comparing.
|
||||
*/
|
||||
snap_done = ACCESS_ONCE(rsp->n_barrier_done);
|
||||
_rcu_barrier_trace(rsp, "Check", -1, snap_done);
|
||||
if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
|
||||
_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
|
||||
smp_mb(); /* caller's subsequent code after above check. */
|
||||
mutex_unlock(&rsp->barrier_mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Increment ->n_barrier_done to avoid duplicate work. Use
|
||||
* ACCESS_ONCE() to prevent the compiler from speculating
|
||||
* the increment to precede the early-exit check.
|
||||
*/
|
||||
ACCESS_ONCE(rsp->n_barrier_done)++;
|
||||
WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
|
||||
_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
|
||||
smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
|
||||
|
||||
/*
|
||||
* Initialize the count to one rather than to zero in order to
|
||||
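The early-exit logic above relies on ->n_barrier_done being odd while a barrier is in flight and even when idle, so a caller whose pre-mutex snapshot is already two counts behind knows another _rcu_barrier() completed after the snapshot and can return immediately. Here is a single-threaded toy model of that check, with simplified names and ignoring counter wrap (the kernel uses ULONG_CMP_GE() for that).

#include <stdio.h>

static unsigned long n_done;	/* even: idle, odd: barrier in flight */

static int barrier_already_done(unsigned long snap)
{
	/* Round the pre-mutex snapshot up to even and require one full
	 * start+finish (two increments) beyond it, mirroring the
	 * ULONG_CMP_GE() check in _rcu_barrier() (wrap ignored here). */
	return n_done >= ((snap + 1) & ~0x1UL) + 2;
}

static void run_barrier(unsigned long snap)
{
	if (barrier_already_done(snap)) {
		printf("early exit, someone else's barrier covered us\n");
		return;
	}
	n_done++;			/* odd: barrier started */
	/* ... queue a callback on every CPU and wait ... */
	n_done++;			/* even: barrier finished */
	printf("ran barrier, n_done=%lu\n", n_done);
}

int main(void)
{
	unsigned long snap = n_done;	/* snapshot taken before the mutex */

	run_barrier(snap);		/* does the work: n_done 0 -> 2 */
	run_barrier(snap);		/* same stale snapshot: early exit */
	return 0;
}
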
@@ -2321,8 +2386,8 @@ static void _rcu_barrier(struct rcu_state *rsp,
|
||||
* 6. Both rcu_barrier_callback() callbacks are invoked, awakening
|
||||
* us -- but before CPU 1's orphaned callbacks are invoked!!!
|
||||
*/
|
||||
init_completion(&rcu_barrier_completion);
|
||||
atomic_set(&rcu_barrier_cpu_count, 1);
|
||||
init_completion(&rsp->barrier_completion);
|
||||
atomic_set(&rsp->barrier_cpu_count, 1);
|
||||
raw_spin_lock_irqsave(&rsp->onofflock, flags);
|
||||
rsp->rcu_barrier_in_progress = current;
|
||||
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
|
||||
@@ -2338,14 +2403,19 @@ static void _rcu_barrier(struct rcu_state *rsp,
|
||||
preempt_disable();
|
||||
rdp = per_cpu_ptr(rsp->rda, cpu);
|
||||
if (cpu_is_offline(cpu)) {
|
||||
_rcu_barrier_trace(rsp, "Offline", cpu,
|
||||
rsp->n_barrier_done);
|
||||
preempt_enable();
|
||||
while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
|
||||
schedule_timeout_interruptible(1);
|
||||
} else if (ACCESS_ONCE(rdp->qlen)) {
|
||||
smp_call_function_single(cpu, rcu_barrier_func,
|
||||
(void *)call_rcu_func, 1);
|
||||
_rcu_barrier_trace(rsp, "OnlineQ", cpu,
|
||||
rsp->n_barrier_done);
|
||||
smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
|
||||
preempt_enable();
|
||||
} else {
|
||||
_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
|
||||
rsp->n_barrier_done);
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
@@ -2362,24 +2432,32 @@ static void _rcu_barrier(struct rcu_state *rsp,
|
||||
rcu_adopt_orphan_cbs(rsp);
|
||||
rsp->rcu_barrier_in_progress = NULL;
|
||||
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
|
||||
atomic_inc(&rcu_barrier_cpu_count);
|
||||
atomic_inc(&rsp->barrier_cpu_count);
|
||||
smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
|
||||
call_rcu_func(&rh, rcu_barrier_callback);
|
||||
rd.rsp = rsp;
|
||||
rsp->call(&rd.barrier_head, rcu_barrier_callback);
|
||||
|
||||
/*
|
||||
* Now that we have an rcu_barrier_callback() callback on each
|
||||
* CPU, and thus each counted, remove the initial count.
|
||||
*/
|
||||
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
|
||||
complete(&rcu_barrier_completion);
|
||||
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
||||
complete(&rsp->barrier_completion);
|
||||
|
||||
/* Increment ->n_barrier_done to prevent duplicate work. */
|
||||
smp_mb(); /* Keep increment after above mechanism. */
|
||||
ACCESS_ONCE(rsp->n_barrier_done)++;
|
||||
WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
|
||||
_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
|
||||
smp_mb(); /* Keep increment before caller's subsequent code. */
|
||||
|
||||
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
|
||||
wait_for_completion(&rcu_barrier_completion);
|
||||
wait_for_completion(&rsp->barrier_completion);
|
||||
|
||||
/* Other rcu_barrier() invocations can now safely proceed. */
|
||||
mutex_unlock(&rcu_barrier_mutex);
|
||||
mutex_unlock(&rsp->barrier_mutex);
|
||||
|
||||
destroy_rcu_head_on_stack(&rh);
|
||||
destroy_rcu_head_on_stack(&rd.barrier_head);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2387,7 +2465,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
|
||||
*/
|
||||
void rcu_barrier_bh(void)
|
||||
{
|
||||
_rcu_barrier(&rcu_bh_state, call_rcu_bh);
|
||||
_rcu_barrier(&rcu_bh_state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
|
||||
|
||||
@@ -2396,7 +2474,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
|
||||
*/
|
||||
void rcu_barrier_sched(void)
|
||||
{
|
||||
_rcu_barrier(&rcu_sched_state, call_rcu_sched);
|
||||
_rcu_barrier(&rcu_sched_state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
|
||||
|
||||
@@ -2407,18 +2485,15 @@ static void __init
|
||||
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
|
||||
{
|
||||
unsigned long flags;
|
||||
int i;
|
||||
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
||||
struct rcu_node *rnp = rcu_get_root(rsp);
|
||||
|
||||
/* Set up local state, ensuring consistent view of global state. */
|
||||
raw_spin_lock_irqsave(&rnp->lock, flags);
|
||||
rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
|
||||
rdp->nxtlist = NULL;
|
||||
for (i = 0; i < RCU_NEXT_SIZE; i++)
|
||||
rdp->nxttail[i] = &rdp->nxtlist;
|
||||
init_callback_list(rdp);
|
||||
rdp->qlen_lazy = 0;
|
||||
rdp->qlen = 0;
|
||||
ACCESS_ONCE(rdp->qlen) = 0;
|
||||
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
|
||||
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
|
||||
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
|
||||
@@ -2492,9 +2567,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
|
||||
|
||||
static void __cpuinit rcu_prepare_cpu(int cpu)
|
||||
{
|
||||
rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
|
||||
rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
|
||||
rcu_preempt_init_percpu_data(cpu);
|
||||
struct rcu_state *rsp;
|
||||
|
||||
for_each_rcu_flavor(rsp)
|
||||
rcu_init_percpu_data(cpu, rsp,
|
||||
strcmp(rsp->name, "rcu_preempt") == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2506,6 +2583,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
|
||||
long cpu = (long)hcpu;
|
||||
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
|
||||
struct rcu_node *rnp = rdp->mynode;
|
||||
struct rcu_state *rsp;
|
||||
|
||||
trace_rcu_utilization("Start CPU hotplug");
|
||||
switch (action) {
|
||||
@@ -2530,18 +2608,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
|
||||
* touch any data without introducing corruption. We send the
|
||||
* dying CPU's callbacks to an arbitrarily chosen online CPU.
|
||||
*/
|
||||
rcu_cleanup_dying_cpu(&rcu_bh_state);
|
||||
rcu_cleanup_dying_cpu(&rcu_sched_state);
|
||||
rcu_preempt_cleanup_dying_cpu();
|
||||
for_each_rcu_flavor(rsp)
|
||||
rcu_cleanup_dying_cpu(rsp);
|
||||
rcu_cleanup_after_idle(cpu);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
case CPU_UP_CANCELED:
|
||||
case CPU_UP_CANCELED_FROZEN:
|
||||
rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
|
||||
rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
|
||||
rcu_preempt_cleanup_dead_cpu(cpu);
|
||||
for_each_rcu_flavor(rsp)
|
||||
rcu_cleanup_dead_cpu(cpu, rsp);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@@ -2574,9 +2650,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = NUM_RCU_LVLS - 1; i > 0; i--)
|
||||
for (i = rcu_num_lvls - 1; i > 0; i--)
|
||||
rsp->levelspread[i] = CONFIG_RCU_FANOUT;
|
||||
rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
|
||||
rsp->levelspread[0] = rcu_fanout_leaf;
|
||||
}
|
||||
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
|
||||
static void __init rcu_init_levelspread(struct rcu_state *rsp)
|
||||
@@ -2586,7 +2662,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
|
||||
int i;
|
||||
|
||||
cprv = NR_CPUS;
|
||||
for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
|
||||
for (i = rcu_num_lvls - 1; i >= 0; i--) {
|
||||
ccur = rsp->levelcnt[i];
|
||||
rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
|
||||
cprv = ccur;
|
||||
@@ -2613,13 +2689,15 @@ static void __init rcu_init_one(struct rcu_state *rsp,
|
||||
|
||||
/* Initialize the level-tracking arrays. */
|
||||
|
||||
for (i = 1; i < NUM_RCU_LVLS; i++)
|
||||
for (i = 0; i < rcu_num_lvls; i++)
|
||||
rsp->levelcnt[i] = num_rcu_lvl[i];
|
||||
for (i = 1; i < rcu_num_lvls; i++)
|
||||
rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
|
||||
rcu_init_levelspread(rsp);
|
||||
|
||||
/* Initialize the elements themselves, starting from the leaves. */
|
||||
|
||||
for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
|
||||
for (i = rcu_num_lvls - 1; i >= 0; i--) {
|
||||
cpustride *= rsp->levelspread[i];
|
||||
rnp = rsp->level[i];
|
||||
for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
|
||||
@@ -2649,13 +2727,74 @@ static void __init rcu_init_one(struct rcu_state *rsp,
|
||||
}
|
||||
|
||||
rsp->rda = rda;
|
||||
rnp = rsp->level[NUM_RCU_LVLS - 1];
|
||||
rnp = rsp->level[rcu_num_lvls - 1];
|
||||
for_each_possible_cpu(i) {
|
||||
while (i > rnp->grphi)
|
||||
rnp++;
|
||||
per_cpu_ptr(rsp->rda, i)->mynode = rnp;
|
||||
rcu_boot_init_percpu_data(i, rsp);
|
||||
}
|
||||
list_add(&rsp->flavors, &rcu_struct_flavors);
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the rcu_node tree geometry from kernel parameters. This cannot
|
||||
* replace the definitions in rcutree.h because those are needed to size
|
||||
* the ->node array in the rcu_state structure.
|
||||
*/
|
||||
static void __init rcu_init_geometry(void)
|
||||
{
|
||||
int i;
|
||||
int j;
|
||||
int n = nr_cpu_ids;
|
||||
int rcu_capacity[MAX_RCU_LVLS + 1];
|
||||
|
||||
/* If the compile-time values are accurate, just leave. */
|
||||
if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Compute number of nodes that can be handled an rcu_node tree
|
||||
* with the given number of levels. Setting rcu_capacity[0] makes
|
||||
* some of the arithmetic easier.
|
||||
*/
|
||||
rcu_capacity[0] = 1;
|
||||
rcu_capacity[1] = rcu_fanout_leaf;
|
||||
for (i = 2; i <= MAX_RCU_LVLS; i++)
|
||||
rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
|
||||
|
||||
/*
|
||||
* The boot-time rcu_fanout_leaf parameter is only permitted
|
||||
* to increase the leaf-level fanout, not decrease it. Of course,
|
||||
* the leaf-level fanout cannot exceed the number of bits in
|
||||
* the rcu_node masks. Finally, the tree must be able to accommodate
|
||||
* the configured number of CPUs. Complain and fall back to the
|
||||
* compile-time values if these limits are exceeded.
|
||||
*/
|
||||
if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
|
||||
rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
|
||||
n > rcu_capacity[MAX_RCU_LVLS]) {
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Calculate the number of rcu_nodes at each level of the tree. */
|
||||
for (i = 1; i <= MAX_RCU_LVLS; i++)
|
||||
if (n <= rcu_capacity[i]) {
|
||||
for (j = 0; j <= i; j++)
|
||||
num_rcu_lvl[j] =
|
||||
DIV_ROUND_UP(n, rcu_capacity[i - j]);
|
||||
rcu_num_lvls = i;
|
||||
for (j = i + 1; j <= MAX_RCU_LVLS; j++)
|
||||
num_rcu_lvl[j] = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Calculate the total number of rcu_node structures. */
|
||||
rcu_num_nodes = 0;
|
||||
for (i = 0; i <= MAX_RCU_LVLS; i++)
|
||||
rcu_num_nodes += num_rcu_lvl[i];
|
||||
rcu_num_nodes -= n;
|
||||
}
|
||||
|
||||
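rcu_init_geometry() above first computes how many CPUs a tree of each depth can cover, picks the shallowest depth that fits nr_cpu_ids, then sizes every level by rounding up. A standalone sketch of the same arithmetic with made-up numbers (leaf fanout 16, fanout 64, 4096 CPUs); the macro and variable names are simplified, not the kernel's.

#include <stdio.h>

#define MAX_LVLS 4
#define FANOUT 64			/* stands in for CONFIG_RCU_FANOUT */

int main(void)
{
	int leaf = 16;			/* boot-time rcu_fanout_leaf */
	int n = 4096;			/* nr_cpu_ids */
	long cap[MAX_LVLS + 1];
	int lvl[MAX_LVLS + 1] = { 0 };
	int i, j, nlvls = 0, nodes = 0;

	cap[0] = 1;
	cap[1] = leaf;
	for (i = 2; i <= MAX_LVLS; i++)
		cap[i] = cap[i - 1] * FANOUT;

	for (i = 1; i <= MAX_LVLS; i++)
		if (n <= cap[i]) {	/* smallest tree that fits n CPUs */
			for (j = 0; j <= i; j++)
				lvl[j] = (n + cap[i - j] - 1) / cap[i - j];
			nlvls = i;
			break;
		}

	for (i = 0; i <= MAX_LVLS; i++)
		nodes += lvl[i];
	nodes -= n;			/* leaf "level" counts CPUs, not rcu_nodes */

	printf("levels=%d rcu_nodes=%d\n", nlvls, nodes);	/* levels=3 rcu_nodes=261 */
	return 0;
}
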
void __init rcu_init(void)
|
||||
@@ -2663,6 +2802,7 @@ void __init rcu_init(void)
|
||||
int cpu;
|
||||
|
||||
rcu_bootup_announce();
|
||||
rcu_init_geometry();
|
||||
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
|
||||
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
|
||||
__rcu_init_preempt();
|
||||
kernel/rcutree.h
@@ -42,28 +42,28 @@
|
||||
#define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
|
||||
|
||||
#if NR_CPUS <= RCU_FANOUT_1
|
||||
# define NUM_RCU_LVLS 1
|
||||
# define RCU_NUM_LVLS 1
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 (NR_CPUS)
|
||||
# define NUM_RCU_LVL_2 0
|
||||
# define NUM_RCU_LVL_3 0
|
||||
# define NUM_RCU_LVL_4 0
|
||||
#elif NR_CPUS <= RCU_FANOUT_2
|
||||
# define NUM_RCU_LVLS 2
|
||||
# define RCU_NUM_LVLS 2
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
|
||||
# define NUM_RCU_LVL_2 (NR_CPUS)
|
||||
# define NUM_RCU_LVL_3 0
|
||||
# define NUM_RCU_LVL_4 0
|
||||
#elif NR_CPUS <= RCU_FANOUT_3
|
||||
# define NUM_RCU_LVLS 3
|
||||
# define RCU_NUM_LVLS 3
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
|
||||
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
|
||||
# define NUM_RCU_LVL_3 (NR_CPUS)
|
||||
# define NUM_RCU_LVL_4 0
|
||||
#elif NR_CPUS <= RCU_FANOUT_4
|
||||
# define NUM_RCU_LVLS 4
|
||||
# define RCU_NUM_LVLS 4
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
|
||||
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
|
||||
@@ -76,6 +76,9 @@
|
||||
#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
|
||||
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
|
||||
|
||||
extern int rcu_num_lvls;
|
||||
extern int rcu_num_nodes;
|
||||
|
||||
/*
|
||||
* Dynticks per-CPU state.
|
||||
*/
|
||||
@@ -97,6 +100,7 @@ struct rcu_dynticks {
|
||||
/* # times non-lazy CBs posted to CPU. */
|
||||
unsigned long nonlazy_posted_snap;
|
||||
/* idle-period nonlazy_posted snapshot. */
|
||||
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
|
||||
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
|
||||
};
|
||||
|
||||
@@ -206,7 +210,7 @@ struct rcu_node {
|
||||
*/
|
||||
#define rcu_for_each_node_breadth_first(rsp, rnp) \
|
||||
for ((rnp) = &(rsp)->node[0]; \
|
||||
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
|
||||
(rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
|
||||
|
||||
/*
|
||||
* Do a breadth-first scan of the non-leaf rcu_node structures for the
|
||||
@@ -215,7 +219,7 @@ struct rcu_node {
|
||||
*/
|
||||
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
|
||||
for ((rnp) = &(rsp)->node[0]; \
|
||||
(rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
|
||||
(rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
|
||||
|
||||
/*
|
||||
* Scan the leaves of the rcu_node hierarchy for the specified rcu_state
|
||||
@@ -224,8 +228,8 @@ struct rcu_node {
|
||||
* It is still a leaf node, even if it is also the root node.
|
||||
*/
|
||||
#define rcu_for_each_leaf_node(rsp, rnp) \
|
||||
for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
|
||||
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
|
||||
for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
|
||||
(rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
|
||||
|
||||
/* Index values for nxttail array in struct rcu_data. */
|
||||
#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
|
||||
@@ -311,6 +315,9 @@ struct rcu_data {
|
||||
unsigned long n_rp_need_fqs;
|
||||
unsigned long n_rp_need_nothing;
|
||||
|
||||
/* 6) _rcu_barrier() callback. */
|
||||
struct rcu_head barrier_head;
|
||||
|
||||
int cpu;
|
||||
struct rcu_state *rsp;
|
||||
};
|
||||
@@ -357,10 +364,12 @@ do { \
|
||||
*/
|
||||
struct rcu_state {
|
||||
struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
|
||||
struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
|
||||
struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
|
||||
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
|
||||
u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
|
||||
u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
|
||||
struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
|
||||
void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
|
||||
void (*func)(struct rcu_head *head));
|
||||
|
||||
/* The following fields are guarded by the root rcu_node's lock. */
|
||||
|
||||
@@ -392,6 +401,11 @@ struct rcu_state {
|
||||
struct task_struct *rcu_barrier_in_progress;
|
||||
/* Task doing rcu_barrier(), */
|
||||
/* or NULL if no barrier. */
|
||||
struct mutex barrier_mutex; /* Guards barrier fields. */
|
||||
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
||||
struct completion barrier_completion; /* Wake at barrier end. */
|
||||
unsigned long n_barrier_done; /* ++ at start and end of */
|
||||
/* _rcu_barrier(). */
|
||||
raw_spinlock_t fqslock; /* Only one task forcing */
|
||||
/* quiescent states. */
|
||||
unsigned long jiffies_force_qs; /* Time at which to invoke */
|
||||
@@ -409,8 +423,13 @@ struct rcu_state {
|
||||
unsigned long gp_max; /* Maximum GP duration in */
|
||||
/* jiffies. */
|
||||
char *name; /* Name of structure. */
|
||||
struct list_head flavors; /* List of RCU flavors. */
|
||||
};
|
||||
|
||||
extern struct list_head rcu_struct_flavors;
|
||||
#define for_each_rcu_flavor(rsp) \
|
||||
list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
|
||||
|
||||
/* Return values for rcu_preempt_offline_tasks(). */
|
||||
|
||||
#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
|
||||
@@ -453,25 +472,18 @@ static void rcu_stop_cpu_kthread(int cpu);
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
|
||||
static int rcu_print_task_stall(struct rcu_node *rnp);
|
||||
static void rcu_preempt_stall_reset(void);
|
||||
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
|
||||
struct rcu_node *rnp,
|
||||
struct rcu_data *rdp);
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
static void rcu_preempt_cleanup_dead_cpu(int cpu);
|
||||
static void rcu_preempt_check_callbacks(int cpu);
|
||||
static void rcu_preempt_process_callbacks(void);
|
||||
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
|
||||
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
|
||||
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
|
||||
bool wake);
|
||||
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
|
||||
static int rcu_preempt_pending(int cpu);
|
||||
static int rcu_preempt_cpu_has_callbacks(int cpu);
|
||||
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
|
||||
static void rcu_preempt_cleanup_dying_cpu(void);
|
||||
static void __init __rcu_init_preempt(void);
|
||||
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
|
||||
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
|
||||
kernel/rcutree_plugin.h
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
|
||||
printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
|
||||
#endif
|
||||
#if NUM_RCU_LVL_4 != 0
|
||||
printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
|
||||
printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
|
||||
#endif
|
||||
if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
|
||||
printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
|
||||
if (nr_cpu_ids != NR_CPUS)
|
||||
printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TREE_PREEMPT_RCU
|
||||
|
||||
struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
|
||||
struct rcu_state rcu_preempt_state =
|
||||
RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
|
||||
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
|
||||
static struct rcu_state *rcu_state = &rcu_preempt_state;
|
||||
|
||||
static void rcu_read_unlock_special(struct task_struct *t);
|
||||
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
|
||||
|
||||
/*
|
||||
@@ -232,18 +236,6 @@ static void rcu_preempt_note_context_switch(int cpu)
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Tree-preemptible RCU implementation for rcu_read_lock().
|
||||
* Just increment ->rcu_read_lock_nesting, shared state will be updated
|
||||
* if we block.
|
||||
*/
|
||||
void __rcu_read_lock(void)
|
||||
{
|
||||
current->rcu_read_lock_nesting++;
|
||||
barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__rcu_read_lock);
|
||||
|
||||
/*
|
||||
* Check for preempted RCU readers blocking the current grace period
|
||||
* for the specified rcu_node structure. If the caller needs a reliable
|
||||
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
|
||||
* notify RCU core processing or task having blocked during the RCU
|
||||
* read-side critical section.
|
||||
*/
|
||||
static noinline void rcu_read_unlock_special(struct task_struct *t)
|
||||
void rcu_read_unlock_special(struct task_struct *t)
|
||||
{
|
||||
int empty;
|
||||
int empty_exp;
|
||||
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
|
||||
rnp->grphi,
|
||||
!!rnp->gp_tasks);
|
||||
rcu_report_unblock_qs_rnp(rnp, flags);
|
||||
} else
|
||||
} else {
|
||||
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RCU_BOOST
|
||||
/* Unboost if we were boosted. */
|
||||
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Tree-preemptible RCU implementation for rcu_read_unlock().
|
||||
* Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
|
||||
* rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
|
||||
* invoke rcu_read_unlock_special() to clean up after a context switch
|
||||
* in an RCU read-side critical section and other special cases.
|
||||
*/
|
||||
void __rcu_read_unlock(void)
|
||||
{
|
||||
struct task_struct *t = current;
|
||||
|
||||
if (t->rcu_read_lock_nesting != 1)
|
||||
--t->rcu_read_lock_nesting;
|
||||
else {
|
||||
barrier(); /* critical section before exit code. */
|
||||
t->rcu_read_lock_nesting = INT_MIN;
|
||||
barrier(); /* assign before ->rcu_read_unlock_special load */
|
||||
if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
|
||||
rcu_read_unlock_special(t);
|
||||
barrier(); /* ->rcu_read_unlock_special load before assign */
|
||||
t->rcu_read_lock_nesting = 0;
|
||||
}
|
||||
#ifdef CONFIG_PROVE_LOCKING
|
||||
{
|
||||
int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
|
||||
|
||||
WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
|
||||
}
|
||||
#endif /* #ifdef CONFIG_PROVE_LOCKING */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
|
||||
|
||||
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
|
||||
|
||||
/*
|
||||
@@ -539,16 +500,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
|
||||
return ndetected;
|
||||
}
|
||||
|
||||
/*
|
||||
* Suppress preemptible RCU's CPU stall warnings by pushing the
|
||||
* time of the next stall-warning message comfortably far into the
|
||||
* future.
|
||||
*/
|
||||
static void rcu_preempt_stall_reset(void)
|
||||
{
|
||||
rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that the list of blocked tasks for the newly completed grace
|
||||
* period is in fact empty. It is a serious bug to complete a grace
|
||||
@@ -649,14 +600,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
|
||||
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
|
||||
/*
|
||||
* Do CPU-offline processing for preemptible RCU.
|
||||
*/
|
||||
static void rcu_preempt_cleanup_dead_cpu(int cpu)
|
||||
{
|
||||
rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for a quiescent state from the current CPU. When a task blocks,
|
||||
* the task is recorded in the corresponding CPU's rcu_node structure,
|
||||
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
|
||||
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Process callbacks for preemptible RCU.
|
||||
*/
|
||||
static void rcu_preempt_process_callbacks(void)
|
||||
{
|
||||
__rcu_process_callbacks(&rcu_preempt_state,
|
||||
&__get_cpu_var(rcu_preempt_data));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RCU_BOOST
|
||||
|
||||
static void rcu_preempt_do_callbacks(void)
|
||||
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
|
||||
int must_wait = 0;
|
||||
|
||||
raw_spin_lock_irqsave(&rnp->lock, flags);
|
||||
if (list_empty(&rnp->blkd_tasks))
|
||||
if (list_empty(&rnp->blkd_tasks)) {
|
||||
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
||||
else {
|
||||
} else {
|
||||
rnp->exp_tasks = rnp->blkd_tasks.next;
|
||||
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
|
||||
must_wait = 1;
|
||||
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
|
||||
* expedited grace period for us, just leave.
|
||||
*/
|
||||
while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
|
||||
if (trycount++ < 10)
|
||||
if (trycount++ < 10) {
|
||||
udelay(trycount * num_online_cpus());
|
||||
else {
|
||||
} else {
|
||||
synchronize_rcu();
|
||||
return;
|
||||
}
|
||||
@@ -917,50 +851,15 @@ mb_ret:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
|
||||
|
||||
/*
|
||||
* Check to see if there is any immediate preemptible-RCU-related work
|
||||
* to be done.
|
||||
*/
|
||||
static int rcu_preempt_pending(int cpu)
|
||||
{
|
||||
return __rcu_pending(&rcu_preempt_state,
|
||||
&per_cpu(rcu_preempt_data, cpu));
|
||||
}
|
||||
|
||||
/*
|
||||
* Does preemptible RCU have callbacks on this CPU?
|
||||
*/
|
||||
static int rcu_preempt_cpu_has_callbacks(int cpu)
|
||||
{
|
||||
return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
|
||||
}
|
||||
|
||||
/**
|
||||
* rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
|
||||
*/
|
||||
void rcu_barrier(void)
|
||||
{
|
||||
_rcu_barrier(&rcu_preempt_state, call_rcu);
|
||||
_rcu_barrier(&rcu_preempt_state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_barrier);
|
||||
|
||||
/*
|
||||
* Initialize preemptible RCU's per-CPU data.
|
||||
*/
|
||||
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
|
||||
{
|
||||
rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Move preemptible RCU's callbacks from dying CPU to other online CPU
|
||||
* and record a quiescent state.
|
||||
*/
|
||||
static void rcu_preempt_cleanup_dying_cpu(void)
|
||||
{
|
||||
rcu_cleanup_dying_cpu(&rcu_preempt_state);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize preemptible RCU's state structures.
|
||||
*/
|
||||
@@ -1045,14 +944,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, there is no need to suppress
|
||||
* its CPU stall warnings.
|
||||
*/
|
||||
static void rcu_preempt_stall_reset(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Because there is no preemptible RCU, there can be no readers blocked,
|
||||
* so there is no need to check for blocked tasks. So check only for
|
||||
@@ -1080,14 +971,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
|
||||
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it never needs CPU-offline
|
||||
* processing.
|
||||
*/
|
||||
static void rcu_preempt_cleanup_dead_cpu(int cpu)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it never has any callbacks
|
||||
* to check.
|
||||
@@ -1096,14 +979,6 @@ static void rcu_preempt_check_callbacks(int cpu)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it never has any callbacks
|
||||
* to process.
|
||||
*/
|
||||
static void rcu_preempt_process_callbacks(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Queue an RCU callback for lazy invocation after a grace period.
|
||||
* This will likely be later named something like "call_rcu_lazy()",
|
||||
@@ -1144,22 +1019,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
|
||||
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it never has any work to do.
|
||||
*/
|
||||
static int rcu_preempt_pending(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it never has callbacks
|
||||
*/
|
||||
static int rcu_preempt_cpu_has_callbacks(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, rcu_barrier() is just
|
||||
* another name for rcu_barrier_sched().
|
||||
@@ -1170,21 +1029,6 @@ void rcu_barrier(void)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_barrier);
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, there is no per-CPU
|
||||
* data to initialize.
|
||||
*/
|
||||
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Because there is no preemptible RCU, there is no cleanup to do.
|
||||
*/
|
||||
static void rcu_preempt_cleanup_dying_cpu(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Because preemptible RCU does not exist, it need not be initialized.
|
||||
*/
|
||||
@@ -1968,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
|
||||
*/
|
||||
#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
|
||||
#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
|
||||
#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
|
||||
#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
|
||||
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
|
||||
|
||||
extern int tick_nohz_enabled;
|
||||
|
||||
/*
|
||||
* Does the specified flavor of RCU have non-lazy callbacks pending on
|
||||
* the specified CPU? Both RCU flavor and CPU are specified by the
|
||||
@@ -2047,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
|
||||
return 1;
|
||||
}
|
||||
/* Set up for the possibility that RCU will post a timer. */
|
||||
if (rcu_cpu_has_nonlazy_callbacks(cpu))
|
||||
*delta_jiffies = RCU_IDLE_GP_DELAY;
|
||||
else
|
||||
*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
|
||||
if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
|
||||
*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
|
||||
RCU_IDLE_GP_DELAY) - jiffies;
|
||||
} else {
|
||||
*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
|
||||
*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
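The rcu_needs_cpu() change above stops handing back a raw delay and instead returns the distance to the next RCU_IDLE_GP_DELAY boundary, so CPUs that enter dyntick-idle at slightly different times converge on a shared wakeup tick. A small userspace illustration of that rounding; round_up_to() is a stand-in written here for clarity, while the kernel's round_up() requires a power-of-two alignment.

#include <stdio.h>

static unsigned long round_up_to(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	unsigned long gp_delay = 4;	/* RCU_IDLE_GP_DELAY, in jiffies */
	unsigned long jiffies;

	for (jiffies = 1000; jiffies < 1004; jiffies++) {
		unsigned long target = round_up_to(jiffies + gp_delay, gp_delay);

		/* later entries into idle all land on the same wake tick */
		printf("jiffies=%lu wake at %lu (delta %lu)\n",
		       jiffies, target, target - jiffies);
	}
	return 0;
}
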
@@ -2109,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
|
||||
|
||||
del_timer(&rdtp->idle_gp_timer);
|
||||
trace_rcu_prep_idle("Cleanup after idle");
|
||||
rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2134,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
|
||||
{
|
||||
struct timer_list *tp;
|
||||
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
||||
int tne;
|
||||
|
||||
/* Handle nohz enablement switches conservatively. */
|
||||
tne = ACCESS_ONCE(tick_nohz_enabled);
|
||||
if (tne != rdtp->tick_nohz_enabled_snap) {
|
||||
if (rcu_cpu_has_callbacks(cpu))
|
||||
invoke_rcu_core(); /* force nohz to see update. */
|
||||
rdtp->tick_nohz_enabled_snap = tne;
|
||||
return;
|
||||
}
|
||||
if (!tne)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If this is an idle re-entry, for example, due to use of
|
||||
@@ -2187,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
|
||||
if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
|
||||
trace_rcu_prep_idle("Dyntick with callbacks");
|
||||
rdtp->idle_gp_timer_expires =
|
||||
jiffies + RCU_IDLE_GP_DELAY;
|
||||
round_up(jiffies + RCU_IDLE_GP_DELAY,
|
||||
RCU_IDLE_GP_DELAY);
|
||||
} else {
|
||||
rdtp->idle_gp_timer_expires =
|
||||
jiffies + RCU_IDLE_LAZY_GP_DELAY;
|
||||
round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
|
||||
trace_rcu_prep_idle("Dyntick with lazy callbacks");
|
||||
}
|
||||
tp = &rdtp->idle_gp_timer;
|
||||
@@ -2231,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
|
||||
if (rcu_cpu_has_callbacks(cpu)) {
|
||||
trace_rcu_prep_idle("More callbacks");
|
||||
invoke_rcu_core();
|
||||
} else
|
||||
} else {
|
||||
trace_rcu_prep_idle("Callbacks drained");
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2269,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
|
||||
|
||||
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
|
||||
{
|
||||
*cp = '\0';
|
||||
}
|
||||
|
||||
#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
|
||||
kernel/rcutree_trace.c
@@ -46,6 +46,31 @@
|
||||
#define RCU_TREE_NONCORE
|
||||
#include "rcutree.h"
|
||||
|
||||
static int show_rcubarrier(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct rcu_state *rsp;
|
||||
|
||||
for_each_rcu_flavor(rsp)
|
||||
seq_printf(m, "%s: %c bcc: %d nbd: %lu\n",
|
||||
rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.',
|
||||
atomic_read(&rsp->barrier_cpu_count),
|
||||
rsp->n_barrier_done);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rcubarrier_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, show_rcubarrier, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations rcubarrier_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = rcubarrier_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_RCU_BOOST
|
||||
|
||||
static char convert_kthread_status(unsigned int kthread_status)
|
||||
@@ -95,24 +120,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
|
||||
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
|
||||
}
|
||||
|
||||
#define PRINT_RCU_DATA(name, func, m) \
|
||||
do { \
|
||||
int _p_r_d_i; \
|
||||
\
|
||||
for_each_possible_cpu(_p_r_d_i) \
|
||||
func(m, &per_cpu(name, _p_r_d_i)); \
|
||||
} while (0)
|
||||
|
||||
static int show_rcudata(struct seq_file *m, void *unused)
|
||||
{
|
||||
#ifdef CONFIG_TREE_PREEMPT_RCU
|
||||
seq_puts(m, "rcu_preempt:\n");
|
||||
PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
|
||||
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
|
||||
seq_puts(m, "rcu_sched:\n");
|
||||
PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
|
||||
seq_puts(m, "rcu_bh:\n");
|
||||
PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
|
||||
int cpu;
|
||||
struct rcu_state *rsp;
|
||||
|
||||
for_each_rcu_flavor(rsp) {
|
||||
seq_printf(m, "%s:\n", rsp->name);
|
||||
for_each_possible_cpu(cpu)
|
||||
print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -166,6 +183,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
|
||||
|
||||
static int show_rcudata_csv(struct seq_file *m, void *unused)
|
||||
{
|
||||
int cpu;
|
||||
struct rcu_state *rsp;
|
||||
|
||||
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
|
||||
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
|
||||
seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
|
||||
@@ -173,14 +193,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
|
||||
seq_puts(m, "\"kt\",\"ktl\"");
|
||||
#endif /* #ifdef CONFIG_RCU_BOOST */
|
||||
seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
|
||||
#ifdef CONFIG_TREE_PREEMPT_RCU
|
||||
seq_puts(m, "\"rcu_preempt:\"\n");
|
||||
PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
|
||||
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
|
||||
seq_puts(m, "\"rcu_sched:\"\n");
|
||||
PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
|
||||
seq_puts(m, "\"rcu_bh:\"\n");
|
||||
PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
|
||||
for_each_rcu_flavor(rsp) {
|
||||
seq_printf(m, "\"%s:\"\n", rsp->name);
|
||||
for_each_possible_cpu(cpu)
|
||||
print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -201,8 +218,7 @@ static const struct file_operations rcudata_csv_fops = {
|
||||
|
||||
static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
|
||||
{
|
||||
seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu "
|
||||
"j=%04x bt=%04x\n",
|
||||
seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ",
|
||||
rnp->grplo, rnp->grphi,
|
||||
"T."[list_empty(&rnp->blkd_tasks)],
|
||||
"N."[!rnp->gp_tasks],
|
||||
@@ -210,11 +226,11 @@ static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
|
||||
"B."[!rnp->boost_tasks],
|
||||
convert_kthread_status(rnp->boost_kthread_status),
|
||||
rnp->n_tasks_boosted, rnp->n_exp_boosts,
|
||||
rnp->n_normal_boosts,
|
||||
rnp->n_normal_boosts);
|
||||
seq_printf(m, "j=%04x bt=%04x\n",
|
||||
(int)(jiffies & 0xffff),
|
||||
(int)(rnp->boost_time & 0xffff));
|
||||
seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
|
||||
" balk",
|
||||
seq_printf(m, " balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
|
||||
rnp->n_balk_blkd_tasks,
|
||||
rnp->n_balk_exp_gp_tasks,
|
||||
rnp->n_balk_boost_tasks,
|
||||
@@ -270,15 +286,15 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
|
||||
struct rcu_node *rnp;
|
||||
|
||||
gpnum = rsp->gpnum;
|
||||
seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
|
||||
"nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
|
||||
rsp->completed, gpnum, rsp->fqs_state,
|
||||
seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x ",
|
||||
rsp->name, rsp->completed, gpnum, rsp->fqs_state,
|
||||
(long)(rsp->jiffies_force_qs - jiffies),
|
||||
(int)(jiffies & 0xffff),
|
||||
(int)(jiffies & 0xffff));
|
||||
seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
|
||||
rsp->n_force_qs, rsp->n_force_qs_ngp,
|
||||
rsp->n_force_qs - rsp->n_force_qs_ngp,
|
||||
rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
|
||||
for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
|
||||
for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
|
||||
if (rnp->level != level) {
|
||||
seq_puts(m, "\n");
|
||||
level = rnp->level;
|
||||
@@ -295,14 +311,10 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)

static int show_rcuhier(struct seq_file *m, void *unused)
{
#ifdef CONFIG_TREE_PREEMPT_RCU
	seq_puts(m, "rcu_preempt:\n");
	print_one_rcu_state(m, &rcu_preempt_state);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	seq_puts(m, "rcu_sched:\n");
	print_one_rcu_state(m, &rcu_sched_state);
	seq_puts(m, "rcu_bh:\n");
	print_one_rcu_state(m, &rcu_bh_state);
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		print_one_rcu_state(m, rsp);
	return 0;
}

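[Editor's note: the rcutree_trace.c hunks above all apply the same transformation: hand-unrolled per-flavor blocks (rcu_preempt behind its own #ifdef, then rcu_sched, then rcu_bh) collapse into a single walk over a registry of RCU flavors. A minimal user-space sketch of that list-plus-iterator-macro shape follows; the kernel's actual for_each_rcu_flavor() is assumed to walk a list of registered rcu_state structures, and struct flavor / for_each_flavor() below are illustrative names only.]

/* Editor's sketch, not kernel code. */
#include <stdio.h>

struct flavor {
	const char *name;
	struct flavor *next;
};

#define for_each_flavor(f, head) \
	for ((f) = (head); (f) != NULL; (f) = (f)->next)

int main(void)
{
	struct flavor bh = { "rcu_bh", NULL };
	struct flavor sched = { "rcu_sched", &bh };
	struct flavor *f;

	/* One loop replaces three copy-and-pasted per-flavor blocks. */
	for_each_flavor(f, &sched)
		printf("%s:\n", f->name);
	return 0;
}

[A new flavor then only needs to register itself on the list; every debugfs view above picks it up without another #ifdef.]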
@@ -343,11 +355,10 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)

static int show_rcugp(struct seq_file *m, void *unused)
{
#ifdef CONFIG_TREE_PREEMPT_RCU
	show_one_rcugp(m, &rcu_preempt_state);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	show_one_rcugp(m, &rcu_sched_state);
	show_one_rcugp(m, &rcu_bh_state);
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		show_one_rcugp(m, rsp);
	return 0;
}

@@ -366,44 +377,36 @@ static const struct file_operations rcugp_fops = {

static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
{
	seq_printf(m, "%3d%cnp=%ld "
		   "qsp=%ld rpq=%ld cbr=%ld cng=%ld "
		   "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
	seq_printf(m, "%3d%cnp=%ld ",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
		   rdp->n_rcu_pending,
		   rdp->n_rcu_pending);
	seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
		   rdp->n_rp_qs_pending,
		   rdp->n_rp_report_qs,
		   rdp->n_rp_cb_ready,
		   rdp->n_rp_cpu_needs_gp,
		   rdp->n_rp_cpu_needs_gp);
	seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
		   rdp->n_rp_gp_completed,
		   rdp->n_rp_gp_started,
		   rdp->n_rp_need_fqs,
		   rdp->n_rp_need_nothing);
}

static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
static int show_rcu_pending(struct seq_file *m, void *unused)
{
	int cpu;
	struct rcu_data *rdp;
	struct rcu_state *rsp;

	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rdp->beenonline)
			print_one_rcu_pending(m, rdp);
	for_each_rcu_flavor(rsp) {
		seq_printf(m, "%s:\n", rsp->name);
		for_each_possible_cpu(cpu) {
			rdp = per_cpu_ptr(rsp->rda, cpu);
			if (rdp->beenonline)
				print_one_rcu_pending(m, rdp);
		}
	}
}

static int show_rcu_pending(struct seq_file *m, void *unused)
{
#ifdef CONFIG_TREE_PREEMPT_RCU
	seq_puts(m, "rcu_preempt:\n");
	print_rcu_pendings(m, &rcu_preempt_state);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	seq_puts(m, "rcu_sched:\n");
	print_rcu_pendings(m, &rcu_sched_state);
	seq_puts(m, "rcu_bh:\n");
	print_rcu_pendings(m, &rcu_bh_state);
	return 0;
}

@@ -453,6 +456,11 @@ static int __init rcutree_trace_init(void)
	if (!rcudir)
		goto free_out;

	retval = debugfs_create_file("rcubarrier", 0444, rcudir,
				     NULL, &rcubarrier_fops);
	if (!retval)
		goto free_out;

	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &rcudata_fops);
	if (!retval)
kernel/smp.c
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
@@ -3,8 +3,6 @@

struct task_struct;

int smpboot_prepare(unsigned int cpu);

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
struct task_struct *idle_thread_get(unsigned int cpu);
void idle_thread_set_boot_cpu(void);
@@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void)
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;
int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
@@ -745,6 +745,7 @@ static void timekeeping_resume(void)
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(&timekeeper, false);
	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	touch_softlockup_watchdog();
@@ -312,7 +312,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
	if (unlikely(ftrace_disabled))
		return -ENODEV;

	if (FTRACE_WARN_ON(ops == &global_ops))
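[Editor's note: the hunk above wraps the ftrace_disabled test in unlikely(), the kernel's branch-prediction hint from <linux/compiler.h>, built on GCC's __builtin_expect(). A user-space sketch mirroring that definition, so the hunk can be read in isolation; subsystem_disabled and register_thing() are illustrative stand-ins.]

/* Editor's sketch, not kernel code. */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

static int subsystem_disabled;	/* stand-in for ftrace_disabled */

static int register_thing(void)
{
	if (unlikely(subsystem_disabled))	/* rare error path */
		return -1;
	return 0;				/* common fast path */
}

int main(void)
{
	printf("%d\n", register_thing());
	return 0;
}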
@@ -4299,16 +4299,12 @@ int register_ftrace_function(struct ftrace_ops *ops)

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	ret = __register_ftrace_function(ops);
	if (!ret)
		ret = ftrace_startup(ops, 0);

out_unlock:
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
@@ -3239,6 +3239,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/* Don't bother swapping if the ring buffer is empty */
	if (rb_num_of_entries(cpu_buffer) == 0)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
@@ -830,6 +830,8 @@ int register_tracer(struct tracer *type)
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			/* Add the warning after printing 'FAILED' */
			WARN_ON(1);
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
@@ -1708,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
@@ -1718,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
@@ -1856,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)

	tr->data[cpu]->skipped_entries = 0;

	if (!iter->buffer_iter[cpu])
	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	buf_iter = iter->buffer_iter[cpu];
	ring_buffer_iter_reset(buf_iter);

	/*
@@ -2205,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2221,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2381,6 +2388,11 @@ __tracing_open(struct inode *inode, struct file *file)
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
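[Editor's note: the hunk above makes the per-CPU iterator array a separate allocation sized by num_possible_cpus(), paired with a new "release" error label that the following hunks wire up; the reading is that this replaces a fixed-size array embedded in the iterator, inferred from the hunks rather than stated in them. A sketch of the allocate-then-unwind shape, with calloc/free standing in for kzalloc/kfree and illustrative names throughout:]

/* Editor's sketch, not kernel code. */
#include <stdlib.h>

struct it {
	void **buffer_iter;
};

static struct it *open_it(int ncpus)
{
	struct it *it = calloc(1, sizeof(*it));

	if (!it)
		return NULL;
	it->buffer_iter = calloc(ncpus, sizeof(*it->buffer_iter));
	if (!it->buffer_iter)
		goto release;	/* mirrors the diff's new error label */
	return it;
release:
	free(it);
	return NULL;
}

int main(void)
{
	struct it *it = open_it(4);

	if (it) {
		free(it->buffer_iter);
		free(it);
	}
	return 0;
}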
@@ -2441,6 +2453,8 @@ __tracing_open(struct inode *inode, struct file *file)
fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
@@ -2481,6 +2495,7 @@ static int tracing_release(struct inode *inode, struct file *file)
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);
	return 0;
}
@@ -317,6 +317,14 @@ struct tracer {

#define TRACE_PIPE_ALL_CPU -1

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
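[Editor's note: the trace_buffer_iter() helper above is the crux of this run of trace.c hunks. Once buffer_iter is a dynamically allocated array, both the array pointer and an individual CPU's slot can legitimately be NULL, so every caller branches on the returned pointer instead of indexing blindly. A minimal sketch of the same NULL-safe accessor pattern, with illustrative names:]

/* Editor's sketch, not kernel code. */
#include <stddef.h>

struct iter {
	int **slots;	/* may be NULL when no per-CPU iterators exist */
};

static int *slot_get(const struct iter *it, int cpu)
{
	/* Guard the array first, then the slot, just like the helper. */
	if (it->slots && it->slots[cpu])
		return it->slots[cpu];
	return NULL;
}

int main(void)
{
	struct iter it = { NULL };

	return slot_get(&it, 0) ? 1 : 0;	/* NULL array handled safely */
}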
@@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
@@ -1325,4 +1325,4 @@ __init static int init_events(void)

	return 0;
}
device_initcall(init_events);
early_initcall(init_events);