Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (123 commits)
  perf: Remove the nmi parameter from the oprofile_perf backend
  x86, perf: Make copy_from_user_nmi() a library function
  perf: Remove perf_event_attr::type check
  x86, perf: P4 PMU - Fix typos in comments and style cleanup
  perf tools: Make test use the preset debugfs path
  perf tools: Add automated tests for events parsing
  perf tools: De-opt the parse_events function
  perf script: Fix display of IP address for non-callchain path
  perf tools: Fix endian conversion reading event attr from file header
  perf tools: Add missing 'node' alias to the hw_cache[] array
  perf probe: Support adding probes on offline kernel modules
  perf probe: Add probed module in front of function
  perf probe: Introduce debuginfo to encapsulate dwarf information
  perf-probe: Move dwarf library routines to dwarf-aux.{c, h}
  perf probe: Remove redundant dwarf functions
  perf probe: Move strtailcmp to string.c
  perf probe: Rename DIE_FIND_CB_FOUND to DIE_FIND_CB_END
  tracing/kprobe: Update symbol reference when loading module
  tracing/kprobes: Support module init function probing
  kprobes: Return -ENOENT if probe point doesn't exist
  ...

kernel/async.c

@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel.
 */

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

@@ -128,7 +129,8 @@ static void async_run_entry_fn(struct work_struct *work)

        /* 2) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
                printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
                        (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }

@@ -136,7 +138,7 @@ static void async_run_entry_fn(struct work_struct *work)
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %lli_%pF returned 0 after %lld usecs\n",
                printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);

@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
        ktime_t starttime, delta, endtime;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("async_waiting @ %i\n", task_pid_nr(current));
                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk("async_continuing @ %i after %lli usec\n",
                printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }

kernel/events/Makefile

@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif

obj-y := core.o
obj-y := core.o ring_buffer.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
(File diff too large to display; diff not loaded)

kernel/events/hw_breakpoint.c

@@ -431,9 +431,11 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
        return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
                                                context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

@@ -502,7 +504,8 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
                            perf_overflow_handler_t triggered,
                            void *context)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;

@@ -515,7 +518,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);

                *pevent = bp;

kernel/events/internal.h (new file, 96 lines)
@@ -0,0 +1,96 @@
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#define RING_BUFFER_WRITABLE 0x01

struct ring_buffer {
        atomic_t refcount;
        struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct work;
        int page_order;         /* allocation order */
#endif
        int nr_pages;           /* nr of data pages */
        int writable;           /* are we writable */

        atomic_t poll;          /* POLL_ for wakeups */

        local_t head;           /* write position */
        local_t nest;           /* nested writers */
        local_t events;         /* event limit */
        local_t wakeup;         /* wakeup stamp */
        local_t lost;           /* nr records lost */

        long watermark;         /* wakeup watermark */

        struct perf_event_mmap_page *user_page;
        void *data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

static unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline void
__output_copy(struct perf_output_handle *handle,
              const void *buf, unsigned int len)
{
        do {
                unsigned long size = min_t(unsigned long, handle->size, len);

                memcpy(handle->addr, buf, size);

                len -= size;
                handle->addr += size;
                buf += size;
                handle->size -= size;
                if (!handle->size) {
                        struct ring_buffer *rb = handle->rb;

                        handle->page++;
                        handle->page &= rb->nr_pages - 1;
                        handle->addr = rb->data_pages[handle->page];
                        handle->size = PAGE_SIZE << page_order(rb);
                }
        } while (len);
}

#endif /* _KERNEL_EVENTS_INTERNAL_H */

kernel/events/ring_buffer.c (new file, 380 lines)
@@ -0,0 +1,380 @@
|
||||
/*
|
||||
* Performance events ring-buffer code:
|
||||
*
|
||||
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
|
||||
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
|
||||
*
|
||||
* For licensing details see kernel-base/COPYING
|
||||
*/
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
|
||||
unsigned long offset, unsigned long head)
|
||||
{
|
||||
unsigned long mask;
|
||||
|
||||
if (!rb->writable)
|
||||
return true;
|
||||
|
||||
mask = perf_data_size(rb) - 1;
|
||||
|
||||
offset = (offset - tail) & mask;
|
||||
head = (head - tail) & mask;
|
||||
|
||||
if ((int)(head - offset) < 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void perf_output_wakeup(struct perf_output_handle *handle)
|
||||
{
|
||||
atomic_set(&handle->rb->poll, POLL_IN);
|
||||
|
||||
handle->event->pending_wakeup = 1;
|
||||
irq_work_queue(&handle->event->pending);
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to ensure a later event_id doesn't publish a head when a former
|
||||
* event isn't done writing. However since we need to deal with NMIs we
|
||||
* cannot fully serialize things.
|
||||
*
|
||||
* We only publish the head (and generate a wakeup) when the outer-most
|
||||
* event completes.
|
||||
*/
|
||||
static void perf_output_get_handle(struct perf_output_handle *handle)
|
||||
{
|
||||
struct ring_buffer *rb = handle->rb;
|
||||
|
||||
preempt_disable();
|
||||
local_inc(&rb->nest);
|
||||
handle->wakeup = local_read(&rb->wakeup);
|
||||
}
|
||||
|
||||
static void perf_output_put_handle(struct perf_output_handle *handle)
|
||||
{
|
||||
struct ring_buffer *rb = handle->rb;
|
||||
unsigned long head;
|
||||
|
||||
again:
|
||||
head = local_read(&rb->head);
|
||||
|
||||
/*
|
||||
* IRQ/NMI can happen here, which means we can miss a head update.
|
||||
*/
|
||||
|
||||
if (!local_dec_and_test(&rb->nest))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Publish the known good head. Rely on the full barrier implied
|
||||
* by atomic_dec_and_test() order the rb->head read and this
|
||||
* write.
|
||||
*/
|
||||
rb->user_page->data_head = head;
|
||||
|
||||
/*
|
||||
* Now check if we missed an update, rely on the (compiler)
|
||||
* barrier in atomic_dec_and_test() to re-read rb->head.
|
||||
*/
|
||||
if (unlikely(head != local_read(&rb->head))) {
|
||||
local_inc(&rb->nest);
|
||||
goto again;
|
||||
}
|
||||
|
||||
if (handle->wakeup != local_read(&rb->wakeup))
|
||||
perf_output_wakeup(handle);
|
||||
|
||||
out:
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
int perf_output_begin(struct perf_output_handle *handle,
|
||||
struct perf_event *event, unsigned int size)
|
||||
{
|
||||
struct ring_buffer *rb;
|
||||
unsigned long tail, offset, head;
|
||||
int have_lost;
|
||||
struct perf_sample_data sample_data;
|
||||
struct {
|
||||
struct perf_event_header header;
|
||||
u64 id;
|
||||
u64 lost;
|
||||
} lost_event;
|
||||
|
||||
rcu_read_lock();
|
||||
/*
|
||||
* For inherited events we send all the output towards the parent.
|
||||
*/
|
||||
if (event->parent)
|
||||
event = event->parent;
|
||||
|
||||
rb = rcu_dereference(event->rb);
|
||||
if (!rb)
|
||||
goto out;
|
||||
|
||||
handle->rb = rb;
|
||||
handle->event = event;
|
||||
|
||||
if (!rb->nr_pages)
|
||||
goto out;
|
||||
|
||||
have_lost = local_read(&rb->lost);
|
||||
if (have_lost) {
|
||||
lost_event.header.size = sizeof(lost_event);
|
||||
perf_event_header__init_id(&lost_event.header, &sample_data,
|
||||
event);
|
||||
size += lost_event.header.size;
|
||||
}
|
||||
|
||||
perf_output_get_handle(handle);
|
||||
|
||||
do {
|
||||
/*
|
||||
* Userspace could choose to issue a mb() before updating the
|
||||
* tail pointer. So that all reads will be completed before the
|
||||
* write is issued.
|
||||
*/
|
||||
tail = ACCESS_ONCE(rb->user_page->data_tail);
|
||||
smp_rmb();
|
||||
offset = head = local_read(&rb->head);
|
||||
head += size;
|
||||
if (unlikely(!perf_output_space(rb, tail, offset, head)))
|
||||
goto fail;
|
||||
} while (local_cmpxchg(&rb->head, offset, head) != offset);
|
||||
|
||||
if (head - local_read(&rb->wakeup) > rb->watermark)
|
||||
local_add(rb->watermark, &rb->wakeup);
|
||||
|
||||
handle->page = offset >> (PAGE_SHIFT + page_order(rb));
|
||||
handle->page &= rb->nr_pages - 1;
|
||||
handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
|
||||
handle->addr = rb->data_pages[handle->page];
|
||||
handle->addr += handle->size;
|
||||
handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;
|
||||
|
||||
if (have_lost) {
|
||||
lost_event.header.type = PERF_RECORD_LOST;
|
||||
lost_event.header.misc = 0;
|
||||
lost_event.id = event->id;
|
||||
lost_event.lost = local_xchg(&rb->lost, 0);
|
||||
|
||||
perf_output_put(handle, lost_event);
|
||||
perf_event__output_id_sample(event, handle, &sample_data);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
local_inc(&rb->lost);
|
||||
perf_output_put_handle(handle);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
void perf_output_copy(struct perf_output_handle *handle,
|
||||
const void *buf, unsigned int len)
|
||||
{
|
||||
__output_copy(handle, buf, len);
|
||||
}
|
||||
|
||||
void perf_output_end(struct perf_output_handle *handle)
|
||||
{
|
||||
perf_output_put_handle(handle);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void
|
||||
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
|
||||
{
|
||||
long max_size = perf_data_size(rb);
|
||||
|
||||
if (watermark)
|
||||
rb->watermark = min(max_size, watermark);
|
||||
|
||||
if (!rb->watermark)
|
||||
rb->watermark = max_size / 2;
|
||||
|
||||
if (flags & RING_BUFFER_WRITABLE)
|
||||
rb->writable = 1;
|
||||
|
||||
atomic_set(&rb->refcount, 1);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_PERF_USE_VMALLOC
|
||||
|
||||
/*
|
||||
* Back perf_mmap() with regular GFP_KERNEL-0 pages.
|
||||
*/
|
||||
|
||||
struct page *
|
||||
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
|
||||
{
|
||||
if (pgoff > rb->nr_pages)
|
||||
return NULL;
|
||||
|
||||
if (pgoff == 0)
|
||||
return virt_to_page(rb->user_page);
|
||||
|
||||
return virt_to_page(rb->data_pages[pgoff - 1]);
|
||||
}
|
||||
|
||||
static void *perf_mmap_alloc_page(int cpu)
|
||||
{
|
||||
struct page *page;
|
||||
int node;
|
||||
|
||||
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
|
||||
page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
return page_address(page);
|
||||
}
|
||||
|
||||
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
|
||||
{
|
||||
struct ring_buffer *rb;
|
||||
unsigned long size;
|
||||
int i;
|
||||
|
||||
size = sizeof(struct ring_buffer);
|
||||
size += nr_pages * sizeof(void *);
|
||||
|
||||
rb = kzalloc(size, GFP_KERNEL);
|
||||
if (!rb)
|
||||
goto fail;
|
||||
|
||||
rb->user_page = perf_mmap_alloc_page(cpu);
|
||||
if (!rb->user_page)
|
||||
goto fail_user_page;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
rb->data_pages[i] = perf_mmap_alloc_page(cpu);
|
||||
if (!rb->data_pages[i])
|
||||
goto fail_data_pages;
|
||||
}
|
||||
|
||||
rb->nr_pages = nr_pages;
|
||||
|
||||
ring_buffer_init(rb, watermark, flags);
|
||||
|
||||
return rb;
|
||||
|
||||
fail_data_pages:
|
||||
for (i--; i >= 0; i--)
|
||||
free_page((unsigned long)rb->data_pages[i]);
|
||||
|
||||
free_page((unsigned long)rb->user_page);
|
||||
|
||||
fail_user_page:
|
||||
kfree(rb);
|
||||
|
||||
fail:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void perf_mmap_free_page(unsigned long addr)
|
||||
{
|
||||
struct page *page = virt_to_page((void *)addr);
|
||||
|
||||
page->mapping = NULL;
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
void rb_free(struct ring_buffer *rb)
|
||||
{
|
||||
int i;
|
||||
|
||||
perf_mmap_free_page((unsigned long)rb->user_page);
|
||||
for (i = 0; i < rb->nr_pages; i++)
|
||||
perf_mmap_free_page((unsigned long)rb->data_pages[i]);
|
||||
kfree(rb);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
struct page *
|
||||
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
|
||||
{
|
||||
if (pgoff > (1UL << page_order(rb)))
|
||||
return NULL;
|
||||
|
||||
return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
|
||||
}
|
||||
|
||||
static void perf_mmap_unmark_page(void *addr)
|
||||
{
|
||||
struct page *page = vmalloc_to_page(addr);
|
||||
|
||||
page->mapping = NULL;
|
||||
}
|
||||
|
||||
static void rb_free_work(struct work_struct *work)
|
||||
{
|
||||
struct ring_buffer *rb;
|
||||
void *base;
|
||||
int i, nr;
|
||||
|
||||
rb = container_of(work, struct ring_buffer, work);
|
||||
nr = 1 << page_order(rb);
|
||||
|
||||
base = rb->user_page;
|
||||
for (i = 0; i < nr + 1; i++)
|
||||
perf_mmap_unmark_page(base + (i * PAGE_SIZE));
|
||||
|
||||
vfree(base);
|
||||
kfree(rb);
|
||||
}
|
||||
|
||||
void rb_free(struct ring_buffer *rb)
|
||||
{
|
||||
schedule_work(&rb->work);
|
||||
}
|
||||
|
||||
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
|
||||
{
|
||||
struct ring_buffer *rb;
|
||||
unsigned long size;
|
||||
void *all_buf;
|
||||
|
||||
size = sizeof(struct ring_buffer);
|
||||
size += sizeof(void *);
|
||||
|
||||
rb = kzalloc(size, GFP_KERNEL);
|
||||
if (!rb)
|
||||
goto fail;
|
||||
|
||||
INIT_WORK(&rb->work, rb_free_work);
|
||||
|
||||
all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
|
||||
if (!all_buf)
|
||||
goto fail_all_buf;
|
||||
|
||||
rb->user_page = all_buf;
|
||||
rb->data_pages[0] = all_buf + PAGE_SIZE;
|
||||
rb->page_order = ilog2(nr_pages);
|
||||
rb->nr_pages = 1;
|
||||
|
||||
ring_buffer_init(rb, watermark, flags);
|
||||
|
||||
return rb;
|
||||
|
||||
fail_all_buf:
|
||||
kfree(rb);
|
||||
|
||||
fail:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif
|
@@ -1255,19 +1255,29 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
|
||||
/*
|
||||
* If we have a symbol_name argument, look it up and add the offset field
|
||||
* to it. This way, we can specify a relative address to a symbol.
|
||||
* This returns encoded errors if it fails to look up symbol or invalid
|
||||
* combination of parameters.
|
||||
*/
|
||||
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
|
||||
{
|
||||
kprobe_opcode_t *addr = p->addr;
|
||||
|
||||
if ((p->symbol_name && p->addr) ||
|
||||
(!p->symbol_name && !p->addr))
|
||||
goto invalid;
|
||||
|
||||
if (p->symbol_name) {
|
||||
if (addr)
|
||||
return NULL;
|
||||
kprobe_lookup_name(p->symbol_name, addr);
|
||||
if (!addr)
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
if (!addr)
|
||||
return NULL;
|
||||
return (kprobe_opcode_t *)(((char *)addr) + p->offset);
|
||||
addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
|
||||
if (addr)
|
||||
return addr;
|
||||
|
||||
invalid:
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
|
||||
@@ -1311,8 +1321,8 @@ int __kprobes register_kprobe(struct kprobe *p)
|
||||
kprobe_opcode_t *addr;
|
||||
|
||||
addr = kprobe_addr(p);
|
||||
if (!addr)
|
||||
return -EINVAL;
|
||||
if (IS_ERR(addr))
|
||||
return PTR_ERR(addr);
|
||||
p->addr = addr;
|
||||
|
||||
ret = check_kprobe_rereg(p);
|
||||
@@ -1335,6 +1345,8 @@ int __kprobes register_kprobe(struct kprobe *p)
|
||||
*/
|
||||
probed_mod = __module_text_address((unsigned long) p->addr);
|
||||
if (probed_mod) {
|
||||
/* Return -ENOENT if fail. */
|
||||
ret = -ENOENT;
|
||||
/*
|
||||
* We must hold a refcount of the probed module while updating
|
||||
* its code to prohibit unexpected unloading.
|
||||
@@ -1351,6 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
|
||||
module_put(probed_mod);
|
||||
goto fail_with_jump_label;
|
||||
}
|
||||
/* ret will be updated by following code */
|
||||
}
|
||||
preempt_enable();
|
||||
jump_label_unlock();
|
||||
@@ -1399,7 +1412,7 @@ out:
|
||||
fail_with_jump_label:
|
||||
preempt_enable();
|
||||
jump_label_unlock();
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(register_kprobe);
|
||||
|
||||
@@ -1686,8 +1699,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
|
||||
|
||||
if (kretprobe_blacklist_size) {
|
||||
addr = kprobe_addr(&rp->kp);
|
||||
if (!addr)
|
||||
return -EINVAL;
|
||||
if (IS_ERR(addr))
|
||||
return PTR_ERR(addr);
|
||||
|
||||
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
|
||||
if (kretprobe_blacklist[i].addr == addr)
|
||||
|

kernel/sched.c

@@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)

        if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
        }

        __set_task_cpu(p, new_cpu);

kernel/stacktrace.c

@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
EXPORT_SYMBOL_GPL(print_stack_trace);

/*
 * Architectures that do not implement save_stack_trace_tsk get this
 * weak alias and a once-per-bootup warning (whenever this facility
 * is utilized - for example by procfs):
 * Architectures that do not implement save_stack_trace_tsk or
 * save_stack_trace_regs get this weak alias and a once-per-bootup warning
 * (whenever this facility is utilized - for example by procfs):
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}

__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}
@@ -32,7 +32,6 @@
|
||||
|
||||
#include <trace/events/sched.h>
|
||||
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
#include "trace_output.h"
|
||||
@@ -82,14 +81,14 @@ static int ftrace_disabled __read_mostly;
|
||||
|
||||
static DEFINE_MUTEX(ftrace_lock);
|
||||
|
||||
static struct ftrace_ops ftrace_list_end __read_mostly =
|
||||
{
|
||||
static struct ftrace_ops ftrace_list_end __read_mostly = {
|
||||
.func = ftrace_stub,
|
||||
};
|
||||
|
||||
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
|
||||
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
|
||||
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
|
||||
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
|
||||
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
|
||||
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
|
||||
static struct ftrace_ops global_ops;
|
||||
@@ -148,9 +147,11 @@ void clear_ftrace_function(void)
|
||||
{
|
||||
ftrace_trace_function = ftrace_stub;
|
||||
__ftrace_trace_function = ftrace_stub;
|
||||
__ftrace_trace_function_delay = ftrace_stub;
|
||||
ftrace_pid_function = ftrace_stub;
|
||||
}
|
||||
|
||||
#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
|
||||
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
|
||||
/*
|
||||
* For those archs that do not test ftrace_trace_stop in their
|
||||
@@ -209,8 +210,13 @@ static void update_ftrace_function(void)
|
||||
|
||||
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
|
||||
ftrace_trace_function = func;
|
||||
#else
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
/* do not update till all functions have been modified */
|
||||
__ftrace_trace_function_delay = func;
|
||||
#else
|
||||
__ftrace_trace_function = func;
|
||||
#endif
|
||||
ftrace_trace_function = ftrace_test_stop_func;
|
||||
#endif
|
||||
}
|
||||
@@ -785,8 +791,7 @@ static void unregister_ftrace_profiler(void)
|
||||
unregister_ftrace_graph();
|
||||
}
|
||||
#else
|
||||
static struct ftrace_ops ftrace_profile_ops __read_mostly =
|
||||
{
|
||||
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
|
||||
.func = function_profile_call,
|
||||
};
|
||||
|
||||
@@ -806,19 +811,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
unsigned long val;
|
||||
char buf[64]; /* big enough to hold a number */
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
val = !!val;
|
||||
@@ -1182,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
|
||||
static void
|
||||
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
|
||||
|
||||
static int
|
||||
ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
|
||||
ftrace_hash_move(struct ftrace_ops *ops, int enable,
|
||||
struct ftrace_hash **dst, struct ftrace_hash *src)
|
||||
{
|
||||
struct ftrace_func_entry *entry;
|
||||
struct hlist_node *tp, *tn;
|
||||
@@ -1193,8 +1195,15 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
|
||||
unsigned long key;
|
||||
int size = src->count;
|
||||
int bits = 0;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Remove the current set, update the hash and add
|
||||
* them back.
|
||||
*/
|
||||
ftrace_hash_rec_disable(ops, enable);
|
||||
|
||||
/*
|
||||
* If the new source is empty, just free dst and assign it
|
||||
* the empty_hash.
|
||||
@@ -1215,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
|
||||
if (bits > FTRACE_HASH_MAX_BITS)
|
||||
bits = FTRACE_HASH_MAX_BITS;
|
||||
|
||||
ret = -ENOMEM;
|
||||
new_hash = alloc_ftrace_hash(bits);
|
||||
if (!new_hash)
|
||||
return -ENOMEM;
|
||||
goto out;
|
||||
|
||||
size = 1 << src->size_bits;
|
||||
for (i = 0; i < size; i++) {
|
||||
@@ -1236,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
|
||||
rcu_assign_pointer(*dst, new_hash);
|
||||
free_ftrace_hash_rcu(old_hash);
|
||||
|
||||
return 0;
|
||||
ret = 0;
|
||||
out:
|
||||
/*
|
||||
* Enable regardless of ret:
|
||||
* On success, we enable the new hash.
|
||||
* On failure, we re-enable the original hash.
|
||||
*/
|
||||
ftrace_hash_rec_enable(ops, enable);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1596,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
|
||||
{
|
||||
int *command = data;
|
||||
|
||||
/*
|
||||
* Do not call function tracer while we update the code.
|
||||
* We are in stop machine, no worrying about races.
|
||||
*/
|
||||
function_trace_stop++;
|
||||
|
||||
if (*command & FTRACE_ENABLE_CALLS)
|
||||
ftrace_replace_code(1);
|
||||
else if (*command & FTRACE_DISABLE_CALLS)
|
||||
@@ -1609,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
|
||||
else if (*command & FTRACE_STOP_FUNC_RET)
|
||||
ftrace_disable_ftrace_graph_caller();
|
||||
|
||||
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
|
||||
/*
|
||||
* For archs that call ftrace_test_stop_func(), we must
|
||||
* wait till after we update all the function callers
|
||||
* before we update the callback. This keeps different
|
||||
* ops that record different functions from corrupting
|
||||
* each other.
|
||||
*/
|
||||
__ftrace_trace_function = __ftrace_trace_function_delay;
|
||||
#endif
|
||||
function_trace_stop--;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1744,10 +1781,36 @@ static cycle_t ftrace_update_time;
|
||||
static unsigned long ftrace_update_cnt;
|
||||
unsigned long ftrace_update_tot_cnt;
|
||||
|
||||
static int ops_traces_mod(struct ftrace_ops *ops)
|
||||
{
|
||||
struct ftrace_hash *hash;
|
||||
|
||||
hash = ops->filter_hash;
|
||||
return !!(!hash || !hash->count);
|
||||
}
|
||||
|
||||
static int ftrace_update_code(struct module *mod)
|
||||
{
|
||||
struct dyn_ftrace *p;
|
||||
cycle_t start, stop;
|
||||
unsigned long ref = 0;
|
||||
|
||||
/*
|
||||
* When adding a module, we need to check if tracers are
|
||||
* currently enabled and if they are set to trace all functions.
|
||||
* If they are, we need to enable the module functions as well
|
||||
* as update the reference counts for those function records.
|
||||
*/
|
||||
if (mod) {
|
||||
struct ftrace_ops *ops;
|
||||
|
||||
for (ops = ftrace_ops_list;
|
||||
ops != &ftrace_list_end; ops = ops->next) {
|
||||
if (ops->flags & FTRACE_OPS_FL_ENABLED &&
|
||||
ops_traces_mod(ops))
|
||||
ref++;
|
||||
}
|
||||
}
|
||||
|
||||
start = ftrace_now(raw_smp_processor_id());
|
||||
ftrace_update_cnt = 0;
|
||||
@@ -1760,7 +1823,7 @@ static int ftrace_update_code(struct module *mod)
|
||||
|
||||
p = ftrace_new_addrs;
|
||||
ftrace_new_addrs = p->newlist;
|
||||
p->flags = 0L;
|
||||
p->flags = ref;
|
||||
|
||||
/*
|
||||
* Do the initial record conversion from mcount jump
|
||||
@@ -1783,7 +1846,7 @@ static int ftrace_update_code(struct module *mod)
|
||||
* conversion puts the module to the correct state, thus
|
||||
* passing the ftrace_make_call check.
|
||||
*/
|
||||
if (ftrace_start_up) {
|
||||
if (ftrace_start_up && ref) {
|
||||
int failed = __ftrace_replace_code(p, 1);
|
||||
if (failed) {
|
||||
ftrace_bug(failed, p->ip);
|
||||
@@ -2407,10 +2470,9 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
|
||||
*/
|
||||
|
||||
static int
|
||||
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
|
||||
ftrace_mod_callback(struct ftrace_hash *hash,
|
||||
char *func, char *cmd, char *param, int enable)
|
||||
{
|
||||
struct ftrace_ops *ops = &global_ops;
|
||||
struct ftrace_hash *hash;
|
||||
char *mod;
|
||||
int ret = -EINVAL;
|
||||
|
||||
@@ -2430,11 +2492,6 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
|
||||
if (!strlen(mod))
|
||||
return ret;
|
||||
|
||||
if (enable)
|
||||
hash = ops->filter_hash;
|
||||
else
|
||||
hash = ops->notrace_hash;
|
||||
|
||||
ret = ftrace_match_module_records(hash, func, mod);
|
||||
if (!ret)
|
||||
ret = -EINVAL;
|
||||
@@ -2760,7 +2817,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
|
||||
mutex_lock(&ftrace_cmd_mutex);
|
||||
list_for_each_entry(p, &ftrace_commands, list) {
|
||||
if (strcmp(p->name, command) == 0) {
|
||||
ret = p->func(func, command, next, enable);
|
||||
ret = p->func(hash, func, command, next, enable);
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
@@ -2857,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
|
||||
ftrace_match_records(hash, buf, len);
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
ret = ftrace_hash_move(orig_hash, hash);
|
||||
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
|
||||
if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
|
||||
&& ftrace_enabled)
|
||||
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
|
||||
|
||||
mutex_unlock(&ftrace_lock);
|
||||
|
||||
mutex_unlock(&ftrace_regex_lock);
|
||||
@@ -3040,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
|
||||
orig_hash = &iter->ops->notrace_hash;
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
/*
|
||||
* Remove the current set, update the hash and add
|
||||
* them back.
|
||||
*/
|
||||
ftrace_hash_rec_disable(iter->ops, filter_hash);
|
||||
ret = ftrace_hash_move(orig_hash, iter->hash);
|
||||
if (!ret) {
|
||||
ftrace_hash_rec_enable(iter->ops, filter_hash);
|
||||
if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
|
||||
&& ftrace_enabled)
|
||||
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
|
||||
}
|
||||
ret = ftrace_hash_move(iter->ops, filter_hash,
|
||||
orig_hash, iter->hash);
|
||||
if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
|
||||
&& ftrace_enabled)
|
||||
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
|
||||
|
||||
mutex_unlock(&ftrace_lock);
|
||||
}
|
||||
free_ftrace_hash(iter->hash);
|
||||
@@ -3330,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
|
||||
{
|
||||
unsigned long *p;
|
||||
unsigned long addr;
|
||||
unsigned long flags;
|
||||
unsigned long flags = 0; /* Shut up gcc */
|
||||
|
||||
mutex_lock(&ftrace_lock);
|
||||
p = start;
|
||||
@@ -3348,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable interrupts to prevent interrupts from executing
|
||||
* code that is being modified.
|
||||
* We only need to disable interrupts on start up
|
||||
* because we are modifying code that an interrupt
|
||||
* may execute, and the modification is not atomic.
|
||||
* But for modules, nothing runs the code we modify
|
||||
* until we are finished with it, and there's no
|
||||
* reason to cause large interrupt latencies while we do it.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
if (!mod)
|
||||
local_irq_save(flags);
|
||||
ftrace_update_code(mod);
|
||||
local_irq_restore(flags);
|
||||
if (!mod)
|
||||
local_irq_restore(flags);
|
||||
mutex_unlock(&ftrace_lock);
|
||||
|
||||
return 0;
|
||||
|
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
unsigned nr_pages)
|
||||
{
|
||||
struct buffer_page *bpage, *tmp;
|
||||
unsigned long addr;
|
||||
LIST_HEAD(pages);
|
||||
unsigned i;
|
||||
|
||||
WARN_ON(!nr_pages);
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
struct page *page;
|
||||
/*
|
||||
* __GFP_NORETRY flag makes sure that the allocation fails
|
||||
* gracefully without invoking oom-killer and the system is
|
||||
* not destabilized.
|
||||
*/
|
||||
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
|
||||
GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
|
||||
GFP_KERNEL | __GFP_NORETRY,
|
||||
cpu_to_node(cpu_buffer->cpu));
|
||||
if (!bpage)
|
||||
goto free_pages;
|
||||
|
||||
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
|
||||
list_add(&bpage->list, &pages);
|
||||
|
||||
addr = __get_free_page(GFP_KERNEL);
|
||||
if (!addr)
|
||||
page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
|
||||
GFP_KERNEL | __GFP_NORETRY, 0);
|
||||
if (!page)
|
||||
goto free_pages;
|
||||
bpage->page = (void *)addr;
|
||||
bpage->page = page_address(page);
|
||||
rb_init_page(bpage->page);
|
||||
}
|
||||
|
||||
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
|
||||
{
|
||||
struct ring_buffer_per_cpu *cpu_buffer;
|
||||
struct buffer_page *bpage;
|
||||
unsigned long addr;
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
|
||||
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
|
||||
rb_check_bpage(cpu_buffer, bpage);
|
||||
|
||||
cpu_buffer->reader_page = bpage;
|
||||
addr = __get_free_page(GFP_KERNEL);
|
||||
if (!addr)
|
||||
page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
|
||||
if (!page)
|
||||
goto fail_free_reader;
|
||||
bpage->page = (void *)addr;
|
||||
bpage->page = page_address(page);
|
||||
rb_init_page(bpage->page);
|
||||
|
||||
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
|
||||
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
|
||||
unsigned nr_pages, rm_pages, new_pages;
|
||||
struct buffer_page *bpage, *tmp;
|
||||
unsigned long buffer_size;
|
||||
unsigned long addr;
|
||||
LIST_HEAD(pages);
|
||||
int i, cpu;
|
||||
|
||||
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
|
||||
|
||||
for_each_buffer_cpu(buffer, cpu) {
|
||||
for (i = 0; i < new_pages; i++) {
|
||||
struct page *page;
|
||||
/*
|
||||
* __GFP_NORETRY flag makes sure that the allocation
|
||||
* fails gracefully without invoking oom-killer and
|
||||
* the system is not destabilized.
|
||||
*/
|
||||
bpage = kzalloc_node(ALIGN(sizeof(*bpage),
|
||||
cache_line_size()),
|
||||
GFP_KERNEL, cpu_to_node(cpu));
|
||||
GFP_KERNEL | __GFP_NORETRY,
|
||||
cpu_to_node(cpu));
|
||||
if (!bpage)
|
||||
goto free_pages;
|
||||
list_add(&bpage->list, &pages);
|
||||
addr = __get_free_page(GFP_KERNEL);
|
||||
if (!addr)
|
||||
page = alloc_pages_node(cpu_to_node(cpu),
|
||||
GFP_KERNEL | __GFP_NORETRY, 0);
|
||||
if (!page)
|
||||
goto free_pages;
|
||||
bpage->page = (void *)addr;
|
||||
bpage->page = page_address(page);
|
||||
rb_init_page(bpage->page);
|
||||
}
|
||||
}
|
||||
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
|
||||
* Returns:
|
||||
* The page allocated, or NULL on error.
|
||||
*/
|
||||
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
|
||||
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
|
||||
{
|
||||
struct buffer_data_page *bpage;
|
||||
unsigned long addr;
|
||||
struct page *page;
|
||||
|
||||
addr = __get_free_page(GFP_KERNEL);
|
||||
if (!addr)
|
||||
page = alloc_pages_node(cpu_to_node(cpu),
|
||||
GFP_KERNEL | __GFP_NORETRY, 0);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
bpage = (void *)addr;
|
||||
bpage = page_address(page);
|
||||
|
||||
rb_init_page(bpage);
|
||||
|
||||
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
unsigned long *p = filp->private_data;
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val)
|
||||
|

kernel/trace/ring_buffer_benchmark.c

@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu)
        int inc;
        int i;

        bpage = ring_buffer_alloc_read_page(buffer);
        bpage = ring_buffer_alloc_read_page(buffer, cpu);
        if (!bpage)
                return EVENT_DROPPED;
@@ -343,26 +343,27 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
|
||||
static int trace_stop_count;
|
||||
static DEFINE_SPINLOCK(tracing_start_lock);
|
||||
|
||||
static void wakeup_work_handler(struct work_struct *work)
|
||||
{
|
||||
wake_up(&trace_wait);
|
||||
}
|
||||
|
||||
static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
|
||||
|
||||
/**
|
||||
* trace_wake_up - wake up tasks waiting for trace input
|
||||
*
|
||||
* Simply wakes up any task that is blocked on the trace_wait
|
||||
* queue. These is used with trace_poll for tasks polling the trace.
|
||||
* Schedules a delayed work to wake up any task that is blocked on the
|
||||
* trace_wait queue. These is used with trace_poll for tasks polling the
|
||||
* trace.
|
||||
*/
|
||||
void trace_wake_up(void)
|
||||
{
|
||||
int cpu;
|
||||
const unsigned long delay = msecs_to_jiffies(2);
|
||||
|
||||
if (trace_flags & TRACE_ITER_BLOCK)
|
||||
return;
|
||||
/*
|
||||
* The runqueue_is_locked() can fail, but this is the best we
|
||||
* have for now:
|
||||
*/
|
||||
cpu = get_cpu();
|
||||
if (!runqueue_is_locked(cpu))
|
||||
wake_up(&trace_wait);
|
||||
put_cpu();
|
||||
schedule_delayed_work(&wakeup_work, delay);
|
||||
}
|
||||
|
||||
static int __init set_buf_size(char *str)
|
||||
@@ -424,6 +425,7 @@ static const char *trace_options[] = {
|
||||
"graph-time",
|
||||
"record-cmd",
|
||||
"overwrite",
|
||||
"disable_on_free",
|
||||
NULL
|
||||
};
|
||||
|
||||
@@ -1191,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
|
||||
|
||||
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
|
||||
struct ring_buffer_event *event,
|
||||
unsigned long flags, int pc,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
ring_buffer_unlock_commit(buffer, event);
|
||||
|
||||
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
|
||||
ftrace_trace_userstack(buffer, flags, pc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
|
||||
|
||||
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
|
||||
struct ring_buffer_event *event)
|
||||
{
|
||||
@@ -1234,30 +1248,103 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_STACKTRACE
|
||||
|
||||
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
|
||||
struct ftrace_stack {
|
||||
unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
|
||||
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
|
||||
|
||||
static void __ftrace_trace_stack(struct ring_buffer *buffer,
|
||||
unsigned long flags,
|
||||
int skip, int pc)
|
||||
int skip, int pc, struct pt_regs *regs)
|
||||
{
|
||||
struct ftrace_event_call *call = &event_kernel_stack;
|
||||
struct ring_buffer_event *event;
|
||||
struct stack_entry *entry;
|
||||
struct stack_trace trace;
|
||||
|
||||
event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
|
||||
sizeof(*entry), flags, pc);
|
||||
if (!event)
|
||||
return;
|
||||
entry = ring_buffer_event_data(event);
|
||||
memset(&entry->caller, 0, sizeof(entry->caller));
|
||||
int use_stack;
|
||||
int size = FTRACE_STACK_ENTRIES;
|
||||
|
||||
trace.nr_entries = 0;
|
||||
trace.max_entries = FTRACE_STACK_ENTRIES;
|
||||
trace.skip = skip;
|
||||
trace.entries = entry->caller;
|
||||
|
||||
save_stack_trace(&trace);
|
||||
/*
|
||||
* Since events can happen in NMIs there's no safe way to
|
||||
* use the per cpu ftrace_stacks. We reserve it and if an interrupt
|
||||
* or NMI comes in, it will just have to use the default
|
||||
* FTRACE_STACK_SIZE.
|
||||
*/
|
||||
preempt_disable_notrace();
|
||||
|
||||
use_stack = ++__get_cpu_var(ftrace_stack_reserve);
|
||||
/*
|
||||
* We don't need any atomic variables, just a barrier.
|
||||
* If an interrupt comes in, we don't care, because it would
|
||||
* have exited and put the counter back to what we want.
|
||||
* We just need a barrier to keep gcc from moving things
|
||||
* around.
|
||||
*/
|
||||
barrier();
|
||||
if (use_stack == 1) {
|
||||
trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
|
||||
trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
|
||||
|
||||
if (regs)
|
||||
save_stack_trace_regs(regs, &trace);
|
||||
else
|
||||
save_stack_trace(&trace);
|
||||
|
||||
if (trace.nr_entries > size)
|
||||
size = trace.nr_entries;
|
||||
} else
|
||||
/* From now on, use_stack is a boolean */
|
||||
use_stack = 0;
|
||||
|
||||
size *= sizeof(unsigned long);
|
||||
|
||||
event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
|
||||
sizeof(*entry) + size, flags, pc);
|
||||
if (!event)
|
||||
goto out;
|
||||
entry = ring_buffer_event_data(event);
|
||||
|
||||
memset(&entry->caller, 0, size);
|
||||
|
||||
if (use_stack)
|
||||
memcpy(&entry->caller, trace.entries,
|
||||
trace.nr_entries * sizeof(unsigned long));
|
||||
else {
|
||||
trace.max_entries = FTRACE_STACK_ENTRIES;
|
||||
trace.entries = entry->caller;
|
||||
if (regs)
|
||||
save_stack_trace_regs(regs, &trace);
|
||||
else
|
||||
save_stack_trace(&trace);
|
||||
}
|
||||
|
||||
entry->size = trace.nr_entries;
|
||||
|
||||
if (!filter_check_discard(call, entry, buffer, event))
|
||||
ring_buffer_unlock_commit(buffer, event);
|
||||
|
||||
out:
|
||||
/* Again, don't let gcc optimize things here */
|
||||
barrier();
|
||||
__get_cpu_var(ftrace_stack_reserve)--;
|
||||
preempt_enable_notrace();
|
||||
|
||||
}
|
||||
|
||||
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
|
||||
int skip, int pc, struct pt_regs *regs)
|
||||
{
|
||||
if (!(trace_flags & TRACE_ITER_STACKTRACE))
|
||||
return;
|
||||
|
||||
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
|
||||
}
|
||||
|
||||
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
|
||||
@@ -1266,13 +1353,13 @@ void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
|
||||
if (!(trace_flags & TRACE_ITER_STACKTRACE))
|
||||
return;
|
||||
|
||||
__ftrace_trace_stack(buffer, flags, skip, pc);
|
||||
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
|
||||
}
|
||||
|
||||
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
|
||||
int pc)
|
||||
{
|
||||
__ftrace_trace_stack(tr->buffer, flags, skip, pc);
|
||||
__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1288,7 +1375,7 @@ void trace_dump_stack(void)
|
||||
local_save_flags(flags);
|
||||
|
||||
/* skipping 3 traces, seems to get us at the caller of this function */
|
||||
__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
|
||||
__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(int, user_stack_count);
|
||||
@@ -1536,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
|
||||
|
||||
ftrace_enable_cpu();
|
||||
|
||||
return event ? ring_buffer_event_data(event) : NULL;
|
||||
if (event) {
|
||||
iter->ent_size = ring_buffer_event_length(event);
|
||||
return ring_buffer_event_data(event);
|
||||
}
|
||||
iter->ent_size = 0;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct trace_entry *
|
||||
@@ -2051,6 +2143,9 @@ void trace_default_header(struct seq_file *m)
|
||||
{
|
||||
struct trace_iterator *iter = m->private;
|
||||
|
||||
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
|
||||
return;
|
||||
|
||||
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
|
||||
/* print nothing if the buffers are empty */
|
||||
if (trace_empty(iter))
|
||||
@@ -2701,20 +2796,11 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
struct trace_array *tr = filp->private_data;
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
val = !!val;
|
||||
@@ -2767,7 +2853,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
|
||||
return t->init(tr);
|
||||
}
|
||||
|
||||
static int tracing_resize_ring_buffer(unsigned long size)
|
||||
static int __tracing_resize_ring_buffer(unsigned long size)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -2819,6 +2905,41 @@ static int tracing_resize_ring_buffer(unsigned long size)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t tracing_resize_ring_buffer(unsigned long size)
|
||||
{
|
||||
int cpu, ret = size;
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
|
||||
tracing_stop();
|
||||
|
||||
/* disable all cpu buffers */
|
||||
for_each_tracing_cpu(cpu) {
|
||||
if (global_trace.data[cpu])
|
||||
atomic_inc(&global_trace.data[cpu]->disabled);
|
||||
if (max_tr.data[cpu])
|
||||
atomic_inc(&max_tr.data[cpu]->disabled);
|
||||
}
|
||||
|
||||
if (size != global_trace.entries)
|
||||
ret = __tracing_resize_ring_buffer(size);
|
||||
|
||||
if (ret < 0)
|
||||
ret = -ENOMEM;
|
||||
|
||||
for_each_tracing_cpu(cpu) {
|
||||
if (global_trace.data[cpu])
|
||||
atomic_dec(&global_trace.data[cpu]->disabled);
|
||||
if (max_tr.data[cpu])
|
||||
atomic_dec(&max_tr.data[cpu]->disabled);
|
||||
}
|
||||
|
||||
tracing_start();
|
||||
mutex_unlock(&trace_types_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* tracing_update_buffers - used by tracing facility to expand ring buffers
|
||||
@@ -2836,7 +2957,7 @@ int tracing_update_buffers(void)
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
if (!ring_buffer_expanded)
|
||||
ret = tracing_resize_ring_buffer(trace_buf_size);
|
||||
ret = __tracing_resize_ring_buffer(trace_buf_size);
|
||||
mutex_unlock(&trace_types_lock);
|
||||
|
||||
return ret;
|
||||
@@ -2860,7 +2981,7 @@ static int tracing_set_tracer(const char *buf)
|
||||
mutex_lock(&trace_types_lock);
|
||||
|
||||
if (!ring_buffer_expanded) {
|
||||
ret = tracing_resize_ring_buffer(trace_buf_size);
|
||||
ret = __tracing_resize_ring_buffer(trace_buf_size);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
ret = 0;
|
||||
@@ -2966,20 +3087,11 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
unsigned long *ptr = filp->private_data;
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*ptr = val * 1000;
|
||||
@@ -3434,67 +3546,54 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
unsigned long val;
|
||||
char buf[64];
|
||||
int ret, cpu;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* must have at least 1 entry */
|
||||
if (!val)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
|
||||
tracing_stop();
|
||||
|
||||
/* disable all cpu buffers */
|
||||
for_each_tracing_cpu(cpu) {
|
||||
if (global_trace.data[cpu])
|
||||
atomic_inc(&global_trace.data[cpu]->disabled);
|
||||
if (max_tr.data[cpu])
|
||||
atomic_inc(&max_tr.data[cpu]->disabled);
|
||||
}
|
||||
|
||||
/* value is in KB */
|
||||
val <<= 10;
|
||||
|
||||
if (val != global_trace.entries) {
|
||||
ret = tracing_resize_ring_buffer(val);
|
||||
if (ret < 0) {
|
||||
cnt = ret;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ret = tracing_resize_ring_buffer(val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
*ppos += cnt;
|
||||
|
||||
/* If check pages failed, return ENOMEM */
|
||||
if (tracing_disabled)
|
||||
cnt = -ENOMEM;
|
||||
out:
|
||||
for_each_tracing_cpu(cpu) {
|
||||
if (global_trace.data[cpu])
|
||||
atomic_dec(&global_trace.data[cpu]->disabled);
|
||||
if (max_tr.data[cpu])
|
||||
atomic_dec(&max_tr.data[cpu]->disabled);
|
||||
}
|
||||
return cnt;
|
||||
}
|
||||
|
||||
tracing_start();
|
||||
mutex_unlock(&trace_types_lock);
|
||||
static ssize_t
|
||||
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
|
||||
size_t cnt, loff_t *ppos)
|
||||
{
|
||||
/*
|
||||
* There is no need to read what the user has written, this function
|
||||
* is just to make sure that there is no error when "echo" is used
|
||||
*/
|
||||
|
||||
*ppos += cnt;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static int
|
||||
tracing_free_buffer_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
/* disable tracing ? */
|
||||
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
|
||||
tracing_off();
|
||||
/* resize the ring buffer to 0 */
|
||||
tracing_resize_ring_buffer(0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mark_printk(const char *fmt, ...)
|
||||
{
|
||||
int ret;
|
||||
@@ -3640,6 +3739,11 @@ static const struct file_operations tracing_entries_fops = {
|
||||
.llseek = generic_file_llseek,
|
||||
};
|
||||
|
||||
static const struct file_operations tracing_free_buffer_fops = {
|
||||
.write = tracing_free_buffer_write,
|
||||
.release = tracing_free_buffer_release,
|
||||
};
|
||||
|
||||
static const struct file_operations tracing_mark_fops = {
|
||||
.open = tracing_open_generic,
|
||||
.write = tracing_mark_write,
|
||||
@@ -3696,7 +3800,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
|
||||
return 0;
|
||||
|
||||
if (!info->spare)
|
||||
info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
|
||||
info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
|
||||
if (!info->spare)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -3853,7 +3957,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||
|
||||
ref->ref = 1;
|
||||
ref->buffer = info->tr->buffer;
|
||||
ref->page = ring_buffer_alloc_read_page(ref->buffer);
|
||||
ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
|
||||
if (!ref->page) {
|
||||
kfree(ref);
|
||||
break;
|
||||
@@ -3862,8 +3966,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||
r = ring_buffer_read_page(ref->buffer, &ref->page,
|
||||
len, info->cpu, 1);
|
||||
if (r < 0) {
|
||||
ring_buffer_free_read_page(ref->buffer,
|
||||
ref->page);
|
||||
ring_buffer_free_read_page(ref->buffer, ref->page);
|
||||
kfree(ref);
|
||||
break;
|
||||
}
|
||||
@@ -4099,19 +4202,10 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
{
|
||||
struct trace_option_dentry *topt = filp->private_data;
|
||||
unsigned long val;
|
||||
char buf[64];
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val != 0 && val != 1)
|
||||
@@ -4159,20 +4253,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
loff_t *ppos)
|
||||
{
|
||||
long index = (long)filp->private_data;
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (val != 0 && val != 1)
|
||||
@@ -4365,6 +4450,9 @@ static __init int tracer_init_debugfs(void)
|
||||
trace_create_file("buffer_size_kb", 0644, d_tracer,
|
||||
&global_trace, &tracing_entries_fops);
|
||||
|
||||
trace_create_file("free_buffer", 0644, d_tracer,
|
||||
&global_trace, &tracing_free_buffer_fops);
|
||||
|
||||
trace_create_file("trace_marker", 0220, d_tracer,
|
||||
NULL, &tracing_mark_fops);
|
||||
|
||||
|
@@ -278,6 +278,29 @@ struct tracer {
|
||||
};
|
||||
|
||||
|
||||
/* Only current can touch trace_recursion */
|
||||
#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
|
||||
#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
|
||||
|
||||
/* Ring buffer has the 10 LSB bits to count */
|
||||
#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
|
||||
|
||||
/* for function tracing recursion */
|
||||
#define TRACE_INTERNAL_BIT (1<<11)
|
||||
#define TRACE_GLOBAL_BIT (1<<12)
|
||||
/*
|
||||
* Abuse of the trace_recursion.
|
||||
* As we need a way to maintain state if we are tracing the function
|
||||
* graph in irq because we want to trace a particular function that
|
||||
* was called in irq context but we have irq tracing off. Since this
|
||||
* can only be modified by current, we can reuse trace_recursion.
|
||||
*/
|
||||
#define TRACE_IRQ_BIT (1<<13)
|
||||
|
||||
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
|
||||
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
|
||||
#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
|
||||
|
||||
#define TRACE_PIPE_ALL_CPU -1
|
||||
|
||||
int tracer_init(struct tracer *t, struct trace_array *tr);
|
||||
@@ -389,6 +412,9 @@ void update_max_tr_single(struct trace_array *tr,
|
||||
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
|
||||
int skip, int pc);
|
||||
|
||||
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
|
||||
int skip, int pc, struct pt_regs *regs);
|
||||
|
||||
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
|
||||
int pc);
|
||||
|
||||
@@ -400,6 +426,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer,
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
|
||||
unsigned long flags, int skip,
|
||||
int pc, struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
|
||||
unsigned long flags, int pc)
|
||||
{
|
||||
@@ -507,8 +539,18 @@ static inline int ftrace_graph_addr(unsigned long addr)
|
||||
return 1;
|
||||
|
||||
for (i = 0; i < ftrace_graph_count; i++) {
|
||||
if (addr == ftrace_graph_funcs[i])
|
||||
if (addr == ftrace_graph_funcs[i]) {
|
||||
/*
|
||||
* If no irqs are to be traced, but a set_graph_function
|
||||
* is set, and called by an interrupt handler, we still
|
||||
* want to trace it.
|
||||
*/
|
||||
if (in_irq())
|
||||
trace_recursion_set(TRACE_IRQ_BIT);
|
||||
else
|
||||
trace_recursion_clear(TRACE_IRQ_BIT);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -609,6 +651,7 @@ enum trace_iterator_flags {
|
||||
TRACE_ITER_GRAPH_TIME = 0x80000,
|
||||
TRACE_ITER_RECORD_CMD = 0x100000,
|
||||
TRACE_ITER_OVERWRITE = 0x200000,
|
||||
TRACE_ITER_STOP_ON_FREE = 0x400000,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -677,6 +720,7 @@ struct event_subsystem {
|
||||
struct dentry *entry;
|
||||
struct event_filter *filter;
|
||||
int nr_events;
|
||||
int ref_count;
|
||||
};
|
||||
|
||||
#define FILTER_PRED_INVALID ((unsigned short)-1)
|
||||
@@ -784,19 +828,4 @@ extern const char *__stop___trace_bprintk_fmt[];
|
||||
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
|
||||
#include "trace_entries.h"
|
||||
|
||||
/* Only current can touch trace_recursion */
|
||||
#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
|
||||
#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
|
||||
|
||||
/* Ring buffer has the 10 LSB bits to count */
|
||||
#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
|
||||
|
||||
/* for function tracing recursion */
|
||||
#define TRACE_INTERNAL_BIT (1<<11)
|
||||
#define TRACE_GLOBAL_BIT (1<<12)
|
||||
|
||||
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
|
||||
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
|
||||
#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
|
||||
|
||||
#endif /* _LINUX_KERNEL_TRACE_H */
|
||||
|
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
TRACE_STACK,

F_STRUCT(
__array( unsigned long, caller, FTRACE_STACK_ENTRIES )
__field( int, size )
__dynamic_array(unsigned long, caller )
),

F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
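Switching the stack entry from a fixed caller[FTRACE_STACK_ENTRIES] array to a size field plus __dynamic_array means readers only walk the slots the record actually carries. A sketch of the consuming loop; the real version is the trace_output.c hunk near the end of this diff, and iter, s and flags are assumed from that context (return-value checks omitted for brevity):

struct stack_entry *field = (struct stack_entry *)iter->ent;
unsigned long *p = field->caller;
unsigned long *end = (unsigned long *)((long)iter->ent + iter->ent_size);

/* stop at the ULONG_MAX sentinel or at the end of the variable-sized record */
for (; p && *p != ULONG_MAX && p < end; p++) {
        trace_seq_puts(s, " => ");
        seq_print_ip_sym(s, *p, flags);
        trace_seq_puts(s, "\n");
}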
@@ -244,6 +244,35 @@ static void ftrace_clear_events(void)
mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
struct event_filter *filter = system->filter;

WARN_ON_ONCE(system->ref_count == 0);
if (--system->ref_count)
return;

if (filter) {
kfree(filter->filter_string);
kfree(filter);
}
kfree(system->name);
kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
WARN_ON_ONCE(system->ref_count == 0);
system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
mutex_lock(&event_mutex);
__put_system(system);
mutex_unlock(&event_mutex);
}

/*
* __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
*/
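The new reference count keeps an event_subsystem alive while a debugfs file holds it, even if all of its events go away. The pairing sketched here mirrors how subsystem_open()/subsystem_release() further down use these helpers; the example_* names are illustrative:

/* sketch: pin the subsystem on open, drop the reference on release */
static int example_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = inode->i_private;

        mutex_lock(&event_mutex);
        if (system)
                __get_system(system);
        mutex_unlock(&event_mutex);

        return tracing_open_generic(inode, filp);
}

static int example_release(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = inode->i_private;

        if (system)
                put_system(system);     /* takes event_mutex itself */
        return 0;
}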
@@ -486,20 +515,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct ftrace_event_call *call = filp->private_data;
|
||||
char buf[64];
|
||||
unsigned long val;
|
||||
int ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tracing_update_buffers();
|
||||
@@ -528,7 +548,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
|
||||
loff_t *ppos)
|
||||
{
|
||||
const char set_to_char[4] = { '?', '0', '1', 'X' };
|
||||
const char *system = filp->private_data;
|
||||
struct event_subsystem *system = filp->private_data;
|
||||
struct ftrace_event_call *call;
|
||||
char buf[2];
|
||||
int set = 0;
|
||||
@@ -539,7 +559,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
|
||||
if (!call->name || !call->class || !call->class->reg)
|
||||
continue;
|
||||
|
||||
if (system && strcmp(call->class->system, system) != 0)
|
||||
if (system && strcmp(call->class->system, system->name) != 0)
|
||||
continue;
|
||||
|
||||
/*
|
||||
@@ -569,21 +589,13 @@ static ssize_t
|
||||
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
loff_t *ppos)
|
||||
{
|
||||
const char *system = filp->private_data;
|
||||
struct event_subsystem *system = filp->private_data;
|
||||
const char *name = NULL;
|
||||
unsigned long val;
|
||||
char buf[64];
|
||||
ssize_t ret;
|
||||
|
||||
if (cnt >= sizeof(buf))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&buf, ubuf, cnt))
|
||||
return -EFAULT;
|
||||
|
||||
buf[cnt] = 0;
|
||||
|
||||
ret = strict_strtoul(buf, 10, &val);
|
||||
if (ret < 0)
|
||||
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tracing_update_buffers();
|
||||
@@ -593,7 +605,14 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
if (val != 0 && val != 1)
|
||||
return -EINVAL;
|
||||
|
||||
ret = __ftrace_set_clr_event(NULL, system, NULL, val);
|
||||
/*
|
||||
* Opening of "enable" adds a ref count to system,
|
||||
* so the name is safe to use.
|
||||
*/
|
||||
if (system)
|
||||
name = system->name;
|
||||
|
||||
ret = __ftrace_set_clr_event(NULL, name, NULL, val);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -826,6 +845,52 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static LIST_HEAD(event_subsystems);
|
||||
|
||||
static int subsystem_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct event_subsystem *system = NULL;
|
||||
int ret;
|
||||
|
||||
if (!inode->i_private)
|
||||
goto skip_search;
|
||||
|
||||
/* Make sure the system still exists */
|
||||
mutex_lock(&event_mutex);
|
||||
list_for_each_entry(system, &event_subsystems, list) {
|
||||
if (system == inode->i_private) {
|
||||
/* Don't open systems with no events */
|
||||
if (!system->nr_events) {
|
||||
system = NULL;
|
||||
break;
|
||||
}
|
||||
__get_system(system);
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
if (system != inode->i_private)
|
||||
return -ENODEV;
|
||||
|
||||
skip_search:
|
||||
ret = tracing_open_generic(inode, filp);
|
||||
if (ret < 0 && system)
|
||||
put_system(system);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int subsystem_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct event_subsystem *system = inode->i_private;
|
||||
|
||||
if (system)
|
||||
put_system(system);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
|
||||
loff_t *ppos)
|
||||
@@ -963,17 +1028,19 @@ static const struct file_operations ftrace_event_filter_fops = {
|
||||
};
|
||||
|
||||
static const struct file_operations ftrace_subsystem_filter_fops = {
|
||||
.open = tracing_open_generic,
|
||||
.open = subsystem_open,
|
||||
.read = subsystem_filter_read,
|
||||
.write = subsystem_filter_write,
|
||||
.llseek = default_llseek,
|
||||
.release = subsystem_release,
|
||||
};
|
||||
|
||||
static const struct file_operations ftrace_system_enable_fops = {
|
||||
.open = tracing_open_generic,
|
||||
.open = subsystem_open,
|
||||
.read = system_enable_read,
|
||||
.write = system_enable_write,
|
||||
.llseek = default_llseek,
|
||||
.release = subsystem_release,
|
||||
};
|
||||
|
||||
static const struct file_operations ftrace_show_header_fops = {
|
||||
@@ -1002,8 +1069,6 @@ static struct dentry *event_trace_events_dir(void)
|
||||
return d_events;
|
||||
}
|
||||
|
||||
static LIST_HEAD(event_subsystems);
|
||||
|
||||
static struct dentry *
|
||||
event_subsystem_dir(const char *name, struct dentry *d_events)
|
||||
{
|
||||
@@ -1013,6 +1078,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
|
||||
/* First see if we did not already create this dir */
|
||||
list_for_each_entry(system, &event_subsystems, list) {
|
||||
if (strcmp(system->name, name) == 0) {
|
||||
__get_system(system);
|
||||
system->nr_events++;
|
||||
return system->entry;
|
||||
}
|
||||
@@ -1035,6 +1101,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
|
||||
}
|
||||
|
||||
system->nr_events = 1;
|
||||
system->ref_count = 1;
|
||||
system->name = kstrdup(name, GFP_KERNEL);
|
||||
if (!system->name) {
|
||||
debugfs_remove(system->entry);
|
||||
@@ -1062,8 +1129,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
|
||||
"'%s/filter' entry\n", name);
|
||||
}
|
||||
|
||||
trace_create_file("enable", 0644, system->entry,
|
||||
(void *)system->name,
|
||||
trace_create_file("enable", 0644, system->entry, system,
|
||||
&ftrace_system_enable_fops);
|
||||
|
||||
return system->entry;
|
||||
@@ -1184,16 +1250,9 @@ static void remove_subsystem_dir(const char *name)
|
||||
list_for_each_entry(system, &event_subsystems, list) {
|
||||
if (strcmp(system->name, name) == 0) {
|
||||
if (!--system->nr_events) {
|
||||
struct event_filter *filter = system->filter;
|
||||
|
||||
debugfs_remove_recursive(system->entry);
|
||||
list_del(&system->list);
|
||||
if (filter) {
|
||||
kfree(filter->filter_string);
|
||||
kfree(filter);
|
||||
}
|
||||
kfree(system->name);
|
||||
kfree(system);
|
||||
__put_system(system);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@@ -1886,6 +1886,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
|
||||
/* Make sure the system still has events */
|
||||
if (!system->nr_events) {
|
||||
err = -ENODEV;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (!strcmp(strstrip(filter_string), "0")) {
|
||||
filter_free_subsystem_preds(system);
|
||||
remove_filter_string(system->filter);
|
||||
|
@@ -324,7 +324,8 @@ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
|
||||
}
|
||||
|
||||
static int
|
||||
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
|
||||
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
|
||||
char *glob, char *cmd, char *param, int enable)
|
||||
{
|
||||
struct ftrace_probe_ops *ops;
|
||||
void *count = (void *)-1;
|
||||
|
@@ -74,6 +74,20 @@ static struct tracer_flags tracer_flags = {
|
||||
|
||||
static struct trace_array *graph_array;
|
||||
|
||||
/*
|
||||
* DURATION column is being also used to display IRQ signs,
|
||||
* following values are used by print_graph_irq and others
|
||||
* to fill in space into DURATION column.
|
||||
*/
|
||||
enum {
|
||||
DURATION_FILL_FULL = -1,
|
||||
DURATION_FILL_START = -2,
|
||||
DURATION_FILL_END = -3,
|
||||
};
|
||||
|
||||
static enum print_line_t
|
||||
print_graph_duration(unsigned long long duration, struct trace_seq *s,
|
||||
u32 flags);
|
||||
|
||||
/* Add a function return address to the trace stack on thread info.*/
|
||||
int
|
||||
@@ -213,7 +227,7 @@ int __trace_graph_entry(struct trace_array *tr,
|
||||
|
||||
static inline int ftrace_graph_ignore_irqs(void)
|
||||
{
|
||||
if (!ftrace_graph_skip_irqs)
|
||||
if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
|
||||
return 0;
|
||||
|
||||
return in_irq();
|
||||
@@ -577,32 +591,6 @@ get_return_for_leaf(struct trace_iterator *iter,
|
||||
return next;
|
||||
}
|
||||
|
||||
/* Signal a overhead of time execution to the output */
|
||||
static int
|
||||
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
|
||||
u32 flags)
|
||||
{
|
||||
/* If duration disappear, we don't need anything */
|
||||
if (!(flags & TRACE_GRAPH_PRINT_DURATION))
|
||||
return 1;
|
||||
|
||||
/* Non nested entry or return */
|
||||
if (duration == -1)
|
||||
return trace_seq_printf(s, " ");
|
||||
|
||||
if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
|
||||
/* Duration exceeded 100 msecs */
|
||||
if (duration > 100000ULL)
|
||||
return trace_seq_printf(s, "! ");
|
||||
|
||||
/* Duration exceeded 10 msecs */
|
||||
if (duration > 10000ULL)
|
||||
return trace_seq_printf(s, "+ ");
|
||||
}
|
||||
|
||||
return trace_seq_printf(s, " ");
|
||||
}
|
||||
|
||||
static int print_graph_abs_time(u64 t, struct trace_seq *s)
|
||||
{
|
||||
unsigned long usecs_rem;
|
||||
@@ -625,34 +613,36 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
|
||||
addr >= (unsigned long)__irqentry_text_end)
|
||||
return TRACE_TYPE_UNHANDLED;
|
||||
|
||||
/* Absolute time */
|
||||
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
|
||||
ret = print_graph_abs_time(iter->ts, s);
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
|
||||
/* Absolute time */
|
||||
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
|
||||
ret = print_graph_abs_time(iter->ts, s);
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
/* Cpu */
|
||||
if (flags & TRACE_GRAPH_PRINT_CPU) {
|
||||
ret = print_graph_cpu(s, cpu);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
/* Cpu */
|
||||
if (flags & TRACE_GRAPH_PRINT_CPU) {
|
||||
ret = print_graph_cpu(s, cpu);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
/* Proc */
|
||||
if (flags & TRACE_GRAPH_PRINT_PROC) {
|
||||
ret = print_graph_proc(s, pid);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
ret = trace_seq_printf(s, " | ");
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
/* Proc */
|
||||
if (flags & TRACE_GRAPH_PRINT_PROC) {
|
||||
ret = print_graph_proc(s, pid);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
ret = trace_seq_printf(s, " | ");
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
}
|
||||
|
||||
/* No overhead */
|
||||
ret = print_graph_overhead(-1, s, flags);
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
ret = print_graph_duration(DURATION_FILL_START, s, flags);
|
||||
if (ret != TRACE_TYPE_HANDLED)
|
||||
return ret;
|
||||
|
||||
if (type == TRACE_GRAPH_ENT)
|
||||
ret = trace_seq_printf(s, "==========>");
|
||||
@@ -662,9 +652,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* Don't close the duration column if haven't one */
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION)
|
||||
trace_seq_printf(s, " |");
|
||||
ret = print_graph_duration(DURATION_FILL_END, s, flags);
|
||||
if (ret != TRACE_TYPE_HANDLED)
|
||||
return ret;
|
||||
|
||||
ret = trace_seq_printf(s, "\n");
|
||||
|
||||
if (!ret)
|
||||
@@ -716,9 +707,49 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
print_graph_duration(unsigned long long duration, struct trace_seq *s,
u32 flags)
{
int ret;
int ret = -1;

if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return TRACE_TYPE_HANDLED;

/* No real data, just filling the column with spaces */
switch (duration) {
case DURATION_FILL_FULL:
ret = trace_seq_printf(s, " | ");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
case DURATION_FILL_START:
ret = trace_seq_printf(s, " ");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
case DURATION_FILL_END:
ret = trace_seq_printf(s, " |");
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

/* Signal an overhead of time execution to the output */
if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
/* Duration exceeded 100 msecs */
if (duration > 100000ULL)
ret = trace_seq_printf(s, "! ");
/* Duration exceeded 10 msecs */
else if (duration > 10000ULL)
ret = trace_seq_printf(s, "+ ");
}

/*
* The -1 means we either did not exceed the duration thresholds
* or we don't want to print out the overhead. Either way we need
* to fill out the space.
*/
if (ret == -1)
ret = trace_seq_printf(s, " ");

/* Catch here any failure that happened above */
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;

ret = trace_print_graph_duration(duration, s);
if (ret != TRACE_TYPE_HANDLED)
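With the DURATION_FILL_* values, callers reuse print_graph_duration() to pad the column instead of open-coding spaces; the hunks that follow convert every call site to this pattern. The canonical shape, taken from those call sites:

/* pad the whole duration column when there is no time to print */
ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
if (ret != TRACE_TYPE_HANDLED)
        return ret;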
@@ -767,18 +798,11 @@ print_graph_entry_leaf(struct trace_iterator *iter,
|
||||
cpu_data->enter_funcs[call->depth] = 0;
|
||||
}
|
||||
|
||||
/* Overhead */
|
||||
ret = print_graph_overhead(duration, s, flags);
|
||||
if (!ret)
|
||||
/* Overhead and duration */
|
||||
ret = print_graph_duration(duration, s, flags);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* Duration */
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION) {
|
||||
ret = print_graph_duration(duration, s);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
/* Function */
|
||||
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
|
||||
ret = trace_seq_printf(s, " ");
|
||||
@@ -815,17 +839,10 @@ print_graph_entry_nested(struct trace_iterator *iter,
|
||||
cpu_data->enter_funcs[call->depth] = call->func;
|
||||
}
|
||||
|
||||
/* No overhead */
|
||||
ret = print_graph_overhead(-1, s, flags);
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* No time */
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION) {
|
||||
ret = trace_seq_printf(s, " | ");
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
|
||||
if (ret != TRACE_TYPE_HANDLED)
|
||||
return ret;
|
||||
|
||||
/* Function */
|
||||
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
|
||||
@@ -865,6 +882,9 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
|
||||
return 0;
|
||||
|
||||
/* Absolute time */
|
||||
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
|
||||
ret = print_graph_abs_time(iter->ts, s);
|
||||
@@ -1078,18 +1098,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
|
||||
if (print_graph_prologue(iter, s, 0, 0, flags))
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* Overhead */
|
||||
ret = print_graph_overhead(duration, s, flags);
|
||||
if (!ret)
|
||||
/* Overhead and duration */
|
||||
ret = print_graph_duration(duration, s, flags);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* Duration */
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION) {
|
||||
ret = print_graph_duration(duration, s);
|
||||
if (ret == TRACE_TYPE_PARTIAL_LINE)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
/* Closing brace */
|
||||
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
|
||||
ret = trace_seq_printf(s, " ");
|
||||
@@ -1146,17 +1159,10 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
|
||||
if (print_graph_prologue(iter, s, 0, 0, flags))
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* No overhead */
|
||||
ret = print_graph_overhead(-1, s, flags);
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
|
||||
/* No time */
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION) {
|
||||
ret = trace_seq_printf(s, " | ");
|
||||
if (!ret)
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
|
||||
if (ret != TRACE_TYPE_HANDLED)
|
||||
return ret;
|
||||
|
||||
/* Indentation */
|
||||
if (depth > 0)
|
||||
@@ -1207,7 +1213,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
|
||||
|
||||
|
||||
enum print_line_t
|
||||
__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
|
||||
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
|
||||
{
|
||||
struct ftrace_graph_ent_entry *field;
|
||||
struct fgraph_data *data = iter->private;
|
||||
@@ -1270,18 +1276,7 @@ __print_graph_function_flags(struct trace_iterator *iter, u32 flags)
|
||||
static enum print_line_t
|
||||
print_graph_function(struct trace_iterator *iter)
|
||||
{
|
||||
return __print_graph_function_flags(iter, tracer_flags.val);
|
||||
}
|
||||
|
||||
enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
|
||||
u32 flags)
|
||||
{
|
||||
if (trace_flags & TRACE_ITER_LATENCY_FMT)
|
||||
flags |= TRACE_GRAPH_PRINT_DURATION;
|
||||
else
|
||||
flags |= TRACE_GRAPH_PRINT_ABS_TIME;
|
||||
|
||||
return __print_graph_function_flags(iter, flags);
|
||||
return print_graph_function_flags(iter, tracer_flags.val);
|
||||
}
|
||||
|
||||
static enum print_line_t
|
||||
@@ -1309,8 +1304,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
|
||||
seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
|
||||
seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
|
||||
seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
|
||||
seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces);
|
||||
seq_printf(s, "#%.*s|||| / \n", size, spaces);
|
||||
seq_printf(s, "#%.*s||| / \n", size, spaces);
|
||||
}
|
||||
|
||||
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
|
||||
@@ -1329,7 +1323,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
|
||||
if (flags & TRACE_GRAPH_PRINT_PROC)
|
||||
seq_printf(s, " TASK/PID ");
|
||||
if (lat)
|
||||
seq_printf(s, "|||||");
|
||||
seq_printf(s, "||||");
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION)
|
||||
seq_printf(s, " DURATION ");
|
||||
seq_printf(s, " FUNCTION CALLS\n");
|
||||
@@ -1343,7 +1337,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
|
||||
if (flags & TRACE_GRAPH_PRINT_PROC)
|
||||
seq_printf(s, " | | ");
|
||||
if (lat)
|
||||
seq_printf(s, "|||||");
|
||||
seq_printf(s, "||||");
|
||||
if (flags & TRACE_GRAPH_PRINT_DURATION)
|
||||
seq_printf(s, " | | ");
|
||||
seq_printf(s, " | | | |\n");
|
||||
@@ -1358,15 +1352,16 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
|
||||
{
|
||||
struct trace_iterator *iter = s->private;
|
||||
|
||||
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
|
||||
return;
|
||||
|
||||
if (trace_flags & TRACE_ITER_LATENCY_FMT) {
|
||||
/* print nothing if the buffers are empty */
|
||||
if (trace_empty(iter))
|
||||
return;
|
||||
|
||||
print_trace_header(s, iter);
|
||||
flags |= TRACE_GRAPH_PRINT_DURATION;
|
||||
} else
|
||||
flags |= TRACE_GRAPH_PRINT_ABS_TIME;
|
||||
}
|
||||
|
||||
__print_graph_headers_flags(s, flags);
|
||||
}
|
||||
|
@@ -226,7 +226,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
TRACE_GRAPH_PRINT_PROC)
TRACE_GRAPH_PRINT_PROC | \
TRACE_GRAPH_PRINT_ABS_TIME | \
TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
@@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref)
|
||||
DEFINE_FETCH_deref(string)
|
||||
DEFINE_FETCH_deref(string_size)
|
||||
|
||||
static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
|
||||
{
|
||||
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
|
||||
update_deref_fetch_param(data->orig.data);
|
||||
else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
|
||||
update_symbol_cache(data->orig.data);
|
||||
}
|
||||
|
||||
static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
|
||||
{
|
||||
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
|
||||
@@ -376,6 +384,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
|
||||
#define fetch_bitfield_string NULL
|
||||
#define fetch_bitfield_string_size NULL
|
||||
|
||||
static __kprobes void
|
||||
update_bitfield_fetch_param(struct bitfield_fetch_param *data)
|
||||
{
|
||||
/*
|
||||
* Don't check the bitfield itself, because this must be the
|
||||
* last fetch function.
|
||||
*/
|
||||
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
|
||||
update_deref_fetch_param(data->orig.data);
|
||||
else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
|
||||
update_symbol_cache(data->orig.data);
|
||||
}
|
||||
|
||||
static __kprobes void
|
||||
free_bitfield_fetch_param(struct bitfield_fetch_param *data)
|
||||
{
|
||||
@@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
|
||||
free_symbol_cache(data->orig.data);
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
/* Default (unsigned long) fetch type */
|
||||
#define __DEFAULT_FETCH_TYPE(t) u##t
|
||||
#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
|
||||
@@ -536,6 +558,7 @@ struct probe_arg {
|
||||
/* Flags for trace_probe */
|
||||
#define TP_FLAG_TRACE 1
|
||||
#define TP_FLAG_PROFILE 2
|
||||
#define TP_FLAG_REGISTERED 4
|
||||
|
||||
struct trace_probe {
|
||||
struct list_head list;
|
||||
@@ -555,16 +578,49 @@ struct trace_probe {
|
||||
(sizeof(struct probe_arg) * (n)))
|
||||
|
||||
|
||||
static __kprobes int probe_is_return(struct trace_probe *tp)
|
||||
static __kprobes int trace_probe_is_return(struct trace_probe *tp)
|
||||
{
|
||||
return tp->rp.handler != NULL;
|
||||
}
|
||||
|
||||
static __kprobes const char *probe_symbol(struct trace_probe *tp)
|
||||
static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
|
||||
{
|
||||
return tp->symbol ? tp->symbol : "unknown";
|
||||
}
|
||||
|
||||
static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
|
||||
{
|
||||
return tp->rp.kp.offset;
|
||||
}
|
||||
|
||||
static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
|
||||
{
|
||||
return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
|
||||
}
|
||||
|
||||
static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
|
||||
{
|
||||
return !!(tp->flags & TP_FLAG_REGISTERED);
|
||||
}
|
||||
|
||||
static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
|
||||
{
|
||||
return !!(kprobe_gone(&tp->rp.kp));
|
||||
}
|
||||
|
||||
static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
|
||||
struct module *mod)
|
||||
{
|
||||
int len = strlen(mod->name);
|
||||
const char *name = trace_probe_symbol(tp);
|
||||
return strncmp(mod->name, name, len) == 0 && name[len] == ':';
|
||||
}
|
||||
|
||||
static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
|
||||
{
|
||||
return !!strchr(trace_probe_symbol(tp), ':');
|
||||
}
|
||||
|
||||
static int register_probe_event(struct trace_probe *tp);
|
||||
static void unregister_probe_event(struct trace_probe *tp);
|
||||
|
||||
@@ -646,6 +702,16 @@ error:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void update_probe_arg(struct probe_arg *arg)
|
||||
{
|
||||
if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
|
||||
update_bitfield_fetch_param(arg->fetch.data);
|
||||
else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
|
||||
update_deref_fetch_param(arg->fetch.data);
|
||||
else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
|
||||
update_symbol_cache(arg->fetch.data);
|
||||
}
|
||||
|
||||
static void free_probe_arg(struct probe_arg *arg)
|
||||
{
|
||||
if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
|
||||
@@ -671,7 +737,7 @@ static void free_trace_probe(struct trace_probe *tp)
|
||||
kfree(tp);
|
||||
}
|
||||
|
||||
static struct trace_probe *find_probe_event(const char *event,
|
||||
static struct trace_probe *find_trace_probe(const char *event,
|
||||
const char *group)
|
||||
{
|
||||
struct trace_probe *tp;
|
||||
@@ -683,13 +749,96 @@ static struct trace_probe *find_probe_event(const char *event,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
|
||||
static int enable_trace_probe(struct trace_probe *tp, int flag)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
tp->flags |= flag;
|
||||
if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
|
||||
!trace_probe_has_gone(tp)) {
|
||||
if (trace_probe_is_return(tp))
|
||||
ret = enable_kretprobe(&tp->rp);
|
||||
else
|
||||
ret = enable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
|
||||
static void disable_trace_probe(struct trace_probe *tp, int flag)
|
||||
{
|
||||
tp->flags &= ~flag;
|
||||
if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
|
||||
if (trace_probe_is_return(tp))
|
||||
disable_kretprobe(&tp->rp);
|
||||
else
|
||||
disable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
}
|
||||
|
||||
/* Internal register function - just handle k*probes and flags */
|
||||
static int __register_trace_probe(struct trace_probe *tp)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
if (trace_probe_is_registered(tp))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < tp->nr_args; i++)
|
||||
update_probe_arg(&tp->args[i]);
|
||||
|
||||
/* Set/clear disabled flag according to tp->flag */
|
||||
if (trace_probe_is_enabled(tp))
|
||||
tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
|
||||
else
|
||||
tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
|
||||
|
||||
if (trace_probe_is_return(tp))
|
||||
ret = register_kretprobe(&tp->rp);
|
||||
else
|
||||
ret = register_kprobe(&tp->rp.kp);
|
||||
|
||||
if (ret == 0)
|
||||
tp->flags |= TP_FLAG_REGISTERED;
|
||||
else {
|
||||
pr_warning("Could not insert probe at %s+%lu: %d\n",
|
||||
trace_probe_symbol(tp), trace_probe_offset(tp), ret);
|
||||
if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
|
||||
pr_warning("This probe might be able to register after"
|
||||
"target module is loaded. Continue.\n");
|
||||
ret = 0;
|
||||
} else if (ret == -EILSEQ) {
|
||||
pr_warning("Probing address(0x%p) is not an "
|
||||
"instruction boundary.\n",
|
||||
tp->rp.kp.addr);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Internal unregister function - just handle k*probes and flags */
|
||||
static void __unregister_trace_probe(struct trace_probe *tp)
|
||||
{
|
||||
if (trace_probe_is_registered(tp)) {
|
||||
if (trace_probe_is_return(tp))
|
||||
unregister_kretprobe(&tp->rp);
|
||||
else
|
||||
unregister_kprobe(&tp->rp.kp);
|
||||
tp->flags &= ~TP_FLAG_REGISTERED;
|
||||
/* Cleanup kprobe for reuse */
|
||||
if (tp->rp.kp.symbol_name)
|
||||
tp->rp.kp.addr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
|
||||
static void unregister_trace_probe(struct trace_probe *tp)
|
||||
{
|
||||
if (probe_is_return(tp))
|
||||
unregister_kretprobe(&tp->rp);
|
||||
else
|
||||
unregister_kprobe(&tp->rp.kp);
|
||||
__unregister_trace_probe(tp);
|
||||
list_del(&tp->list);
|
||||
unregister_probe_event(tp);
|
||||
}
|
||||
@@ -702,41 +851,65 @@ static int register_trace_probe(struct trace_probe *tp)
|
||||
|
||||
mutex_lock(&probe_lock);
|
||||
|
||||
/* register as an event */
|
||||
old_tp = find_probe_event(tp->call.name, tp->call.class->system);
|
||||
/* Delete old (same name) event if exist */
|
||||
old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
|
||||
if (old_tp) {
|
||||
/* delete old event */
|
||||
unregister_trace_probe(old_tp);
|
||||
free_trace_probe(old_tp);
|
||||
}
|
||||
|
||||
/* Register new event */
|
||||
ret = register_probe_event(tp);
|
||||
if (ret) {
|
||||
pr_warning("Failed to register probe event(%d)\n", ret);
|
||||
goto end;
|
||||
}
|
||||
|
||||
tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
|
||||
if (probe_is_return(tp))
|
||||
ret = register_kretprobe(&tp->rp);
|
||||
else
|
||||
ret = register_kprobe(&tp->rp.kp);
|
||||
|
||||
if (ret) {
|
||||
pr_warning("Could not insert probe(%d)\n", ret);
|
||||
if (ret == -EILSEQ) {
|
||||
pr_warning("Probing address(0x%p) is not an "
|
||||
"instruction boundary.\n",
|
||||
tp->rp.kp.addr);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
/* Register k*probe */
|
||||
ret = __register_trace_probe(tp);
|
||||
if (ret < 0)
|
||||
unregister_probe_event(tp);
|
||||
} else
|
||||
else
|
||||
list_add_tail(&tp->list, &probe_list);
|
||||
|
||||
end:
|
||||
mutex_unlock(&probe_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Module notifier call back, checking event on the module */
|
||||
static int trace_probe_module_callback(struct notifier_block *nb,
|
||||
unsigned long val, void *data)
|
||||
{
|
||||
struct module *mod = data;
|
||||
struct trace_probe *tp;
|
||||
int ret;
|
||||
|
||||
if (val != MODULE_STATE_COMING)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* Update probes on coming module */
|
||||
mutex_lock(&probe_lock);
|
||||
list_for_each_entry(tp, &probe_list, list) {
|
||||
if (trace_probe_within_module(tp, mod)) {
|
||||
__unregister_trace_probe(tp);
|
||||
ret = __register_trace_probe(tp);
|
||||
if (ret)
|
||||
pr_warning("Failed to re-register probe %s on"
|
||||
"%s: %d\n",
|
||||
tp->call.name, mod->name, ret);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&probe_lock);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block trace_probe_module_nb = {
|
||||
.notifier_call = trace_probe_module_callback,
|
||||
.priority = 1 /* Invoked after kprobe module callback */
|
||||
};
|
||||
|
||||
/* Split symbol and offset. */
|
||||
static int split_symbol_offset(char *symbol, unsigned long *offset)
|
||||
{
|
||||
@@ -962,8 +1135,8 @@ static int create_trace_probe(int argc, char **argv)
|
||||
{
|
||||
/*
|
||||
* Argument syntax:
|
||||
* - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
|
||||
* - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
|
||||
* - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
|
||||
* - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
|
||||
* Fetch args:
|
||||
* $retval : fetch return value
|
||||
* $stack : fetch stack address
|
||||
@@ -1025,7 +1198,7 @@ static int create_trace_probe(int argc, char **argv)
|
||||
return -EINVAL;
|
||||
}
|
||||
mutex_lock(&probe_lock);
|
||||
tp = find_probe_event(event, group);
|
||||
tp = find_trace_probe(event, group);
|
||||
if (!tp) {
|
||||
mutex_unlock(&probe_lock);
|
||||
pr_info("Event %s/%s doesn't exist.\n", group, event);
|
||||
@@ -1144,7 +1317,7 @@ error:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cleanup_all_probes(void)
|
||||
static void release_all_trace_probes(void)
|
||||
{
|
||||
struct trace_probe *tp;
|
||||
|
||||
@@ -1158,7 +1331,6 @@ static void cleanup_all_probes(void)
|
||||
mutex_unlock(&probe_lock);
|
||||
}
|
||||
|
||||
|
||||
/* Probes listing interfaces */
|
||||
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
@@ -1181,15 +1353,16 @@ static int probes_seq_show(struct seq_file *m, void *v)
|
||||
struct trace_probe *tp = v;
|
||||
int i;
|
||||
|
||||
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
|
||||
seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
|
||||
seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
|
||||
|
||||
if (!tp->symbol)
|
||||
seq_printf(m, " 0x%p", tp->rp.kp.addr);
|
||||
else if (tp->rp.kp.offset)
|
||||
seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
|
||||
seq_printf(m, " %s+%u", trace_probe_symbol(tp),
|
||||
tp->rp.kp.offset);
|
||||
else
|
||||
seq_printf(m, " %s", probe_symbol(tp));
|
||||
seq_printf(m, " %s", trace_probe_symbol(tp));
|
||||
|
||||
for (i = 0; i < tp->nr_args; i++)
|
||||
seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
|
||||
@@ -1209,7 +1382,7 @@ static int probes_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
if ((file->f_mode & FMODE_WRITE) &&
|
||||
(file->f_flags & O_TRUNC))
|
||||
cleanup_all_probes();
|
||||
release_all_trace_probes();
|
||||
|
||||
return seq_open(file, &probes_seq_op);
|
||||
}
|
||||
@@ -1397,7 +1570,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
|
||||
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
|
||||
|
||||
if (!filter_current_check_discard(buffer, call, entry, event))
|
||||
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
|
||||
trace_nowake_buffer_unlock_commit_regs(buffer, event,
|
||||
irq_flags, pc, regs);
|
||||
}
|
||||
|
||||
/* Kretprobe handler */
|
||||
@@ -1429,7 +1603,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
|
||||
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
|
||||
|
||||
if (!filter_current_check_discard(buffer, call, entry, event))
|
||||
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
|
||||
trace_nowake_buffer_unlock_commit_regs(buffer, event,
|
||||
irq_flags, pc, regs);
|
||||
}
|
||||
|
||||
/* Event entry printers */
|
||||
@@ -1511,30 +1686,6 @@ partial:
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
static int probe_event_enable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags |= TP_FLAG_TRACE;
|
||||
if (probe_is_return(tp))
|
||||
return enable_kretprobe(&tp->rp);
|
||||
else
|
||||
return enable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
|
||||
static void probe_event_disable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags &= ~TP_FLAG_TRACE;
|
||||
if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
|
||||
if (probe_is_return(tp))
|
||||
disable_kretprobe(&tp->rp);
|
||||
else
|
||||
disable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
}
|
||||
|
||||
#undef DEFINE_FIELD
|
||||
#define DEFINE_FIELD(type, item, name, is_signed) \
|
||||
do { \
|
||||
@@ -1596,7 +1747,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
|
||||
|
||||
const char *fmt, *arg;
|
||||
|
||||
if (!probe_is_return(tp)) {
|
||||
if (!trace_probe_is_return(tp)) {
|
||||
fmt = "(%lx)";
|
||||
arg = "REC->" FIELD_STRING_IP;
|
||||
} else {
|
||||
@@ -1713,49 +1864,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
|
||||
head = this_cpu_ptr(call->perf_events);
|
||||
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
|
||||
}
|
||||
|
||||
static int probe_perf_enable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags |= TP_FLAG_PROFILE;
|
||||
|
||||
if (probe_is_return(tp))
|
||||
return enable_kretprobe(&tp->rp);
|
||||
else
|
||||
return enable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
|
||||
static void probe_perf_disable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags &= ~TP_FLAG_PROFILE;
|
||||
|
||||
if (!(tp->flags & TP_FLAG_TRACE)) {
|
||||
if (probe_is_return(tp))
|
||||
disable_kretprobe(&tp->rp);
|
||||
else
|
||||
disable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PERF_EVENTS */
|
||||
|
||||
static __kprobes
|
||||
int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)event->data;
|
||||
|
||||
switch (type) {
|
||||
case TRACE_REG_REGISTER:
|
||||
return probe_event_enable(event);
|
||||
return enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
case TRACE_REG_UNREGISTER:
|
||||
probe_event_disable(event);
|
||||
disable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
case TRACE_REG_PERF_REGISTER:
|
||||
return probe_perf_enable(event);
|
||||
return enable_trace_probe(tp, TP_FLAG_PROFILE);
|
||||
case TRACE_REG_PERF_UNREGISTER:
|
||||
probe_perf_disable(event);
|
||||
disable_trace_probe(tp, TP_FLAG_PROFILE);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
@@ -1805,7 +1932,7 @@ static int register_probe_event(struct trace_probe *tp)
|
||||
|
||||
/* Initialize ftrace_event_call */
|
||||
INIT_LIST_HEAD(&call->class->fields);
|
||||
if (probe_is_return(tp)) {
|
||||
if (trace_probe_is_return(tp)) {
|
||||
call->event.funcs = &kretprobe_funcs;
|
||||
call->class->define_fields = kretprobe_event_define_fields;
|
||||
} else {
|
||||
@@ -1844,6 +1971,9 @@ static __init int init_kprobe_trace(void)
|
||||
struct dentry *d_tracer;
|
||||
struct dentry *entry;
|
||||
|
||||
if (register_module_notifier(&trace_probe_module_nb))
|
||||
return -EINVAL;
|
||||
|
||||
d_tracer = tracing_init_dentry();
|
||||
if (!d_tracer)
|
||||
return 0;
|
||||
@@ -1897,12 +2027,12 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
warn++;
|
||||
} else {
|
||||
/* Enable trace point */
|
||||
tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
|
||||
tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
|
||||
if (WARN_ON_ONCE(tp == NULL)) {
|
||||
pr_warning("error on getting new probe.\n");
|
||||
warn++;
|
||||
} else
|
||||
probe_event_enable(&tp->call);
|
||||
enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
}
|
||||
|
||||
ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
|
||||
@@ -1912,12 +2042,12 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
warn++;
|
||||
} else {
|
||||
/* Enable trace point */
|
||||
tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
|
||||
tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
|
||||
if (WARN_ON_ONCE(tp == NULL)) {
|
||||
pr_warning("error on getting new probe.\n");
|
||||
warn++;
|
||||
} else
|
||||
probe_event_enable(&tp->call);
|
||||
enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
}
|
||||
|
||||
if (warn)
|
||||
@@ -1938,7 +2068,7 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
}
|
||||
|
||||
end:
|
||||
cleanup_all_probes();
|
||||
release_all_trace_probes();
|
||||
if (warn)
|
||||
pr_cont("NG: Some tests are failed. Please check them.\n");
|
||||
else
|
||||
|
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
|
||||
{
|
||||
struct stack_entry *field;
|
||||
struct trace_seq *s = &iter->seq;
|
||||
int i;
|
||||
unsigned long *p;
|
||||
unsigned long *end;
|
||||
|
||||
trace_assign_type(field, iter->ent);
|
||||
end = (unsigned long *)((long)iter->ent + iter->ent_size);
|
||||
|
||||
if (!trace_seq_puts(s, "<stack trace>\n"))
|
||||
goto partial;
|
||||
for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
|
||||
if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
|
||||
break;
|
||||
|
||||
for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
|
||||
if (!trace_seq_puts(s, " => "))
|
||||
goto partial;
|
||||
|
||||
if (!seq_print_ip_sym(s, field->caller[i], flags))
|
||||
if (!seq_print_ip_sym(s, *p, flags))
|
||||
goto partial;
|
||||
if (!trace_seq_puts(s, "\n"))
|
||||
goto partial;
|
||||
|
@@ -227,7 +227,9 @@ static void wakeup_trace_close(struct trace_iterator *iter)
graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
TRACE_GRAPH_PRINT_ABS_TIME | \
TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
@@ -156,20 +156,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
{
long *ptr = filp->private_data;
unsigned long val, flags;
char buf[64];
int ret;
int cpu;

if (count >= sizeof(buf))
return -EINVAL;

if (copy_from_user(&buf, ubuf, count))
return -EFAULT;

buf[count] = 0;

ret = strict_strtoul(buf, 10, &val);
if (ret < 0)
ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret)
return ret;

local_irq_save(flags);
@@ -200,6 +200,7 @@ static int is_softlockup(unsigned long touch_ts)
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -209,7 +210,7 @@ static struct perf_event_attr wd_hw_attr = {
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event, int nmi,
static void watchdog_overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -368,10 +369,11 @@ static int watchdog_nmi_enable(int cpu)
if (event != NULL)
goto out_enable;

/* Try to register using hardware perf events */
wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);

/* Try to register using hardware perf events */
event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
if (!IS_ERR(event)) {
printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
goto out_save;
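This watchdog hunk reflects the tree-wide perf change in the merge: overflow handlers drop the nmi argument and perf_event_create_kernel_counter() grows a trailing context pointer (NULL here). A minimal registration sketch with the new signatures; the example_* names are illustrative:

static void example_overflow(struct perf_event *event,
                             struct perf_sample_data *data,
                             struct pt_regs *regs)
{
        /* react to a counter overflow; no nmi flag is passed anymore */
}

static struct perf_event *example_counter(int cpu,
                                           struct perf_event_attr *attr)
{
        /* last argument is the new overflow-handler context, unused here */
        return perf_event_create_kernel_counter(attr, cpu, NULL,
                                                example_overflow, NULL);
}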