Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile: (31 commits)
  powerpc/oprofile: fix whitespaces in op_model_cell.c
  powerpc/oprofile: IBM CELL: add SPU event profiling support
  powerpc/oprofile: fix cell/pr_util.h
  powerpc/oprofile: IBM CELL: cleanup and restructuring
  oprofile: make new cpu buffer functions part of the api
  oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
  ring_buffer: fix ring_buffer_event_length()
  oprofile: use new data sample format for ibs
  oprofile: add op_cpu_buffer_get_data()
  oprofile: add op_cpu_buffer_add_data()
  oprofile: rework implementation of cpu buffer events
  oprofile: modify op_cpu_buffer_read_entry()
  oprofile: add op_cpu_buffer_write_reserve()
  oprofile: rename variables in add_ibs_begin()
  oprofile: rename add_sample() in cpu_buffer.c
  oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c
  oprofile: making add_sample_entry() inline
  oprofile: remove backtrace code for ibs
  oprofile: remove unused ibs macro
  oprofile: remove unused components in struct oprofile_cpu_buffer
  ...
Linus Torvalds
2009-01-09 12:43:06 -08:00
16 changed files with 1141 additions and 553 deletions

drivers/oprofile/buffer_sync.c

@@ -1,11 +1,12 @@
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -315,88 +316,73 @@ static void add_trace_begin(void)
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
-	struct op_sample *sample;
 
-	sample = cpu_buffer_read_entry(cpu);
-	if (!sample)
-		goto Error;
-	rip = sample->eip;
-
-#ifdef __LP64__
-	rip += sample->event << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(sample->eip);
-	add_event_entry(sample->event);
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		sample = cpu_buffer_read_entry(cpu);
-		if (!sample)
-			goto Error;
-		add_event_entry(sample->eip);
-		add_event_entry(sample->event);
-	}
-
-	return;
-
-Error:
-	return;
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -415,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 }
 
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -526,66 +493,69 @@ void sync_buffer(int cpu)
 {
 	struct mm_struct *mm = NULL;
 	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
 	unsigned int i;
 	unsigned long available;
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	cpu_buffer_reset(cpu);
-	available = cpu_buffer_entries(cpu);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-		struct op_sample *s = cpu_buffer_read_entry(cpu);
-		if (!s)
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
 			break;
 
-		if (is_code(s->eip)) {
-			switch (s->event) {
-			case 0:
-			case CPU_IS_KERNEL:
-				/* kernel/userspace switch */
-				in_kernel = s->event;
-				if (state == sb_buffer_start)
-					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-				break;
-			case CPU_TRACE_BEGIN:
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
 				state = sb_bt_start;
 				add_trace_begin();
-				break;
-#ifdef CONFIG_OPROFILE_IBS
-			case IBS_FETCH_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
-				break;
-			case IBS_OP_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_OP_CODE, mm);
-				break;
-#endif
-			default:
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
+				/* kernel/userspace switch */
+				in_kernel = flags & IS_KERNEL;
+				if (state == sb_buffer_start)
+					state = sb_sample_start;
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
+				new = (struct task_struct *)val;
 				oldmm = mm;
-				new = (struct task_struct *)s->event;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
 					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
-				break;
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
 		}
+
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+		}
 	}
 	release_mm(mm);
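
A note for readers following the new control flow: an escape sample now carries its payload in op_sample's variable-length data[] array, and add_data() above drains it with op_cpu_buffer_get_data(). Below is a minimal sketch of that consumer pattern — illustrative only, not part of the commit; process_word() is a hypothetical stand-in for real processing:

/* Illustrative sketch only: drain one CPU buffer the way sync_buffer() does. */
static void drain_one_cpu(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;
	int in_kernel = 1;

	while ((sample = op_cpu_buffer_read_entry(&entry, cpu)) != NULL) {
		if (!is_code(sample->eip))
			continue;		/* ordinary pc/event sample */
		/* escape sample: sample->event holds the flag bits */
		if (sample->event & KERNEL_CTX_SWITCH)
			in_kernel = !!(sample->event & IS_KERNEL);
		/* attached payload words, if any, remain in entry.data[] */
		while (op_cpu_buffer_get_data(&entry, &val))
			process_word(val, in_kernel);	/* hypothetical */
	}
}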

drivers/oprofile/cpu_buffer.c

@@ -1,11 +1,12 @@
 /**
  * @file cpu_buffer.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * Each CPU has a local buffer that stores PC value/event
  * pairs. We also log context switches when we notice them.
@@ -45,8 +46,8 @@
  * can be changed to a single buffer solution when the ring buffer
  * access is implemented as non-locking atomic code.
  */
-struct ring_buffer *op_ring_buffer_read;
-struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -54,6 +55,19 @@ static void wq_sync_buffer(struct work_struct *work);
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+	return oprofile_cpu_buffer_size;
+}
+
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+	struct oprofile_cpu_buffer *cpu_buf
+		= &__get_cpu_var(cpu_buffer);
+
+	cpu_buf->sample_lost_overflow++;
+}
+
 void free_cpu_buffers(void)
 {
 	if (op_ring_buffer_read)
@@ -64,24 +78,11 @@ void free_cpu_buffers(void)
 	op_ring_buffer_write = NULL;
 }
 
-unsigned long oprofile_get_cpu_buffer_size(void)
-{
-	return fs_cpu_buffer_size;
-}
-
-void oprofile_cpu_buffer_inc_smpl_lost(void)
-{
-	struct oprofile_cpu_buffer *cpu_buf
-		= &__get_cpu_var(cpu_buffer);
-
-	cpu_buf->sample_lost_overflow++;
-}
-
 int alloc_cpu_buffers(void)
 {
 	int i;
-	unsigned long buffer_size = fs_cpu_buffer_size;
+	unsigned long buffer_size = oprofile_cpu_buffer_size;
 
 	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
 	if (!op_ring_buffer_read)
@@ -97,8 +98,6 @@ int alloc_cpu_buffers(void)
 		b->last_is_kernel = -1;
 		b->tracing = 0;
 		b->buffer_size = buffer_size;
-		b->tail_pos = 0;
-		b->head_pos = 0;
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->backtrace_aborted = 0;
@@ -145,47 +144,156 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
-static inline int
-add_sample(struct oprofile_cpu_buffer *cpu_buf,
-	   unsigned long pc, unsigned long event)
+/*
+ * This function prepares the cpu buffer to write a sample.
+ *
+ * Struct op_entry is used during operations on the ring buffer while
+ * struct op_sample contains the data that is stored in the ring
+ * buffer. Struct entry can be uninitialized. The function reserves a
+ * data array that is specified by size. Use
+ * op_cpu_buffer_write_commit() after preparing the sample. In case of
+ * errors a null pointer is returned, otherwise the pointer to the
+ * sample.
+ *
+ */
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
+	entry->event = ring_buffer_lock_reserve
+		(op_ring_buffer_write, sizeof(struct op_sample) +
+		 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
+	if (!entry->sample)
+		return NULL;
+	entry->size = size;
+	entry->data = entry->sample->data;
+	return entry->sample;
+}
+
+int op_cpu_buffer_write_commit(struct op_entry *entry)
+{
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
+
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	return NULL;
+
+event:
+	entry->event = e;
+	entry->sample = ring_buffer_event_data(e);
+	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+		/ sizeof(entry->sample->data[0]);
+	entry->data = entry->sample->data;
+	return entry->sample;
+}
+
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+}
+
+static int
+op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
+	    int is_kernel, struct task_struct *task)
+{
 	struct op_entry entry;
-	int ret;
+	struct op_sample *sample;
+	unsigned long flags;
+	int size;
 
-	ret = cpu_buffer_write_entry(&entry);
-	if (ret)
-		return ret;
+	flags = 0;
 
-	entry.sample->eip = pc;
-	entry.sample->event = event;
+	if (backtrace)
+		flags |= TRACE_BEGIN;
 
-	ret = cpu_buffer_write_commit(&entry);
-	if (ret)
-		return ret;
+	/* notice a switch from user->kernel or vice versa */
+	is_kernel = !!is_kernel;
+	if (cpu_buf->last_is_kernel != is_kernel) {
+		cpu_buf->last_is_kernel = is_kernel;
+		flags |= KERNEL_CTX_SWITCH;
+		if (is_kernel)
+			flags |= IS_KERNEL;
+	}
+
+	/* notice a task switch */
+	if (cpu_buf->last_task != task) {
+		cpu_buf->last_task = task;
+		flags |= USER_CTX_SWITCH;
+	}
+
+	if (!flags)
+		/* nothing to do */
+		return 0;
+
+	if (flags & USER_CTX_SWITCH)
+		size = 1;
+	else
+		size = 0;
+
+	sample = op_cpu_buffer_write_reserve(&entry, size);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = ESCAPE_CODE;
+	sample->event = flags;
+
+	if (size)
+		op_cpu_buffer_add_data(&entry, (unsigned long)task);
+
+	op_cpu_buffer_write_commit(&entry);
 
 	return 0;
 }
 
 static inline int
-add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
+op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
+	      unsigned long pc, unsigned long event)
 {
-	return add_sample(buffer, ESCAPE_CODE, value);
+	struct op_entry entry;
+	struct op_sample *sample;
+
+	sample = op_cpu_buffer_write_reserve(&entry, 0);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = pc;
+	sample->event = event;
+
+	return op_cpu_buffer_write_commit(&entry);
 }
 
-/* This must be safe from any context. It's safe writing here
- * because of the head/tail separation of the writer and reader
- * of the CPU buffer.
+/*
+ * This must be safe from any context.
  *
  * is_kernel is needed because on some architectures you cannot
  * tell if you are in kernel or user space simply by looking at
  * pc. We tag this in the buffer by generating kernel enter/exit
  * events whenever is_kernel changes
  */
-static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
-		      int is_kernel, unsigned long event)
+static int
+log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
+	   unsigned long backtrace, int is_kernel, unsigned long event)
 {
-	struct task_struct *task;
-
 	cpu_buf->sample_received++;
 
 	if (pc == ESCAPE_CODE) {
@@ -193,25 +301,10 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		return 0;
 	}
 
-	is_kernel = !!is_kernel;
+	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+		goto fail;
 
-	task = current;
-
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		cpu_buf->last_is_kernel = is_kernel;
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-	}
-
-	/* notice a task switch */
-	if (cpu_buf->last_task != task) {
-		cpu_buf->last_task = task;
-		if (add_code(cpu_buf, (unsigned long)task))
-			goto fail;
-	}
-
-	if (add_sample(cpu_buf, pc, event))
+	if (op_add_sample(cpu_buf, pc, event))
 		goto fail;
 
 	return 1;
@@ -221,109 +314,102 @@ fail:
 	return 0;
 }
 
-static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-	add_code(cpu_buf, CPU_TRACE_BEGIN);
 	cpu_buf->tracing = 1;
-	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
 
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
-			     unsigned long event, int is_kernel)
+static inline void
+__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			  unsigned long event, int is_kernel)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-
-	if (!backtrace_depth) {
-		log_sample(cpu_buf, pc, is_kernel, event);
-		return;
-	}
-
-	if (!oprofile_begin_trace(cpu_buf))
-		return;
+	unsigned long backtrace = oprofile_backtrace_depth;
 
 	/*
 	 * if log_sample() fail we can't backtrace since we lost the
 	 * source of this event
 	 */
-	if (log_sample(cpu_buf, pc, is_kernel, event))
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+		/* failed */
+		return;
+
+	if (!backtrace)
+		return;
+
+	oprofile_begin_trace(cpu_buf);
+	oprofile_ops.backtrace(regs, backtrace);
 	oprofile_end_trace(cpu_buf);
 }
 
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			     unsigned long event, int is_kernel)
+{
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+}
+
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
 	int is_kernel = !user_mode(regs);
 	unsigned long pc = profile_pc(regs);
 
-	oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define MAX_IBS_SAMPLE_SIZE 14
-
-void oprofile_add_ibs_sample(struct pt_regs * const regs,
-			     unsigned int * const ibs_sample, int ibs_code)
+/*
+ * Add samples with data to the ring buffer.
+ *
+ * Use oprofile_add_data(&entry, val) to add data and
+ * oprofile_write_commit(&entry) to commit the sample.
+ */
+void
+oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
+		       unsigned long pc, int code, int size)
 {
+	struct op_sample *sample;
 	int is_kernel = !user_mode(regs);
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	struct task_struct *task;
-	int fail = 0;
 
 	cpu_buf->sample_received++;
 
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-		cpu_buf->last_is_kernel = is_kernel;
-	}
-
-	/* notice a task switch */
-	if (!is_kernel) {
-		task = current;
-		if (cpu_buf->last_task != task) {
-			if (add_code(cpu_buf, (unsigned long)task))
-				goto fail;
-			cpu_buf->last_task = task;
-		}
-	}
-
-	fail = fail || add_code(cpu_buf, ibs_code);
-	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
-	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
-	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
-
-	if (ibs_code == IBS_OP_BEGIN) {
-		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
-		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
-		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
-	}
-
-	if (fail)
+	/* no backtraces for samples with data */
+	if (op_add_code(cpu_buf, 0, is_kernel, current))
 		goto fail;
 
-	if (backtrace_depth)
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	sample = op_cpu_buffer_write_reserve(entry, size + 2);
+	if (!sample)
+		goto fail;
+	sample->eip = ESCAPE_CODE;
+	sample->event = 0;		/* no flags */
+
+	op_cpu_buffer_add_data(entry, code);
+	op_cpu_buffer_add_data(entry, pc);
 
 	return;
 
 fail:
 	cpu_buf->sample_lost_overflow++;
 	return;
 }
 
-#endif
+int oprofile_add_data(struct op_entry *entry, unsigned long val)
+{
+	return op_cpu_buffer_add_data(entry, val);
+}
+
+int oprofile_write_commit(struct op_entry *entry)
+{
+	return op_cpu_buffer_write_commit(entry);
+}
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	log_sample(cpu_buf, pc, is_kernel, event);
+	log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
@@ -340,7 +426,7 @@ void oprofile_add_trace(unsigned long pc)
 	if (pc == ESCAPE_CODE)
 		goto fail;
 
-	if (add_sample(cpu_buf, pc, 0))
+	if (op_add_sample(cpu_buf, pc, 0))
 		goto fail;
 
 	return;
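
The comment above oprofile_write_reserve() spells out the intended call sequence for drivers that attach raw payload words to a sample (the AMD IBS handler is the in-tree caller). A hedged sketch of such a caller — report_hw_sample() and its two payload values are invented for illustration:

/* Illustrative sketch only: emit one sample carrying two extra data
 * words from an interrupt handler, using the API added here. */
static void report_hw_sample(struct pt_regs *regs, unsigned long pc,
			     unsigned long data0, unsigned long data1)
{
	struct op_entry entry;

	/* reserves size + 2 data slots; code and pc fill the first two */
	oprofile_write_reserve(&entry, regs, pc, IBS_FETCH_CODE, 2);
	oprofile_add_data(&entry, data0);
	oprofile_add_data(&entry, data1);
	oprofile_write_commit(&entry);
}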

drivers/oprofile/cpu_buffer.h

@@ -1,10 +1,11 @@
 /**
  * @file cpu_buffer.h
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -31,17 +32,12 @@ void end_cpu_work(void);
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
-struct op_entry {
-	struct ring_buffer_event *event;
-	struct op_sample *sample;
-	unsigned long irq_flags;
-};
+struct op_entry;
 
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
@@ -54,8 +50,6 @@ struct oprofile_cpu_buffer {
 	struct delayed_work work;
 };
 
-extern struct ring_buffer *op_ring_buffer_read;
-extern struct ring_buffer *op_ring_buffer_write;
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 /*
@@ -64,7 +58,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
  * reset these to invalid values; the next sample collected will
  * populate the buffer with proper values to initialize the buffer
  */
-static inline void cpu_buffer_reset(int cpu)
+static inline void op_cpu_buffer_reset(int cpu)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
@@ -72,55 +66,48 @@ static inline void cpu_buffer_reset(int cpu)
 	cpu_buf->last_task = NULL;
 }
 
-static inline int cpu_buffer_write_entry(struct op_entry *entry)
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
 {
-	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
-						sizeof(struct op_sample),
-						&entry->irq_flags);
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-	if (!entry->sample)
-		return -ENOMEM;
-	return 0;
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
 }
 
-static inline int cpu_buffer_write_commit(struct op_entry *entry)
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-					 entry->irq_flags);
+	return entry->size;
 }
 
-static inline struct op_sample *cpu_buffer_read_entry(int cpu)
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
 {
-	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
-		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	return NULL;
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
 }
 
-/* "acquire" as many cpu buffer slots as we can */
-static inline unsigned long cpu_buffer_entries(int cpu)
-{
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
-}
-
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
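
The four flag bits above replace the old one-code-per-sample CPU_* constants: a single escape sample can now announce a trace begin, a kernel/user transition, and a task switch at once, with the task pointer travelling in the entry's data array. A small sketch — illustrative only, not from the commit — of how op_add_code() composes the word:

/* Illustrative sketch only: composing the per-sample flag word. */
static unsigned long make_flags(int backtrace, int mode_changed,
				int is_kernel, int task_changed)
{
	unsigned long flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;
	if (mode_changed) {
		flags |= KERNEL_CTX_SWITCH;	/* the mode changed ... */
		if (is_kernel)
			flags |= IS_KERNEL;	/* ... to kernel mode */
	}
	if (task_changed)
		flags |= USER_CTX_SWITCH;	/* task pointer follows as data */

	return flags;
}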

drivers/oprofile/event_buffer.c

@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)

drivers/oprofile/oprof.c

@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
 
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);

drivers/oprofile/oprof.h

@@ -21,12 +21,12 @@ void oprofile_stop(void);
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;

drivers/oprofile/oprofile_files.c

@@ -14,17 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-#define FS_BUFFER_SIZE_DEFAULT		131072
-#define FS_CPU_BUFFER_SIZE_DEFAULT	8192
-#define FS_BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
 
-unsigned long fs_buffer_size;
-unsigned long fs_cpu_buffer_size;
-unsigned long fs_buffer_watershed;
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
@@ -125,16 +126,16 @@ static const struct file_operations dump_fops = {
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
 	/* reinitialize default values */
-	fs_buffer_size		= FS_BUFFER_SIZE_DEFAULT;
-	fs_cpu_buffer_size	= FS_CPU_BUFFER_SIZE_DEFAULT;
-	fs_buffer_watershed	= FS_BUFFER_WATERSHED_DEFAULT;
+	oprofile_buffer_size		= BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size	= CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed	= BUFFER_WATERSHED_DEFAULT;
 
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);