Merge branch 'devel-stable' into for-next
arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
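[Review note] The reworked validate_event() threads the initialising PMU through group validation so that a group mixing events from different hardware PMUs (e.g. CPU + CCI) is refused up front; as the new comment says, core perf only checks pmu->ctx == leader->ctx after event_init(). A minimal stand-alone model of the check, using simplified stand-in types rather than the kernel's real structures:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel types; illustration only. */
	struct pmu { const char *name; };
	struct perf_event { struct pmu *pmu; };

	/* Mirrors the new check: an event only validates against the PMU
	 * that is actually initialising the group. */
	static int validate_event(struct pmu *pmu, struct perf_event *event)
	{
		if (event->pmu != pmu)
			return 0;	/* group spans multiple HW PMUs */
		return 1;
	}

	int main(void)
	{
		struct pmu cpu_pmu = { "armv7_cortex_a9" };
		struct pmu cci_pmu = { "cci_pmu" };
		struct perf_event leader = { &cpu_pmu };
		struct perf_event sibling = { &cci_pmu };

		printf("leader ok: %d\n", validate_event(leader.pmu, &leader));   /* 1 */
		printf("sibling ok: %d\n", validate_event(leader.pmu, &sibling)); /* 0 */
		return 0;
	}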
arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
 }
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
 			err = 0;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq < 0)
 				continue;
 
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
 			/*
 			 * If we have a single PMU interrupt that we can't shift,
 			 * assume that we're running on a uniprocessor machine and
 			 * continue. Otherwise, continue without this interrupt.
 			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
 				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, i);
+					irq, cpu);
 				continue;
 			}
 
 			err = request_irq(irq, handler,
 					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, i));
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 			if (err) {
 				pr_err("unable to request IRQ%d for ARM PMU counters\n",
 					irq);
 				return err;
 			}
 
-			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
 		}
 	}
 
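[Review note] Both the request and free paths now separate the platform-resource index i (used with platform_get_irq()) from the logical CPU that owns the interrupt (used for affinity, the per-CPU cookie, and active_irqs). A stand-alone sketch of that translation, with a hypothetical affinity table in place of cpu_pmu->irq_affinity:

	#include <stdio.h>

	/* Hypothetical table: IRQ resource index -> logical CPU.
	 * A NULL table keeps the old assumption "IRQ i belongs to CPU i". */
	static const int irq_affinity[] = { 2, 3, 0, 1 };

	static int irq_index_to_cpu(const int *affinity, int i)
	{
		return affinity ? affinity[i] : i;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("resource %d -> cpu %d\n", i,
			       irq_index_to_cpu(irq_affinity, i));
		printf("resource 1, no table -> cpu %d\n",
		       irq_index_to_cpu(NULL, 1));
		return 0;
	}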
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
 	{},
 };
 
||||
@@ -289,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int of_pmu_irq_cfg(struct platform_device *pdev)
|
||||
{
|
||||
int i;
|
||||
int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
|
||||
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < pdev->num_resources; ++i) {
|
||||
struct device_node *dn;
|
||||
int cpu;
|
||||
|
||||
dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
|
||||
i);
|
||||
if (!dn) {
|
||||
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
|
||||
of_node_full_name(dn), i);
|
||||
break;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
|
||||
break;
|
||||
|
||||
of_node_put(dn);
|
||||
if (cpu >= nr_cpu_ids) {
|
||||
pr_warn("Failed to find logical CPU for %s\n",
|
||||
dn->name);
|
||||
break;
|
||||
}
|
||||
|
||||
irqs[i] = cpu;
|
||||
}
|
||||
|
||||
if (i == pdev->num_resources)
|
||||
cpu_pmu->irq_affinity = irqs;
|
||||
else
|
||||
kfree(irqs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_pmu_device_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct of_device_id *of_id;
|
||||
@@ -313,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
|
||||
|
||||
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
|
||||
init_fn = of_id->data;
|
||||
ret = init_fn(pmu);
|
||||
|
||||
ret = of_pmu_irq_cfg(pdev);
|
||||
if (!ret)
|
||||
ret = init_fn(pmu);
|
||||
} else {
|
||||
ret = probe_current_pmu(pmu);
|
||||
}
|
||||
|
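[Review note] of_pmu_irq_cfg() resolves each "interrupt-affinity" phandle to a logical CPU and keeps the table only if every resource resolved; a partial table would silently route IRQs to the wrong CPUs, so the fallback is the old index == CPU behaviour. A simplified model of that all-or-nothing construction (resolve_affinity() is a made-up stub, not a kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stub for "parse interrupt-affinity[i], match it to a logical CPU";
	 * returns -1 on failure. Data is hypothetical. */
	static int resolve_affinity(int i)
	{
		static const int parsed[] = { 1, 0, -1, 3 };	/* entry 2 fails */
		return parsed[i];
	}

	static int *build_irq_affinity(int nr)
	{
		int *irqs = calloc(nr, sizeof(*irqs));
		int i;

		if (!irqs)
			return NULL;
		for (i = 0; i < nr; i++) {
			int cpu = resolve_affinity(i);

			if (cpu < 0)
				break;
			irqs[i] = cpu;
		}
		if (i == nr)
			return irqs;	/* complete table: keep it */
		free(irqs);
		return NULL;		/* fall back to index == CPU */
	}

	int main(void)
	{
		int *table = build_irq_affinity(4);

		printf("affinity table %s\n", table ? "kept" : "discarded");
		free(table);
		return 0;
	}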
arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
 	KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
 };
 
+/* ARMv7 Scorpion specific event types */
+enum scorpion_perf_types {
+	SCORPION_LPM0_GROUP0 = 0x4c,
+	SCORPION_LPM1_GROUP0 = 0x50,
+	SCORPION_LPM2_GROUP0 = 0x54,
+	SCORPION_L2LPM_GROUP0 = 0x58,
+	SCORPION_VLPM_GROUP0 = 0x5c,
+
+	SCORPION_ICACHE_ACCESS = 0x10053,
+	SCORPION_ICACHE_MISS = 0x10052,
+
+	SCORPION_DTLB_ACCESS = 0x12013,
+	SCORPION_DTLB_MISS = 0x12012,
+
+	SCORPION_ITLB_MISS = 0x12021,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -481,6 +498,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
+/*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					    [PERF_COUNT_HW_CACHE_OP_MAX]
+					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+	/*
+	 * Only ITLB misses and DTLB refills are supported. If users want the
+	 * DTLB refills misses a raw counter must be used.
+	 */
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
 /*
  * Perf Events' indices
  */
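[Review note] These tables drive armpmu_map_event(): a generic perf event id indexes into the map and either yields a Scorpion hardware event number or the "unsupported" sentinel. A minimal model of the lookup (placeholder event numbers, not the real ARMv7 encodings; in the kernel the sentinel is HW_OP_UNSUPPORTED):

	#include <stdio.h>

	#define HW_OP_UNSUPPORTED 0xffff

	enum { EV_CYCLES, EV_INSTRUCTIONS, EV_CACHE_REFS, NR_GENERIC };

	/* Placeholder map in the style of scorpion_perf_map above. */
	static const unsigned event_map[NR_GENERIC] = {
		[EV_CYCLES]	  = 0x11,	/* hypothetical hw number */
		[EV_INSTRUCTIONS] = 0x08,	/* hypothetical hw number */
		[EV_CACHE_REFS]	  = HW_OP_UNSUPPORTED,
	};

	static int map_event(unsigned id)
	{
		unsigned hw = event_map[id];

		return hw == HW_OP_UNSUPPORTED ? -1 : (int)hw;
	}

	int main(void)
	{
		printf("cycles     -> %d\n", map_event(EV_CYCLES));
		printf("cache refs -> %d\n", map_event(EV_CACHE_REFS)); /* -1 */
		return 0;
	}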
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
 			       &krait_perf_cache_map, 0xFFFFF);
 }
 
+static int scorpion_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &scorpion_perf_map,
+				&scorpion_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
 #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
 #define PMRESRn_EN		BIT(31)
 
+#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
+#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
+#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
+#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
+#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
+
 static u32 krait_read_pmresrn(int n)
 {
 	u32 val;
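[Review note] The EVENT_* macros decode the 0xNRCCG layout documented in the Scorpion comment block further down. Applying them to the worked example from that comment, 0x12021 (assuming KRAIT_EVENT = 0x10000 and VENUM_EVENT = 0x20000, i.e. the N nibble, which matches the N=1/N=2 annotations above):

	#include <stdio.h>

	#define KRAIT_EVENT		0x10000		/* assumed: N = 1 */
	#define VENUM_EVENT		0x20000		/* assumed: N = 2 */
	#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
	#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
	#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
	#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
	#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */

	int main(void)
	{
		unsigned int config = 0x12021;	/* SCORPION_ITLB_MISS */

		/* Prints: region=2 code=0x02 group=1 venum=0 cpu=1 */
		printf("region=%u code=0x%02x group=%u venum=%d cpu=%d\n",
		       EVENT_REGION(config), EVENT_CODE(config),
		       EVENT_GROUP(config), EVENT_VENUM(config),
		       EVENT_CPU(config));
		return 0;
	}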
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
 	}
 }
 
-static u32 krait_read_vpmresr0(void)
+static u32 venum_read_pmresr(void)
 {
 	u32 val;
 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
 	return val;
 }
 
-static void krait_write_vpmresr0(u32 val)
+static void venum_write_pmresr(u32 val)
 {
 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
 }
 
-static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
 {
 	u32 venum_new_val;
 	u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
 	fmxr(FPEXC, fp_new_val);
 }
 
-static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
 {
 	BUG_ON(preemptible());
 	/* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
 	u32 val;
 	u32 mask;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	unsigned int code;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
 	unsigned int group_shift;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	code   = (config_base >> 4) & 0xff;
-	group  = (config_base >> 0)  & 0xf;
+	bool venum_event = EVENT_VENUM(config_base);
 
 	group_shift = group * 8;
 	mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
 	armv7_pmnc_write_evtsel(idx, val);
 
-	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
 		val &= ~mask;
 		val |= code << group_shift;
 		val |= PMRESRn_EN;
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
 		val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
 	}
 }
 
-static u32 krait_clear_pmresrn_group(u32 val, int group)
+static u32 clear_pmresrn_group(u32 val, int group)
 {
 	u32 mask;
 	int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
 {
 	u32 val;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	group  = (config_base >> 0)  & 0xf;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
 
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
-		val = krait_clear_pmresrn_group(val, group);
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
-		val = krait_clear_pmresrn_group(val, group);
+		val = clear_pmresrn_group(val, group);
 		krait_write_pmresrn(region, val);
 	}
 }
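[Review note] Every VLPM access is bracketed by venum_pre_pmresr()/venum_post_pmresr() because the register sits behind the VFP/Neon coprocessor: FPEXC is saved, the unit is forced on for the access, and the caller's state is restored afterwards. A shape-only sketch of that bracketing, with a plain variable standing in for the cp10 FPEXC accesses (the enable bit shown is illustrative):

	#include <stdio.h>

	static unsigned fpexc_state;		/* stand-in for the FPEXC register */

	static void venum_pre_pmresr(unsigned *orig)
	{
		*orig = fpexc_state;		/* save caller's FPEXC */
		fpexc_state |= 1u << 30;	/* force the unit on (EN-style bit) */
	}

	static void venum_post_pmresr(unsigned orig)
	{
		fpexc_state = orig;		/* restore FPEXC exactly as found */
	}

	int main(void)
	{
		unsigned saved;

		venum_pre_pmresr(&saved);
		printf("during access: fpexc=%#x\n", fpexc_state);
		/* ...read or write the Venum PMRESR here... */
		venum_post_pmresr(saved);
		printf("after restore: fpexc=%#x\n", fpexc_state);
		return 0;
	}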
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
 static void krait_pmu_reset(void *info)
 {
 	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	armv7pmu_reset(info);
 
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
 	krait_write_pmresrn(1, 0);
 	krait_write_pmresrn(2, 0);
 
-	krait_pre_vpmresr0(&vval, &fval);
-	krait_write_vpmresr0(0);
-	krait_post_vpmresr0(vval, fval);
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+
 }
 
 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 {
 	int idx;
 	int bit = -1;
-	unsigned int prefix;
-	unsigned int region;
-	unsigned int code;
-	unsigned int group;
-	bool krait_event;
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int code = EVENT_CODE(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	code   = (hwc->config_base >> 4) & 0xff;
-	group  = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		/* Ignore invalid events */
 		if (group > 3 || region > 2)
 			return -EINVAL;
-		prefix = hwc->config_base & KRAIT_EVENT_MASK;
-		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
-			return -EINVAL;
-		if (prefix == VENUM_EVENT && (code & 0xe0))
+		if (venum_event && (code & 0xe0))
 			return -EINVAL;
 
 		bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 {
 	int bit;
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned int region;
-	unsigned int group;
-	bool krait_event;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	group  = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		bit = krait_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
 	}
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
 	return 0;
 }
+
+/*
+ * Scorpion Local Performance Monitor Register (LPMn)
+ *
+ *            31   30     24     16     8      0
+ *            +--------------------------------+
+ *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
+ *            +--------------------------------+
+ *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
+ *            +--------------------------------+
+ *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
+ *            +--------------------------------+
+ *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
+ *            +--------------------------------+
+ *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
+ *            +--------------------------------+
+ *              EN | G=3  | G=2  | G=1  | G=0
+ *
+ *
+ *  Event Encoding:
+ *
+ *      hwc->config_base = 0xNRCCG
+ *
+ *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
+ *      R  = region register
+ *      CC = class of events the group G is choosing from
+ *      G  = group or particular event
+ *
+ *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
+ *
+ *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ *  unit, etc.) while the event code (CC) corresponds to a particular class of
+ *  events (interrupts for example). An event code is broken down into
+ *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ *  example).
+ */
+
+static u32 scorpion_read_pmresrn(int n)
+{
+	u32 val;
+
+	switch (n) {
+	case 0:
+		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 1:
+		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 2:
+		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 3:
+		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+
+	return val;
+}
+
+static void scorpion_write_pmresrn(int n, u32 val)
+{
+	switch (n) {
+	case 0:
+		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 1:
+		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 2:
+		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 3:
+		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+}
+
+static u32 scorpion_get_pmresrn_event(unsigned int region)
+{
+	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
+					     SCORPION_LPM1_GROUP0,
+					     SCORPION_LPM2_GROUP0,
+					     SCORPION_L2LPM_GROUP0 };
+	return pmresrn_table[region];
+}
+
+static void scorpion_evt_setup(int idx, u32 config_base)
+{
+	u32 val;
+	u32 mask;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
+	unsigned int group_shift;
+	bool venum_event = EVENT_VENUM(config_base);
+
+	group_shift = group * 8;
+	mask = 0xff << group_shift;
+
+	/* Configure evtsel for the region and group */
+	if (venum_event)
+		val = SCORPION_VLPM_GROUP0;
+	else
+		val = scorpion_get_pmresrn_event(region);
+	val += group;
+	/* Mix in mode-exclusion bits */
+	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+	armv7_pmnc_write_evtsel(idx, val);
+
+	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_clearpmu(u32 config_base)
+{
+	u32 val;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val = clear_pmresrn_group(val, group);
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_pmu_disable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Disable counter and interrupt */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Clear pmresr code (if destined for PMNx counters)
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_clearpmu(hwc->config_base);
+
+	/* Disable interrupt for this counter */
+	armv7_pmnc_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We don't set the event for the cycle counter because we
+	 * don't have the ability to perform event filtering.
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_evt_setup(idx, hwc->config_base);
+	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/* Enable interrupt for this counter */
+	armv7_pmnc_enable_intens(idx);
+
+	/* Enable counter */
+	armv7_pmnc_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	armv7pmu_reset(info);
+
+	/* Clear all pmresrs */
+	scorpion_write_pmresrn(0, 0);
+	scorpion_write_pmresrn(1, 0);
+	scorpion_write_pmresrn(2, 0);
+	scorpion_write_pmresrn(3, 0);
+
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+				 unsigned int group)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+	if (hwc->config_base & VENUM_EVENT)
+		bit = SCORPION_VLPM_GROUP0;
+	else
+		bit = scorpion_get_pmresrn_event(region);
+	bit -= scorpion_get_pmresrn_event(0);
+	bit += group;
+	/*
+	 * Lower bits are reserved for use by the counters (see
+	 * armv7pmu_get_event_idx() for more info)
+	 */
+	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+	return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events cant use the same group within a pmresr register.
+ */
+static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	int idx;
+	int bit = -1;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		/* Ignore invalid events */
+		if (group > 3 || region > 3)
+			return -EINVAL;
+
+		bit = scorpion_event_to_bit(event, region, group);
+		if (test_and_set_bit(bit, cpuc->used_mask))
+			return -EAGAIN;
+	}
+
+	idx = armv7pmu_get_event_idx(cpuc, event);
+	if (idx < 0 && bit >= 0)
+		clear_bit(bit, cpuc->used_mask);
+
+	return idx;
+}
+
+static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+					 struct perf_event *event)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		bit = scorpion_event_to_bit(event, region, group);
+		clear_bit(bit, cpuc->used_mask);
+	}
+}
+
+static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
+
+static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion_mp";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
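[Review note] scorpion_read_pmresrn()/scorpion_write_pmresrn() spell out one mrc/mcr per region because the coprocessor opcode fields (opc1 selecting LPM0..L2LPM) are immediates in the instruction encoding and cannot be computed at run time. A portable stand-in that keeps the same shape, with an array playing the part of the four banked registers:

	#include <stdio.h>

	static unsigned lpm_regs[4];	/* stand-in for LPM0, LPM1, LPM2, L2LPM */

	static unsigned read_pmresrn(int n)
	{
		switch (n) {
		case 0: return lpm_regs[0];	/* mrc p15, 0, ..., c15, c0, 0 */
		case 1: return lpm_regs[1];	/* mrc p15, 1, ..., c15, c0, 0 */
		case 2: return lpm_regs[2];	/* mrc p15, 2, ..., c15, c0, 0 */
		case 3: return lpm_regs[3];	/* mrc p15, 3, ..., c15, c2, 0 */
		}
		return 0;	/* unreachable when callers validate n first */
	}

	int main(void)
	{
		lpm_regs[2] = 1u << 31;		/* pretend PMRESRn_EN is set */
		printf("LPM2 = %#x\n", read_pmresrn(2));
		return 0;
	}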
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
+
+static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_CPU_V7 */
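[Review note] The column-exclusion rule enforced by the get_event_idx/clear_event_idx pairs above, i.e. no two events may claim the same group of the same PMRESR region, reduces to a bitmap reservation keyed on (region, group). A simplified model (bit numbering is illustrative; the real code offsets past the counter indices in used_mask):

	#include <stdio.h>

	#define NR_GROUPS 4

	static unsigned long used_mask;	/* one bit per (region, group) pair */

	static int event_to_bit(unsigned region, unsigned group)
	{
		return region * NR_GROUPS + group;
	}

	/* 0 on success, -1 if the column is already claimed. */
	static int reserve(unsigned region, unsigned group)
	{
		unsigned long bit = 1ul << event_to_bit(region, group);

		if (used_mask & bit)
			return -1;
		used_mask |= bit;
		return 0;
	}

	int main(void)
	{
		printf("event A (r2,g1): %d\n", reserve(2, 1));	/*  0 */
		printf("event B (r2,g1): %d\n", reserve(2, 1));	/* -1: same column */
		printf("event C (r2,g2): %d\n", reserve(2, 2));	/*  0 */
		return 0;
	}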