Merge branches 'for-next/52-bit-kva', 'for-next/cpu-topology', 'for-next/error-injection', 'for-next/perf', 'for-next/psci-cpuidle', 'for-next/rng', 'for-next/smpboot', 'for-next/tbi' and 'for-next/tlbi' into for-next/core
* for-next/52-bit-kva: (25 commits)
  Support for 52-bit virtual addressing in kernel space

* for-next/cpu-topology: (9 commits)
  Move CPU topology parsing into core code and add support for ACPI 6.3

* for-next/error-injection: (2 commits)
  Support for function error injection via kprobes

* for-next/perf: (8 commits)
  Support for i.MX8 DDR PMU and proper SMMUv3 group validation

* for-next/psci-cpuidle: (7 commits)
  Move PSCI idle code into a new CPUidle driver

* for-next/rng: (4 commits)
  Support for 'rng-seed' property being passed in the devicetree

* for-next/smpboot: (3 commits)
  Reduce fragility of secondary CPU bringup in debug configurations

* for-next/tbi: (10 commits)
  Introduce new syscall ABI with relaxed requirements for pointer tags

* for-next/tlbi: (6 commits)
  Handle spurious page faults arising from kernel space

drivers/perf/arm_smmuv3_pmu.c

@@ -113,8 +113,6 @@ struct smmu_pmu {
         u64 counter_mask;
         u32 options;
         bool global_filter;
-        u32 global_filter_span;
-        u32 global_filter_sid;
 };

 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
         smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }

+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+                                         struct perf_event *new)
+{
+        if (get_filter_enable(new) != get_filter_enable(curr))
+                return false;
+
+        if (!get_filter_enable(new))
+                return true;
+
+        return get_filter_span(new) == get_filter_span(curr) &&
+               get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                        struct perf_event *event, int idx)
 {

@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
         }

         /* Requested settings same as current global settings*/
-        if (span == smmu_pmu->global_filter_span &&
-            sid == smmu_pmu->global_filter_sid)
+        idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+        if (idx == num_ctrs ||
+            smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+                smmu_pmu_set_event_filter(event, 0, span, sid);
                 return 0;
+        }

-        if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-                return -EAGAIN;
-
-        smmu_pmu_set_event_filter(event, 0, span, sid);
-        smmu_pmu->global_filter_span = span;
-        smmu_pmu->global_filter_sid = sid;
-        return 0;
+        return -EAGAIN;
 }

 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
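
The rework above replaces cached filter state with an invariant: when the PMCG only supports global filtering, every in-use counter was programmed with the same filter, so comparing the incoming event against the first used counter (or finding no counter in use at all) is equivalent to comparing against all of them. A minimal sketch of that check, reusing the helpers from this patch (the wrapper function itself is hypothetical, not part of the series):

        /*
         * Hypothetical wrapper illustrating the invariant: with a global
         * filter, all in-use counters carry identical filter settings, so
         * the first one stands in for the whole set.
         */
        static bool global_filter_accepts(struct smmu_pmu *smmu_pmu,
                                          struct perf_event *event)
        {
                unsigned int num_ctrs = smmu_pmu->num_counters;
                int idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);

                if (idx == num_ctrs)    /* no counters in use: anything goes */
                        return true;

                return smmu_pmu_check_global_filter(smmu_pmu->events[idx], event);
        }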

@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
         return idx;
 }

+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+                                       struct perf_event *new)
+{
+        if (new->pmu != curr->pmu)
+                return false;
+
+        if (to_smmu_pmu(new->pmu)->global_filter &&
+            !smmu_pmu_check_global_filter(curr, new))
+                return false;
+
+        return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.

@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
         struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
         struct device *dev = smmu_pmu->dev;
         struct perf_event *sibling;
+        int group_num_events = 1;
         u16 event_id;

         if (event->attr.type != event->pmu->type)

@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event)
         }

         /* Don't allow groups with mixed PMUs, except for s/w events */
-        if (event->group_leader->pmu != event->pmu &&
-            !is_software_event(event->group_leader)) {
-                dev_dbg(dev, "Can't create mixed PMU group\n");
-                return -EINVAL;
+        if (!is_software_event(event->group_leader)) {
+                if (!smmu_pmu_events_compatible(event->group_leader, event))
+                        return -EINVAL;
+
+                if (++group_num_events > smmu_pmu->num_counters)
+                        return -EINVAL;
         }

         for_each_sibling_event(sibling, event->group_leader) {
-                if (sibling->pmu != event->pmu &&
-                    !is_software_event(sibling)) {
-                        dev_dbg(dev, "Can't create mixed PMU group\n");
+                if (is_software_event(sibling))
+                        continue;
+
+                if (!smmu_pmu_events_compatible(sibling, event))
                         return -EINVAL;
-                }
+
+                if (++group_num_events > smmu_pmu->num_counters)
+                        return -EINVAL;
         }

         hwc->idx = -1;
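
The event_init() changes also close a group-validation gap: a group containing more SMMUv3 PMCG events than the hardware has counters can never be scheduled, so it is now rejected up front by counting the leader plus its hardware siblings. A self-contained sketch of the counting rule, with simplified stand-in types rather than the driver's structures:

        #include <stdbool.h>
        #include <stddef.h>

        /* Simplified stand-ins for perf events and the counter budget. */
        struct ev { bool software; };

        static bool group_fits(const struct ev *leader, const struct ev *siblings,
                               size_t n_siblings, int num_counters)
        {
                int group_num_events = 1;       /* the event being initialised */
                size_t i;

                if (!leader->software && ++group_num_events > num_counters)
                        return false;

                for (i = 0; i < n_siblings; i++) {
                        if (siblings[i].software)
                                continue;       /* s/w events need no counter */
                        if (++group_num_events > num_counters)
                                return false;
                }
                return true;
        }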

drivers/perf/fsl_imx8_ddr_perf.c

@@ -35,6 +35,8 @@
 #define EVENT_CYCLES_COUNTER    0
 #define NUM_COUNTERS            4

+#define AXI_MASKING_REVERT      0xffff0000      /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+
 #define to_ddr_pmu(p)           container_of(p, struct ddr_pmu, pmu)

 #define DDR_PERF_DEV_NAME       "imx8_ddr"

@@ -42,11 +44,25 @@

 static DEFINE_IDA(ddr_ida);

+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER           0x1     /* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+        unsigned int quirks;    /* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+        .quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
 static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
-        { .compatible = "fsl,imx8-ddr-pmu",},
-        { .compatible = "fsl,imx8m-ddr-pmu",},
+        { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+        { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
         { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

 struct ddr_pmu {
         struct pmu pmu;
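
The quirk plumbing follows the usual OF match-data pattern: each compatible entry carries a pointer to its fsl_ddr_devtype_data, and the probe hunk further down retrieves it with of_device_get_match_data(), so i.MX8M parts gain the filter capability without any runtime compatible-string comparison. A sketch of how a caller could then test for the capability (the helper itself is hypothetical):

        /*
         * Hypothetical helper: true when the matched device supports
         * AXI ID filtering, i.e. its match data carries the quirk bit.
         */
        static bool ddr_perf_has_axi_filter(struct platform_device *pdev)
        {
                const struct fsl_ddr_devtype_data *data =
                        of_device_get_match_data(&pdev->dev);

                return data && (data->quirks & DDR_CAP_AXI_ID_FILTER);
        }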

@@ -57,6 +73,7 @@ struct ddr_pmu {
         struct perf_event *events[NUM_COUNTERS];
         int active_events;
         enum cpuhp_state cpuhp_state;
+        const struct fsl_ddr_devtype_data *devtype_data;
         int irq;
         int id;
 };

@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
         IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
         IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
         IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+        IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+        IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
         NULL,
 };

@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = {
 };

 PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-15");
+PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

 static struct attribute *ddr_perf_format_attrs[] = {
         &format_attr_event.attr,
+        &format_attr_axi_id.attr,
+        &format_attr_axi_mask.attr,
         NULL,
 };

@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
         return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
 }

+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+        return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+        return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+                                        struct perf_event *b)
+{
+        if (!ddr_perf_is_filtered(a))
+                return true;
+        if (!ddr_perf_is_filtered(b))
+                return true;
+        return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
 static int ddr_perf_event_init(struct perf_event *event)
 {
         struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
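
Because a single filter register is shared by all counters, two filtered events can only coexist if they request exactly the same AXI ID/mask value; unfiltered events never conflict with anything. A self-contained sketch of that rule with stand-in types (the real driver operates on struct perf_event):

        #include <stdbool.h>
        #include <stdint.h>

        struct fake_event { uint64_t config, config1; };

        static bool is_filtered(const struct fake_event *e)
        {
                /* axid-read (0x41) and axid-write (0x42) use the filter */
                return e->config == 0x41 || e->config == 0x42;
        }

        static bool filters_compatible(const struct fake_event *a,
                                       const struct fake_event *b)
        {
                if (!is_filtered(a) || !is_filtered(b))
                        return true;
                return a->config1 == b->config1;
        }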

@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event)
             !is_software_event(event->group_leader))
                 return -EINVAL;

+        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+                if (!ddr_perf_filters_compatible(event, event->group_leader))
+                        return -EINVAL;
+                for_each_sibling_event(sibling, event->group_leader) {
+                        if (!ddr_perf_filters_compatible(event, sibling))
+                                return -EINVAL;
+                }
+        }
+
         for_each_sibling_event(sibling, event->group_leader) {
                 if (sibling->pmu != event->pmu &&
                     !is_software_event(sibling))

@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
         struct hw_perf_event *hwc = &event->hw;
         int counter;
         int cfg = event->attr.config;
+        int cfg1 = event->attr.config1;
+
+        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+                int i;
+
+                for (i = 1; i < NUM_COUNTERS; i++) {
+                        if (pmu->events[i] &&
+                            !ddr_perf_filters_compatible(event, pmu->events[i]))
+                                return -EINVAL;
+                }
+
+                if (ddr_perf_is_filtered(event)) {
+                        /* revert axi id masking(axi_mask) value */
+                        cfg1 ^= AXI_MASKING_REVERT;
+                        writel(cfg1, pmu->base + COUNTER_DPCR1);
+                }
+        }

         counter = ddr_perf_alloc_counter(pmu, cfg);
         if (counter < 0) {
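
The XOR against AXI_MASKING_REVERT flips only the upper 16 mask bits of config1 before it is written to DPCR1, leaving the AXI ID half untouched; the assumption here is that the hardware's mask polarity is inverted relative to the perf ABI, so a user-supplied all-ones mask must be inverted on the way into the register. A standalone illustration of the bit manipulation:

        #include <stdint.h>
        #include <stdio.h>

        #define AXI_MASKING_REVERT 0xffff0000u  /* mirrors the driver constant */

        int main(void)
        {
                /* Example attr.config1: axi_mask = 0xffff, axi_id = 0x0012 */
                uint32_t cfg1 = (0xffffu << 16) | 0x0012u;
                uint32_t dpcr1 = cfg1 ^ AXI_MASKING_REVERT;

                /* Only bits 16-31 change; the AXI ID half is untouched. */
                printf("config1 0x%08x -> DPCR1 0x%08x\n", cfg1, dpcr1);
                return 0;
        }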

@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
         if (!name)
                 return -ENOMEM;

+        pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
         pmu->cpu = raw_smp_processor_id();
         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                       DDR_CPUHP_CB_NAME,

drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c

@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,

         /* Read and init IRQ */
         irq = platform_get_irq(pdev, 0);
-        if (irq < 0) {
-                dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+        if (irq < 0)
                 return irq;
-        }

         ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,

drivers/perf/hisilicon/hisi_uncore_hha_pmu.c

@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,

         /* Read and init IRQ */
         irq = platform_get_irq(pdev, 0);
-        if (irq < 0) {
-                dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+        if (irq < 0)
                 return irq;
-        }

         ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,

drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c

@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,

         /* Read and init IRQ */
         irq = platform_get_irq(pdev, 0);
-        if (irq < 0) {
-                dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+        if (irq < 0)
                 return irq;
-        }

         ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,

drivers/perf/qcom_l2_pmu.c

@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
         cluster->cluster_id = fw_cluster_id;

         irq = platform_get_irq(sdev, 0);
-        if (irq < 0) {
-                dev_err(&pdev->dev,
-                        "Failed to get valid irq for cluster %ld\n",
-                        fw_cluster_id);
+        if (irq < 0)
                 return irq;
-        }
         irq_set_status_flags(irq, IRQ_NOAUTOEN);
         cluster->irq = irq;

drivers/perf/xgene_pmu.c

@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
         }

         irq = platform_get_irq(pdev, 0);
-        if (irq < 0) {
-                dev_err(&pdev->dev, "No IRQ resource\n");
+        if (irq < 0)
                 return -EINVAL;
-        }

         rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
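
The five hunks above share one cleanup: platform_get_irq() now prints its own error message when it fails, so the per-driver dev_err() calls are redundant and callers simply propagate the error code. The resulting idiom, as a sketch (example_probe and example_isr are hypothetical stand-ins, not functions from this series):

        /* Sketch of the post-cleanup idiom; example_isr is a stand-in ISR. */
        static int example_probe(struct platform_device *pdev)
        {
                int irq = platform_get_irq(pdev, 0);

                if (irq < 0)
                        return irq;     /* core code already logged the error */

                return devm_request_irq(&pdev->dev, irq, example_isr,
                                        IRQF_NOBALANCING | IRQF_NO_THREAD,
                                        dev_name(&pdev->dev), NULL);
        }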