12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949 |
- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * This driver adds support for PCIe PMU RCiEP device. Related
- * perf events are bandwidth, latency etc.
- *
- * Copyright (C) 2021 HiSilicon Limited
- * Author: Qi Liu <[email protected]>
- */
- #include <linux/bitfield.h>
- #include <linux/bitmap.h>
- #include <linux/bug.h>
- #include <linux/device.h>
- #include <linux/err.h>
- #include <linux/interrupt.h>
- #include <linux/irq.h>
- #include <linux/kernel.h>
- #include <linux/list.h>
- #include <linux/module.h>
- #include <linux/pci.h>
- #include <linux/perf_event.h>
#define DRV_NAME "hisi_pcie_pmu"

/* Define registers (offsets into the BAR-2 register space) */
#define HISI_PCIE_GLOBAL_CTRL		0x00
#define HISI_PCIE_EVENT_CTRL		0x010
#define HISI_PCIE_CNT			0x090
#define HISI_PCIE_EXT_CNT		0x110
#define HISI_PCIE_INT_STAT		0x150
#define HISI_PCIE_INT_MASK		0x154
#define HISI_PCIE_REG_BDF		0xfe0
#define HISI_PCIE_REG_VERSION		0xfe4
#define HISI_PCIE_REG_INFO		0xfe8

/* Define command in HISI_PCIE_GLOBAL_CTRL */
#define HISI_PCIE_GLOBAL_EN		0x01
#define HISI_PCIE_GLOBAL_NONE		0

/* Define command in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_EN		BIT_ULL(20)
#define HISI_PCIE_RESET_CNT		BIT_ULL(22)
#define HISI_PCIE_INIT_SET		BIT_ULL(34)
#define HISI_PCIE_THR_EN		BIT_ULL(26)
#define HISI_PCIE_TARGET_EN		BIT_ULL(32)
#define HISI_PCIE_TRIG_EN		BIT_ULL(52)

/*
 * Define offsets in HISI_PCIE_EVENT_CTRL.
 * Note EVENT_M covers bits [15:0] only: bit 16 of the user-visible
 * "event" config field is a software flag selecting the extension
 * counter (see EXT_COUNTER_IS_USED below), not a hardware bit.
 */
#define HISI_PCIE_EVENT_M		GENMASK_ULL(15, 0)
#define HISI_PCIE_THR_MODE_M		GENMASK_ULL(27, 27)
#define HISI_PCIE_THR_M			GENMASK_ULL(31, 28)
#define HISI_PCIE_TARGET_M		GENMASK_ULL(52, 36)
#define HISI_PCIE_TRIG_MODE_M		GENMASK_ULL(53, 53)
#define HISI_PCIE_TRIG_M		GENMASK_ULL(59, 56)

#define HISI_PCIE_MAX_COUNTERS		8
/* Byte stride between consecutive per-counter register instances */
#define HISI_PCIE_REG_STEP		8
#define HISI_PCIE_THR_MAX_VAL		10
#define HISI_PCIE_TRIG_MAX_VAL		10
#define HISI_PCIE_MAX_PERIOD		(GENMASK_ULL(63, 0))
#define HISI_PCIE_INIT_VAL		BIT_ULL(63)
/*
 * Per-device PMU state; one instance is allocated per PCIe PMU RCiEP
 * device in the probe path.
 */
struct hisi_pcie_pmu {
	struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS];	/* owner event of each HW counter */
	struct hlist_node node;			/* CPU hotplug instance list entry */
	struct pci_dev *pdev;
	struct pmu pmu;
	void __iomem *base;			/* mapped BAR 2 register space */
	int irq;
	u32 identifier;				/* cached HISI_PCIE_REG_VERSION */
	/* Minimum and maximum BDF of root ports monitored by PMU */
	u16 bdf_min;
	u16 bdf_max;
	int on_cpu;				/* CPU owning this PMU; -1 when unassigned */
};

/* Low/high 16-bit halves of a 32-bit register (see hisi_pcie_parse_reg_value()) */
struct hisi_pcie_reg_pair {
	u16 lo;
	u16 hi;
};

#define to_pcie_pmu(p)  (container_of((p), struct hisi_pcie_pmu, pmu))
#define GET_PCI_DEVFN(bdf)  ((bdf) & 0xff)
/*
 * Generate a hisi_pcie_get_<_name>() helper returning bits [_hi:_lo] of
 * event->attr.<_config>. The bit ranges mirror hisi_pcie_pmu_format_attr[].
 */
#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo)	\
	static u64 hisi_pcie_get_##_name(struct perf_event *event)	\
	{	\
		return FIELD_GET(GENMASK(_hi, _lo), event->attr._config);	\
	}	\

HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4);
HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5);
HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9);
HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
- static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct dev_ext_attribute *eattr;
- eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
- }
- static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct perf_pmu_events_attr *pmu_attr =
- container_of(attr, struct perf_pmu_events_attr, attr);
- return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
- }
/* Build a read-only dev_ext_attribute carrying a format string in .var. */
#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format)	\
	(&((struct dev_ext_attribute[]){	\
		{ .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show,	\
				 NULL),	\
		  .var = (void *)_format }	\
	})[0].attr.attr)

/* Build an event attribute whose id is shown by hisi_pcie_event_sysfs_show(). */
#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id)	\
	PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id)
- static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
- {
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
- return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
- }
- static DEVICE_ATTR_RO(cpumask);
/* sysfs "identifier": hardware version register, cached at probe time. */
static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier);
}
static DEVICE_ATTR_RO(identifier);
/* sysfs "bus": bus number of the lowest-BDF root port monitored by this PMU. */
static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min));
}
static DEVICE_ATTR_RO(bus);
/*
 * Read a 32-bit register and split it into its low/high 16-bit halves
 * (used for HISI_PCIE_REG_BDF and HISI_PCIE_REG_INFO).
 */
static struct hisi_pcie_reg_pair
hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
{
	u32 val = readl_relaxed(pcie_pmu->base + reg_off);
	struct hisi_pcie_reg_pair regs = {
		.lo = val,
		.hi = val >> 16,
	};

	return regs;
}
/*
 * Hardware counter and ext_counter work together for bandwidth, latency, bus
 * utilization and buffer occupancy events. For example, RX memory write latency
 * events(index = 0x0010), counter counts total delay cycles and ext_counter
 * counts RX memory write PCIe packets number.
 *
 * As we don't want PMU driver to process these two data, "delay cycles" can
 * be treated as an independent event(index = 0x0010), "RX memory write packets
 * number" as another(index = 0x10010). BIT 16 is used to distinguish and 0-15
 * bits are "real" event index, which can be used to set HISI_PCIE_EVENT_CTRL.
 */
#define EXT_COUNTER_IS_USED(idx)		((idx) & BIT(16))

/* Strip the software ext-counter flag, leaving the hardware event code. */
static u32 hisi_pcie_get_real_event(struct perf_event *event)
{
	return hisi_pcie_get_event(event) & GENMASK(15, 0);
}
/* Per-counter register address: each counter's registers are REG_STEP apart. */
static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx)
{
	return offset + HISI_PCIE_REG_STEP * idx;
}

/* Relaxed 32-bit read of counter @idx's instance of @reg_offset. */
static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
			       u32 idx)
{
	u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);

	return readl_relaxed(pcie_pmu->base + offset);
}

/* Relaxed 32-bit write to counter @idx's instance of @reg_offset. */
static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val)
{
	u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);

	writel_relaxed(val, pcie_pmu->base + offset);
}

/* Relaxed 64-bit read of counter @idx's instance of @reg_offset. */
static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx)
{
	u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);

	return readq_relaxed(pcie_pmu->base + offset);
}

/* Relaxed 64-bit write to counter @idx's instance of @reg_offset. */
static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val)
{
	u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);

	writeq_relaxed(val, pcie_pmu->base + offset);
}
/*
 * Assemble and program HISI_PCIE_EVENT_CTRL for @event's counter:
 * event code, target (root-port mask or requester BDF) and the optional
 * trigger/threshold conditions taken from config1/config2.
 */
static void hisi_pcie_pmu_config_filter(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = HISI_PCIE_INIT_SET;
	u64 port, trig_len, thr_len;

	/* Config HISI_PCIE_EVENT_CTRL according to event. */
	reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));

	/*
	 * Config HISI_PCIE_EVENT_CTRL according to root port or EP device.
	 * TARGET_EN is only set for the BDF (EP device) case.
	 */
	port = hisi_pcie_get_port(event);
	if (port)
		reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
	else
		reg |= HISI_PCIE_TARGET_EN |
		       FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event));

	/* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */
	trig_len = hisi_pcie_get_trig_len(event);
	if (trig_len) {
		reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len);
		reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event));
		reg |= HISI_PCIE_TRIG_EN;
	}

	/* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */
	thr_len = hisi_pcie_get_thr_len(event);
	if (thr_len) {
		reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len);
		reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event));
		reg |= HISI_PCIE_THR_EN;
	}

	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
}
/* Restore @event's counter control register to its initial (idle) value. */
static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET);
}
- static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf)
- {
- struct pci_dev *root_port, *pdev;
- u16 rp_bdf;
- pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf),
- GET_PCI_DEVFN(bdf));
- if (!pdev)
- return false;
- root_port = pcie_find_root_port(pdev);
- if (!root_port) {
- pci_dev_put(pdev);
- return false;
- }
- pci_dev_put(pdev);
- rp_bdf = pci_dev_id(root_port);
- return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max;
- }
- static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
- struct hisi_pcie_pmu *pcie_pmu)
- {
- u32 requester_id = hisi_pcie_get_bdf(event);
- if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL)
- return false;
- if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
- return false;
- if (requester_id) {
- if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
- return false;
- }
- return true;
- }
/*
 * Two events are "related" if they share the same hardware event code
 * (differing only in the ext-counter flag) and can share one counter.
 */
static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
				    struct perf_event *event)
{
	return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
}
- static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
- {
- struct perf_event *sibling, *leader = event->group_leader;
- struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS];
- int counters = 1;
- int num;
- event_group[0] = leader;
- if (!is_software_event(leader)) {
- if (leader->pmu != event->pmu)
- return false;
- if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event))
- event_group[counters++] = event;
- }
- for_each_sibling_event(sibling, event->group_leader) {
- if (is_software_event(sibling))
- continue;
- if (sibling->pmu != event->pmu)
- return false;
- for (num = 0; num < counters; num++) {
- if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
- break;
- }
- if (num == counters)
- event_group[counters++] = sibling;
- }
- return counters <= HISI_PCIE_MAX_COUNTERS;
- }
/*
 * perf callback: validate and initialise @event for this PMU.
 * Selects the plain or extension counter register as event_base based on
 * bit 16 of the event code, pins the event to the PMU's owning CPU, and
 * rejects sampling/per-task use, bad filters, and unschedulable groups.
 */
static int hisi_pcie_pmu_event_init(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Check the type first before going on, otherwise it's not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	event->cpu = pcie_pmu->on_cpu;

	if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
		hwc->event_base = HISI_PCIE_EXT_CNT;
	else
		hwc->event_base = HISI_PCIE_CNT;

	/* Sampling is not supported. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu))
		return -EINVAL;

	if (!hisi_pcie_pmu_validate_event_group(event))
		return -EINVAL;

	return 0;
}
/* Read the 64-bit counter (plain or ext, per event_base) backing @event. */
static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	u32 idx = event->hw.idx;

	return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
}
/*
 * Look for an already-scheduled event with the same real event code that
 * @event could share a counter with.
 *
 * Returns the counter index of the related event, -EINVAL if a related
 * event exists but is in a different group (sharing is only allowed
 * within a group), or HISI_PCIE_MAX_COUNTERS if no related event is
 * scheduled (caller then allocates a fresh counter).
 */
static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
					    struct perf_event *event)
{
	struct perf_event *sibling;
	int idx;

	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
		sibling = pcie_pmu->hw_events[idx];
		if (!sibling)
			continue;

		if (!hisi_pcie_pmu_cmp_event(sibling, event))
			continue;

		/* Related events must be used in group */
		if (sibling->group_leader == event->group_leader)
			return idx;
		else
			return -EINVAL;
	}

	return idx;
}
- static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
- {
- int idx;
- for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
- if (!pcie_pmu->hw_events[idx])
- return idx;
- }
- return -EINVAL;
- }
/*
 * Fold the hardware counter delta since the last read into event->count.
 * The cmpxchg loop makes the prev_count update safe against a concurrent
 * reader racing with the interrupt handler.
 */
static void hisi_pcie_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = hisi_pcie_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt,
				 new_cnt) != prev_cnt);

	/* Mask handles wrap-around of the free-running 64-bit counter. */
	delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;
	local64_add(delta, &event->count);
}
/* perf callback: refresh event->count from the hardware counter. */
static void hisi_pcie_pmu_read(struct perf_event *event)
{
	hisi_pcie_pmu_event_update(event);
}

/*
 * Reprogram both the counter and its extension counter to the mid-range
 * start value (bit 63 set) so an overflow interrupt fires after a full
 * half-period, and record that value as prev_count.
 */
static void hisi_pcie_pmu_set_period(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
}
/* Set the per-counter enable bit in HISI_PCIE_EVENT_CTRL. */
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 val;

	val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
	val |= HISI_PCIE_EVENT_EN;
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
}

/* Clear the per-counter enable bit in HISI_PCIE_EVENT_CTRL. */
static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 val;

	val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
	val &= ~HISI_PCIE_EVENT_EN;
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
}

/* Unmask (0 = enabled) the overflow interrupt of counter @hwc->idx. */
static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;

	hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0);
}

/* Mask (1 = disabled) the overflow interrupt of counter @hwc->idx. */
static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;

	hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1);
}
/* Pulse the reset bit, then return the control register to its idle value. */
static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx)
{
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET);
}
/*
 * perf callback: (re)start counting for @event.
 * Order matters: program the filter, enable counter and interrupt, then
 * seed the period. With PERF_EF_RELOAD the saved prev_count is written
 * back so counting resumes from where it was stopped.
 */
static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 prev_cnt;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	hisi_pcie_pmu_config_filter(event);
	hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		prev_cnt = local64_read(&hwc->prev_count);
		hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt);
	}

	perf_event_update_userpage(event);
}
/*
 * perf callback: stop counting for @event.
 * Harvest the final count before disabling the interrupt/counter and
 * clearing the filter, then mark the event stopped and up to date.
 */
static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pcie_pmu_event_update(event);
	hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_clear_filter(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	hwc->state |= PERF_HES_UPTODATE;
}
/*
 * perf callback: schedule @event onto a hardware counter.
 * A related event already on a counter is shared (no reset, no ownership
 * in hw_events[]); otherwise a free counter is claimed and reset.
 */
static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	/* Check all working events to find a related event. */
	idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
	if (idx < 0)
		return idx;

	/* Current event shares an enabled counter with the related event */
	if (idx < HISI_PCIE_MAX_COUNTERS) {
		hwc->idx = idx;
		goto start_count;
	}

	idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	pcie_pmu->hw_events[idx] = event;
	/* Reset Counter to avoid previous statistic interference. */
	hisi_pcie_pmu_reset_counter(pcie_pmu, idx);

start_count:
	if (flags & PERF_EF_START)
		hisi_pcie_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}
/* perf callback: stop @event and release its hardware counter slot. */
static void hisi_pcie_pmu_del(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pcie_pmu_stop(event, PERF_EF_UPDATE);
	pcie_pmu->hw_events[hwc->idx] = NULL;
	perf_event_update_userpage(event);
}
- static void hisi_pcie_pmu_enable(struct pmu *pmu)
- {
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
- int num;
- for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) {
- if (pcie_pmu->hw_events[num])
- break;
- }
- if (num == HISI_PCIE_MAX_COUNTERS)
- return;
- writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
- }
/* perf callback: globally stop all counters of this PMU. */
static void hisi_pcie_pmu_disable(struct pmu *pmu)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);

	writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
}
/*
 * Overflow interrupt handler: for every counter with a pending overflow,
 * acknowledge it, fold the counted value into the event and re-seed the
 * period. Status is cleared even for counters with no owning event so
 * stale overflows cannot wedge the interrupt line.
 */
static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data)
{
	struct hisi_pcie_pmu *pcie_pmu = data;
	irqreturn_t ret = IRQ_NONE;
	struct perf_event *event;
	u32 overflown;
	int idx;

	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
		overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx);
		if (!overflown)
			continue;

		/* Clear status of interrupt. */
		hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1);
		event = pcie_pmu->hw_events[idx];
		if (!event)
			continue;

		hisi_pcie_pmu_event_update(event);
		hisi_pcie_pmu_set_period(event);
		ret = IRQ_HANDLED;
	}

	return ret;
}
/*
 * Allocate a single MSI vector and hook up the overflow handler.
 * IRQF_NOBALANCING keeps the affinity under driver control (it is
 * retargeted on CPU hotplug); the vectors are freed on failure.
 */
static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	int irq, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret);
		return ret;
	}

	irq = pci_irq_vector(pdev, 0);
	ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
			  pcie_pmu);
	if (ret) {
		pci_err(pdev, "Failed to register IRQ: %d\n", ret);
		pci_free_irq_vectors(pdev);
		return ret;
	}

	pcie_pmu->irq = irq;

	return 0;
}

/* Undo hisi_pcie_pmu_irq_register(): release the handler, then the vectors. */
static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	free_irq(pcie_pmu->irq, pcie_pmu);
	pci_free_irq_vectors(pdev);
}
/*
 * CPU hotplug "online" callback: adopt the first CPU that comes up as the
 * counting CPU if the PMU does not have one yet, and steer the overflow
 * interrupt to it.
 */
static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);

	if (pcie_pmu->on_cpu == -1) {
		pcie_pmu->on_cpu = cpu;
		WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(cpu)));
	}

	return 0;
}

/*
 * CPU hotplug "offline" callback: if the departing CPU owns this PMU,
 * migrate the perf context and interrupt affinity to any other online
 * CPU; otherwise do nothing.
 */
static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
	unsigned int target;

	/* Nothing to do if this CPU doesn't own the PMU */
	if (pcie_pmu->on_cpu != cpu)
		return 0;

	pcie_pmu->on_cpu = -1;
	/* Choose a new CPU from all online cpus. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids) {
		pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
		return 0;
	}

	perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
	/* Use this CPU for event counting */
	pcie_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target)));

	return 0;
}
/*
 * Supported events. Pairs such as rx_mwr_latency/rx_mwr_cnt share one
 * hardware counter; the 0x10000 bit selects the extension counter
 * (see EXT_COUNTER_IS_USED).
 */
static struct attribute *hisi_pcie_pmu_events_attr[] = {
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
	NULL
};

static struct attribute_group hisi_pcie_pmu_events_group = {
	.name = "events",
	.attrs = hisi_pcie_pmu_events_attr,
};

/* Config field layouts; must match HISI_PCIE_PMU_FILTER_ATTR bit ranges. */
static struct attribute *hisi_pcie_pmu_format_attr[] = {
	HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"),
	HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"),
	HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"),
	HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"),
	HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"),
	HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"),
	HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"),
	NULL
};

static const struct attribute_group hisi_pcie_pmu_format_group = {
	.name = "format",
	.attrs = hisi_pcie_pmu_format_attr,
};

static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
	&dev_attr_bus.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_bus_attr_group = {
	.attrs = hisi_pcie_pmu_bus_attrs,
};

static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = {
	.attrs = hisi_pcie_pmu_cpumask_attrs,
};

static struct attribute *hisi_pcie_pmu_identifier_attrs[] = {
	&dev_attr_identifier.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = {
	.attrs = hisi_pcie_pmu_identifier_attrs,
};

/* All sysfs groups exposed under the PMU device directory. */
static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = {
	&hisi_pcie_pmu_events_group,
	&hisi_pcie_pmu_format_group,
	&hisi_pcie_pmu_bus_attr_group,
	&hisi_pcie_pmu_cpumask_attr_group,
	&hisi_pcie_pmu_identifier_attr_group,
	NULL
};
/*
 * Populate @pcie_pmu from hardware registers (monitored BDF window,
 * SICL/core ids, version) and fill in the struct pmu callbacks. The PMU
 * name encodes the SICL and core ids so multiple instances stay distinct.
 * Returns -ENOMEM if the name allocation fails.
 */
static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	struct hisi_pcie_reg_pair regs;
	u16 sicl_id, core_id;
	char *name;

	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF);
	pcie_pmu->bdf_min = regs.lo;
	pcie_pmu->bdf_max = regs.hi;

	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO);
	sicl_id = regs.hi;
	core_id = regs.lo;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id);
	if (!name)
		return -ENOMEM;

	pcie_pmu->pdev = pdev;
	pcie_pmu->on_cpu = -1;
	pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION);
	pcie_pmu->pmu = (struct pmu) {
		.name		= name,
		.module		= THIS_MODULE,
		.event_init	= hisi_pcie_pmu_event_init,
		.pmu_enable	= hisi_pcie_pmu_enable,
		.pmu_disable	= hisi_pcie_pmu_disable,
		.add		= hisi_pcie_pmu_add,
		.del		= hisi_pcie_pmu_del,
		.start		= hisi_pcie_pmu_start,
		.stop		= hisi_pcie_pmu_stop,
		.read		= hisi_pcie_pmu_read,
		.task_ctx_nr	= perf_invalid_context,
		.attr_groups	= hisi_pcie_pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	return 0;
}
/*
 * Bring the PMU fully up: map BAR 2, read hardware info, register the
 * IRQ, the CPU hotplug instance and finally the perf PMU. Each error
 * label unwinds exactly the steps completed before it, in reverse order.
 */
static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	int ret;

	pcie_pmu->base = pci_ioremap_bar(pdev, 2);
	if (!pcie_pmu->base) {
		pci_err(pdev, "Ioremap failed for pcie_pmu resource\n");
		return -ENOMEM;
	}

	ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	if (ret) {
		pci_err(pdev, "Failed to register hotplug: %d\n", ret);
		goto err_irq_unregister;
	}

	ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1);
	if (ret) {
		pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret);
		goto err_hotplug_unregister;
	}

	return ret;

err_hotplug_unregister:
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);

err_irq_unregister:
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);

err_iounmap:
	iounmap(pcie_pmu->base);

	return ret;
}

/* Tear down everything hisi_pcie_init_pmu() set up, in reverse order. */
static void hisi_pcie_uninit_pmu(struct pci_dev *pdev)
{
	struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev);

	perf_pmu_unregister(&pcie_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
	iounmap(pcie_pmu->base);
}
/*
 * Basic managed PCI bring-up: enable the device, claim the BAR 2 region
 * and enable bus mastering (required for MSI delivery). pcim_* resources
 * are released automatically by devres on unbind.
 */
static int hisi_pcie_init_dev(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		pci_err(pdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
	if (ret < 0) {
		pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}
/*
 * PCI probe: allocate the per-device state, initialise the PCI device
 * and the PMU, and stash the state as drvdata for remove().
 */
static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_pcie_pmu *pcie_pmu;
	int ret;

	pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
	if (!pcie_pmu)
		return -ENOMEM;

	ret = hisi_pcie_init_dev(pdev);
	if (ret)
		return ret;

	ret = hisi_pcie_init_pmu(pdev, pcie_pmu);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, pcie_pmu);

	return ret;
}

/* PCI remove: unwind hisi_pcie_init_pmu() and drop the drvdata pointer. */
static void hisi_pcie_pmu_remove(struct pci_dev *pdev)
{
	hisi_pcie_uninit_pmu(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* Matches the HiSilicon PCIe PMU RCiEP device. */
static const struct pci_device_id hisi_pcie_pmu_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids);

static struct pci_driver hisi_pcie_pmu_driver = {
	.name = DRV_NAME,
	.id_table = hisi_pcie_pmu_ids,
	.probe = hisi_pcie_pmu_probe,
	.remove = hisi_pcie_pmu_remove,
};
/*
 * Module init: set up the dynamic CPU hotplug state shared by all PMU
 * instances before registering the PCI driver; tear it down again if
 * driver registration fails.
 */
static int __init hisi_pcie_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
				      "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE",
				      hisi_pcie_pmu_online_cpu,
				      hisi_pcie_pmu_offline_cpu);
	if (ret) {
		pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&hisi_pcie_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);

	return ret;
}
module_init(hisi_pcie_module_init);

/* Module exit: unregister the driver, then remove the hotplug state. */
static void __exit hisi_pcie_module_exit(void)
{
	pci_unregister_driver(&hisi_pcie_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
}
module_exit(hisi_pcie_module_exit);

MODULE_DESCRIPTION("HiSilicon PCIe PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <[email protected]>");
|