perfmon.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */

#ifndef _PERFMON_H_
#define _PERFMON_H_

#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
#include <linux/idxd.h>
#include <linux/perf_event.h>
#include "registers.h"

static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
{
	struct idxd_pmu *idxd_pmu;
	struct pmu *pmu;

	pmu = event->pmu;
	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu;
}

static inline struct idxd_device *event_to_idxd(struct perf_event *event)
{
	struct idxd_pmu *idxd_pmu;
	struct pmu *pmu;

	pmu = event->pmu;
	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu->idxd;
}

static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
{
	struct idxd_pmu *idxd_pmu;

	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu->idxd;
}
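
/*
 * The three helpers above recover the driver-private struct idxd_pmu (and,
 * from it, the owning struct idxd_device) from the generic perf objects via
 * container_of(); this works because struct idxd_pmu embeds its struct pmu.
 * A pmu callback would typically open with something like (illustrative
 * sketch only):
 *
 *	struct idxd_device *idxd = event_to_idxd(event);
 */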

enum dsa_perf_events {
	DSA_PERF_EVENT_WQ = 0,
	DSA_PERF_EVENT_ENGINE,
	DSA_PERF_EVENT_ADDR_TRANS,
	DSA_PERF_EVENT_OP,
	DSA_PERF_EVENT_COMPL,
	DSA_PERF_EVENT_MAX,
};
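
/*
 * The values above enumerate the DSA perfmon event categories - work queue,
 * engine, address translation, operation and completion - with
 * DSA_PERF_EVENT_MAX serving as the category count.
 */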

enum filter_enc {
	FLT_WQ = 0,
	FLT_TC,
	FLT_PG_SZ,
	FLT_XFER_SZ,
	FLT_ENG,
	FLT_MAX,
};
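
/*
 * The filter encodings index the per-counter filter configuration fields:
 * work queue, traffic class, page size, transfer size and engine.  FLTCFG_REG()
 * below steps 4 bytes per filter within a 32-byte per-counter block.
 */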

#define CONFIG_RESET		0x0000000000000001
#define CNTR_RESET		0x0000000000000002
#define CNTR_ENABLE		0x0000000000000001
#define INTR_OVFL		0x0000000000000002

#define COUNTER_FREEZE		0x00000000FFFFFFFF
#define COUNTER_UNFREEZE	0x0000000000000000
#define OVERFLOW_SIZE		32

#define CNTRCFG_ENABLE		BIT(0)
#define CNTRCFG_IRQ_OVERFLOW	BIT(1)
#define CNTRCFG_CATEGORY_SHIFT	8
#define CNTRCFG_EVENT_SHIFT	32
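
/*
 * Illustrative sketch only: a counter configuration word could be assembled
 * from the bits and shifts above roughly as
 *
 *	u64 cntr_cfg = CNTRCFG_ENABLE | CNTRCFG_IRQ_OVERFLOW |
 *		       ((u64)category << CNTRCFG_CATEGORY_SHIFT) |
 *		       ((u64)events << CNTRCFG_EVENT_SHIFT);
 *
 * where 'category' and 'events' are placeholder names for values chosen by
 * the driver; the exact field layout is defined by the hardware spec, not by
 * this header.
 */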

#define PERFMON_TABLE_OFFSET(_idxd)				\
({								\
	typeof(_idxd) __idxd = (_idxd);				\
	((__idxd)->reg_base + (__idxd)->perfmon_offset);	\
})

#define PERFMON_REG_OFFSET(idxd, offset)	\
	(PERFMON_TABLE_OFFSET(idxd) + (offset))

#define PERFCAP_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))
#define PERFRST_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))
#define OVFSTATUS_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))
#define PERFFRZ_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))

#define FLTCFG_REG(idxd, cntr, flt)				\
	(PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))

#define CNTRCFG_REG(idxd, cntr)					\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8))

#define CNTRDATA_REG(idxd, cntr)				\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8))

#define CNTRCAP_REG(idxd, cntr)					\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8))

#define EVNTCAP_REG(idxd, category)				\
	(PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8))
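
/*
 * The *_REG() macros above yield ioremapped MMIO addresses (reg_base plus the
 * device's perfmon table offset plus a per-register offset), so a caller
 * would use the usual MMIO accessors, e.g. (illustrative sketch only):
 *
 *	iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
 *	count = ioread64(CNTRDATA_REG(idxd, cntr));
 *
 * 'cntr_cfg', 'cntr' and 'count' are placeholder names; ioread64()/iowrite64()
 * need one of the io-64-nonatomic headers on 32-bit builds.
 */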

#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format)			\
static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_idxd_##_name =		\
	__ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL)
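
/*
 * DEFINE_PERFMON_FORMAT_ATTR() expands to a show() routine that prints the
 * literal format string and to a read-only kobj_attribute named
 * format_attr_idxd_<name>, for use in the PMU's sysfs "format" group.  A
 * hypothetical instantiation might look like
 *
 *	DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:28-31");
 *
 * where the attribute name and config bit range are illustrative only.
 */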

#endif