28 #include <linux/perf_event.h>
33 #include <rte_compat.h>
37 #if defined(RTE_ARCH_ARM64)
38 #include "rte_pmu_pmc_arm64.h"
39 #elif defined(RTE_ARCH_X86_64)
40 #include "rte_pmu_pmc_x86_64.h"
/* Maximum number of perf events that can be grouped together per lcore
 * (sizes the mmap_pages[] array declared later in this header). */
48 #define RTE_MAX_NUM_GROUP_EVENTS 8
/* Fallback stub used when no architecture-specific PMC reader was pulled in
 * by the RTE_ARCH_* includes above: consumes the index and yields 0, so
 * callers compile on unsupported architectures but always read zero. */
76 #ifndef rte_pmu_pmc_read
77 #define rte_pmu_pmc_read(index) ({ RTE_SET_USED(index); 0; })
/* Read x exactly once through a const volatile lvalue, preventing the
 * compiler from caching or re-fetching the value — needed because the
 * perf_event_mmap_page fields read below are updated concurrently by the
 * kernel (READ_ONCE-style access). */
97 #define __RTE_PMU_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
98 uint64_t width, offset;
103 seq = __RTE_PMU_READ_ONCE(pc->lock);
105 index = __RTE_PMU_READ_ONCE(pc->index);
106 offset = __RTE_PMU_READ_ONCE(pc->offset);
107 width = __RTE_PMU_READ_ONCE(pc->pmc_width);
110 if (
likely(pc->cap_user_rdpmc && index)) {
111 pmc = rte_pmu_pmc_read(index - 1);
119 if (
likely(__RTE_PMU_READ_ONCE(pc->lock) == seq))
206 #ifdef ALLOW_EXPERIMENTAL_API
214 if (
unlikely(lcore_id >= RTE_MAX_LCORE))
#define rte_compiler_barrier()
#define __rte_cache_aligned
#define __rte_always_inline
static unsigned rte_lcore_id(void)
__rte_experimental int rte_pmu_add_event(const char *name)
static __rte_experimental __rte_always_inline uint64_t rte_pmu_read(unsigned int index)
__rte_experimental int __rte_pmu_enable_group(struct rte_pmu_event_group *group)
__rte_experimental int rte_pmu_init(void)
__rte_experimental void rte_pmu_fini(void)
#define RTE_MAX_NUM_GROUP_EVENTS
static __rte_experimental __rte_always_inline uint64_t __rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
struct perf_event_mmap_page * mmap_pages[RTE_MAX_NUM_GROUP_EVENTS]
TAILQ_ENTRY(rte_pmu_event_group) next
TAILQ_HEAD(, rte_pmu_event) event_list
struct rte_pmu_event_group event_groups[RTE_MAX_LCORE]
unsigned int num_group_events