perf_event — Regular
4.4: ✅struct perf_event {
struct list_head event_entry;
struct list_head group_entry;
struct list_head sibling_list;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
int cgrp_defer_enabled;
};
4.8: ✅struct perf_event {
struct list_head event_entry;
struct list_head group_entry;
struct list_head sibling_list;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
int cgrp_defer_enabled;
struct list_head sb_list;
};
4.10: ✅struct perf_event {
struct list_head event_entry;
struct list_head group_entry;
struct list_head sibling_list;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
int cgrp_defer_enabled;
struct list_head sb_list;
};
4.13: ✅struct perf_event {
struct list_head event_entry;
struct list_head group_entry;
struct list_head sibling_list;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
int cgrp_defer_enabled;
struct list_head sb_list;
};
4.15: ✅struct perf_event {
struct list_head event_entry;
struct list_head group_entry;
struct list_head sibling_list;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
4.18: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
5.0: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
long unsigned int *addr_filters_offs;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
5.3: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
5.4: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
5.8: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
5.11: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
5.13: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
long unsigned int pending_addr;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
5.15: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
long unsigned int pending_addr;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
5.19: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
long unsigned int pending_addr;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
6.2: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
unsigned int pending_sigtrap;
long unsigned int pending_addr;
struct irq_work pending_irq;
struct callback_head pending_task;
unsigned int pending_work;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
atomic64_t lost_samples;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
};
6.5: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
unsigned int pending_sigtrap;
long unsigned int pending_addr;
struct irq_work pending_irq;
struct callback_head pending_task;
unsigned int pending_work;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
atomic64_t lost_samples;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
__u32 orig_type;
};
6.8: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
unsigned int group_generation;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
unsigned int pending_sigtrap;
long unsigned int pending_addr;
struct irq_work pending_irq;
struct callback_head pending_task;
unsigned int pending_work;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
atomic64_t lost_samples;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
void *security;
struct list_head sb_list;
__u32 orig_type;
};
arm64: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
armhf: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
ppc64el: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
riscv64: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
aws: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
azure: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
gcp: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
lowlatency: ✅struct perf_event {
struct list_head event_entry;
struct list_head sibling_list;
struct list_head active_list;
struct rb_node group_node;
u64 group_index;
struct list_head migrate_entry;
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
int event_caps;
int group_caps;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
atomic_long_t refcount;
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
struct mutex mmap_mutex;
atomic_t mmap_count;
struct ring_buffer *rb;
struct list_head rb_entry;
long unsigned int rcu_batches;
int rcu_pending;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
struct perf_addr_filters_head addr_filters;
struct perf_addr_filter_range *addr_filter_ranges;
long unsigned int addr_filters_gen;
struct perf_event *aux_event;
void (*destroy)(struct perf_event *);
struct callback_head callback_head;
struct pid_namespace *ns;
u64 id;
u64 (*clock)();
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
struct trace_event_call *tp_event;
struct event_filter *filter;
struct ftrace_ops ftrace_ops;
struct perf_cgroup *cgrp;
struct list_head sb_list;
};
Regular
4.4 and 4.8 ⚠️void *pmu_private
struct perf_addr_filters_head addr_filters
long unsigned int *addr_filters_offs
long unsigned int addr_filters_gen
struct list_head sb_list
4.8 and 4.10 ⚠️int event_caps
int group_caps
perf_overflow_handler_t orig_overflow_handler
struct bpf_prog *prog
int group_flags
4.10 and 4.13 ✅
4.13 and 4.15 ⚠️u64 tstamp
u64 tstamp_enabled
u64 tstamp_running
u64 tstamp_stopped
int cgrp_defer_enabled
enum perf_event_active_state state ➡️ enum perf_event_state state
4.15 and 4.18 ⚠️struct list_head active_list
struct rb_node group_node
u64 group_index
struct list_head group_entry
4.18 and 5.0 ✅
5.0 and 5.3 ⚠️struct perf_addr_filter_range *addr_filter_ranges
long unsigned int *addr_filters_offs
5.3 and 5.4 ⚠️struct perf_event *aux_event
5.4 and 5.8 ⚠️void *security
struct ring_buffer *rb ➡️ struct perf_buffer *rb
5.8 and 5.11 ✅
5.11 and 5.13 ⚠️long unsigned int pending_addr
5.13 and 5.15 ⚠️u64 bpf_cookie
u64 shadow_ctx_time
5.15 and 5.19 ✅
5.19 and 6.2 ⚠️struct perf_event_pmu_context *pmu_ctx
unsigned int pending_sigtrap
struct irq_work pending_irq
struct callback_head pending_task
unsigned int pending_work
atomic64_t lost_samples
struct irq_work pending
int pending_wakeup ➡️ unsigned int pending_wakeup
int pending_kill ➡️ unsigned int pending_kill
int pending_disable ➡️ unsigned int pending_disable
6.2 and 6.5 ⚠️__u32 orig_type
6.5 and 6.8 ⚠️unsigned int group_generation
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅