trace_event_raw_percpu_alloc_percpu
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
4.15: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
4.18: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.0: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.3: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.4: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.8: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.11: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.13: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.15: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
5.19: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
long unsigned int call_site;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
size_t bytes_alloc;
long unsigned int gfp_flags;
char __data[0];
};
6.2: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
long unsigned int call_site;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
size_t bytes_alloc;
long unsigned int gfp_flags;
char __data[0];
};
6.5: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
long unsigned int call_site;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
size_t bytes_alloc;
long unsigned int gfp_flags;
char __data[0];
};
6.8: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
long unsigned int call_site;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
size_t bytes_alloc;
long unsigned int gfp_flags;
char __data[0];
};
arm64: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
armhf: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
ppc64el: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
riscv64: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
aws: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
azure: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
gcp: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
lowlatency: ✅struct trace_event_raw_percpu_alloc_percpu {
struct trace_entry ent;
bool reserved;
bool is_atomic;
size_t size;
size_t align;
void *base_addr;
int off;
void *ptr;
char __data[0];
};
Regular
4.13 and 4.15 ✅
4.15 and 4.18 ✅
4.18 and 5.0 ✅
5.0 and 5.3 ✅
5.3 and 5.4 ✅
5.4 and 5.8 ✅
5.8 and 5.11 ✅
5.11 and 5.13 ✅
5.13 and 5.15 ✅
5.15 and 5.19 ⚠️
long unsigned int call_site
size_t bytes_alloc
long unsigned int gfp_flags
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ✅
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅