percpu_alloc_percpu
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
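The entries below repeat this 4.13 shape unchanged until 5.19. As a consumer-side illustration (not part of the recorded data), here is a minimal libbpf CO-RE sketch that attaches to this tracepoint and logs fields present in every layout on this page; it assumes a vmlinux.h generated from the running kernel's BTF, and the program and section names are my own:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tracepoint/percpu/percpu_alloc_percpu")
int handle_percpu_alloc(struct trace_event_raw_percpu_alloc_percpu *ctx)
{
    /* size, align and off exist in every layout shown on this page. */
    bpf_printk("percpu alloc: size=%lu align=%lu off=%d",
               (unsigned long)ctx->size, (unsigned long)ctx->align,
               ctx->off);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";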
4.15: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
4.18: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.0: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.3: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.4: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.8: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.11: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.13: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.15: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
5.19: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    long unsigned int call_site;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    size_t bytes_alloc;
    long unsigned int gfp_flags;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, long unsigned int call_site, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr, size_t bytes_alloc, gfp_t gfp_flags);
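Starting with this 5.19 layout the records also carry call_site, bytes_alloc and gfp_flags. A hypothetical userspace sketch (paths assume tracefs mounted at /sys/kernel/tracing, and running as root) that enables the event and streams the formatted records:

#include <stdio.h>

int main(void)
{
    /* Turn the percpu/percpu_alloc_percpu event on. */
    FILE *en = fopen(
        "/sys/kernel/tracing/events/percpu/percpu_alloc_percpu/enable", "w");
    if (!en)
        return 1;
    fputs("1", en);
    fclose(en);

    /* Stream formatted records; on 5.19+ each line also includes
       call_site, bytes_alloc and gfp_flags. */
    FILE *tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
    if (!tp)
        return 1;
    char line[512];
    while (fgets(line, sizeof(line), tp))
        fputs(line, stdout);
    return 0;
}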
6.2: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    long unsigned int call_site;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    size_t bytes_alloc;
    long unsigned int gfp_flags;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, long unsigned int call_site, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr, size_t bytes_alloc, gfp_t gfp_flags);
6.5: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    long unsigned int call_site;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    size_t bytes_alloc;
    long unsigned int gfp_flags;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, long unsigned int call_site, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr, size_t bytes_alloc, gfp_t gfp_flags);
6.8: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    long unsigned int call_site;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    size_t bytes_alloc;
    long unsigned int gfp_flags;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, long unsigned int call_site, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr, size_t bytes_alloc, gfp_t gfp_flags);
arm64: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
armhf: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
ppc64el: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
riscv64: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
aws: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
azure: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
gcp: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
lowlatency: ✅
Event:
struct trace_event_raw_percpu_alloc_percpu {
    struct trace_entry ent;
    bool reserved;
    bool is_atomic;
    size_t size;
    size_t align;
    void *base_addr;
    int off;
    void *ptr;
    char __data[0];
};
Function:
void trace_event_raw_event_percpu_alloc_percpu(void *__data, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void *ptr);
Regular
4.13 and 4.15 ✅
4.15 and 4.18 ✅
4.18 and 5.0 ✅
5.0 and 5.3 ✅
5.3 and 5.4 ✅
5.4 and 5.8 ✅
5.8 and 5.11 ✅
5.11 and 5.13 ✅
5.13 and 5.15 ✅
5.15 and 5.19 ⚠️
Fields added to the event struct:
    long unsigned int call_site
    size_t bytes_alloc
    long unsigned int gfp_flags
Parameters added to the function:
    long unsigned int call_site
    size_t bytes_alloc
    gfp_t gfp_flags
Function signature change (see the CO-RE sketch after this list):
    __data, reserved, is_atomic, size, align, base_addr, off, ptr ➡️ __data, call_site, reserved, is_atomic, size, align, base_addr, off, ptr, bytes_alloc, gfp_flags
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ✅
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅
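The only layout break in the tables above is between 5.15 and 5.19. A version-portable consumer can absorb it with a CO-RE field-existence check; the following is a sketch under the same vmlinux.h assumption as the earlier example, not a definitive implementation:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("tracepoint/percpu/percpu_alloc_percpu")
int handle_percpu_alloc_portable(struct trace_event_raw_percpu_alloc_percpu *ctx)
{
    unsigned long call_site = 0;

    /* call_site only exists from 5.19 on; guard the read so the same
       object also loads against the 4.13..5.15 layouts. */
    if (bpf_core_field_exists(ctx->call_site))
        call_site = BPF_CORE_READ(ctx, call_site);

    bpf_printk("percpu alloc: off=%d call_site=%lx", ctx->off, call_site);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";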