bpf_map
Regular
4.4: ✅
struct bpf_map {
atomic_t refcnt;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 pages;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
};
4.8: ✅
struct bpf_map {
atomic_t refcnt;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
};
4.10: ✅
struct bpf_map {
atomic_t refcnt;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
};
4.13: ✅
struct bpf_map {
atomic_t refcnt;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
u32 id;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
struct bpf_map *inner_map_meta;
};
4.15: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
u32 id;
int numa_node;
bool unpriv_array;
struct user_struct *user;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
4.18: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
bool unpriv_array;
struct user_struct *user;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
5.0: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
u32 pages;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
bool unpriv_array;
struct user_struct *user;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
5.3: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
5.4: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
5.8: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
char name[16];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
bool frozen;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
u64 writecnt;
};
5.11: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct mem_cgroup *memcg;
char name[16];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
bool frozen;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
u64 writecnt;
};
5.13: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct mem_cgroup *memcg;
char name[16];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
bool frozen;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
u64 writecnt;
};
5.15: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
int timer_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct mem_cgroup *memcg;
char name[16];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
bool frozen;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
atomic64_t writecnt;
};
5.19: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u64 map_extra;
u32 map_flags;
int spin_lock_off;
struct bpf_map_value_off *kptr_off_tab;
int timer_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
struct mem_cgroup *memcg;
char name[16];
struct bpf_map_off_arr *off_arr;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
atomic64_t writecnt;
struct (anon) owner;
bool bypass_spec_v1;
bool frozen;
};
6.2: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u64 map_extra;
u32 map_flags;
u32 id;
struct btf_record *record;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
struct obj_cgroup *objcg;
char name[16];
struct btf_field_offs *field_offs;
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
atomic64_t writecnt;
struct (anon) owner;
bool bypass_spec_v1;
bool frozen;
};
6.5: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u64 map_extra;
u32 map_flags;
u32 id;
struct btf_record *record;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
struct obj_cgroup *objcg;
char name[16];
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
atomic64_t writecnt;
struct (anon) owner;
bool bypass_spec_v1;
bool frozen;
};
6.8: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u64 map_extra;
u32 map_flags;
u32 id;
struct btf_record *record;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
struct obj_cgroup *objcg;
char name[16];
atomic64_t refcnt;
atomic64_t usercnt;
struct work_struct work;
struct callback_head rcu;
struct mutex freeze_mutex;
atomic64_t writecnt;
struct (anon) owner;
bool bypass_spec_v1;
bool frozen;
bool free_after_mult_rcu_gp;
bool free_after_rcu_gp;
atomic64_t sleepable_refcnt;
s64 *elem_count;
};
arm64: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
armhf: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
ppc64el: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
riscv64: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
aws: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
azure: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
gcp: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
lowlatency: ✅
struct bpf_map {
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
void *security;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u32 map_flags;
int spin_lock_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
bool unpriv_array;
bool frozen;
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
char name[16];
};
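Across the layouts listed above the field names are largely stable, but their offsets shift as members are added, removed, or reordered between versions. The sketch below is a minimal illustration of how a libbpf CO-RE program reads such fields portably; it assumes a vmlinux.h generated from the target kernel's BTF, and the attach point and program name are illustrative choices, not taken from this table.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

/* Illustrative attach point; any probe that hands us a struct bpf_map *
 * would do. BPF_CORE_READ() records CO-RE relocations, so the field
 * offsets are resolved against the running kernel's layout at load time. */
SEC("kprobe/bpf_map_put")
int BPF_KPROBE(trace_map_put, struct bpf_map *map)
{
    u32 key_size    = BPF_CORE_READ(map, key_size);
    u32 value_size  = BPF_CORE_READ(map, value_size);
    u32 max_entries = BPF_CORE_READ(map, max_entries);

    bpf_printk("bpf_map: key=%u value=%u max=%u",
               key_size, value_size, max_entries);
    return 0;
}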
Regular
4.4 and 4.8: ⚠️
u32 map_flags
4.8 and 4.10: ✅
4.10 and 4.13: ⚠️
u32 id
struct bpf_map *inner_map_meta
4.13 and 4.15: ⚠️
void *security
int numa_node
bool unpriv_array
char name[16]
4.15 and 4.18: ⚠️
u32 btf_key_type_id
u32 btf_value_type_id
struct btf *btf
4.18 and 5.0: ✅
5.0 and 5.3: ⚠️
int spin_lock_off
struct bpf_map_memory memory
bool frozen
u32 pages
struct user_struct *user
5.3 and 5.4: ✅
5.4 and 5.8: ⚠️
u32 btf_vmlinux_value_type_id
bool bypass_spec_v1
struct mutex freeze_mutex
u64 writecnt
bool unpriv_array
atomic_t refcnt ➡️ atomic64_t refcnt
atomic_t usercnt ➡️ atomic64_t usercnt
5.8 and 5.11: ⚠️
struct mem_cgroup *memcg
struct bpf_map_memory memory
5.11 and 5.13: ✅
5.13 and 5.15: ⚠️
int timer_off
u64 writecnt ➡️ atomic64_t writecnt
5.15 and 5.19: ⚠️
u64 map_extra
struct bpf_map_value_off *kptr_off_tab
struct bpf_map_off_arr *off_arr
struct (anon) owner
5.19 and 6.2: ⚠️
struct btf_record *record
struct obj_cgroup *objcg
struct btf_field_offs *field_offs
int spin_lock_off
struct bpf_map_value_off *kptr_off_tab
int timer_off
struct mem_cgroup *memcg
struct bpf_map_off_arr *off_arr
6.2 and 6.5: ⚠️
struct btf_field_offs *field_offs
6.5 and 6.8: ⚠️
struct callback_head rcu
bool free_after_mult_rcu_gp
bool free_after_rcu_gp
atomic64_t sleepable_refcnt
s64 *elem_count
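The entries above show two further kinds of change besides offset drift: type widening (refcnt and usercnt go from atomic_t to atomic64_t between 5.4 and 5.8, writecnt from u64 to atomic64_t in 5.15) and fields that only exist from some version onward (for example sleepable_refcnt, which first appears in the 6.8 layout). Below is a hedged sketch of handling both with libbpf's CO-RE helpers; the helper names are hypothetical, the narrow read assumes a little-endian target, and the code assumes it is compiled against a vmlinux.h new enough to declare sleepable_refcnt.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Hypothetical helper: returns the map's reference count regardless of
 * whether the running kernel uses the 32-bit or 64-bit layout. The size
 * check is resolved to a constant at load time, so the unused branch is
 * dead code on any given kernel. */
static __always_inline __u64 read_map_refcnt(struct bpf_map *map)
{
    __u64 cnt = 0;

    if (bpf_core_field_size(map->refcnt) == sizeof(__u64)) {
        /* 5.8+ layouts above: atomic64_t refcnt */
        bpf_core_read(&cnt, sizeof(__u64), &map->refcnt);
    } else {
        /* 5.4 and older: 32-bit atomic_t (little-endian assumed) */
        __u32 cnt32 = 0;

        bpf_core_read(&cnt32, sizeof(__u32), &map->refcnt);
        cnt = cnt32;
    }
    return cnt;
}

/* Hypothetical helper: sleepable_refcnt only exists in the 6.8 layout
 * above, so guard the read to keep the program loadable on older kernels. */
static __always_inline __u64 read_sleepable_refcnt(struct bpf_map *map)
{
    __u64 cnt = 0;

    if (bpf_core_field_exists(map->sleepable_refcnt))
        bpf_core_read(&cnt, sizeof(cnt), &map->sleepable_refcnt);
    return cnt;
}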
amd64 and arm64: ✅
amd64 and armhf: ✅
amd64 and ppc64el: ✅
amd64 and riscv64: ✅
generic and aws: ✅
generic and azure: ✅
generic and gcp: ✅
generic and lowlatency: ✅