xdp_bulk_queue
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: ✅struct xdp_bulk_queue {
void * q[8];
unsigned int count;
};
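The 4.15 entry is the small fixed-size staging array that arrived with the cpumap redirect path: frame pointers accumulate in q and are handed off in one batch rather than one at a time. The snippet below is a minimal standalone model of that enqueue/flush pattern, assuming a simple flush-on-full policy; the function names loosely echo the kernel's helpers, but none of this is the actual kernel code.

#include <stdio.h>

#define CPU_MAP_BULK_SIZE 8   /* mirrors the q[8] in the 4.15 layout */

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	unsigned int count;
};

/* Hand off everything currently staged in one batch (here: just print). */
static void bq_flush(struct xdp_bulk_queue *bq)
{
	for (unsigned int i = 0; i < bq->count; i++)
		printf("flushing frame %p\n", bq->q[i]);
	bq->count = 0;
}

/* Stage one frame; flush first if the fixed-size array is already full. */
static void bq_enqueue(struct xdp_bulk_queue *bq, void *frame)
{
	if (bq->count == CPU_MAP_BULK_SIZE)
		bq_flush(bq);
	bq->q[bq->count++] = frame;
}

int main(void)
{
	struct xdp_bulk_queue bq = { .count = 0 };
	int frames[20];

	for (int i = 0; i < 20; i++)
		bq_enqueue(&bq, &frames[i]);
	bq_flush(&bq);   /* drain the remainder at the end of a poll cycle */
	return 0;
}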
4.18: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct net_device *dev_rx;
unsigned int count;
};
5.0: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct net_device *dev_rx;
unsigned int count;
};
5.3: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
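5.3 is where flush_node and obj first appear in this report. The flush_node member is an intrusive list hook: a queue links itself onto a flush list when it receives its first pending frame, so the end-of-poll flush walks only queues that actually have work. The sketch below is a standalone model of that mechanic; it deliberately drops dev_rx and obj to keep the focus on the list, and the hand-rolled list helpers are stand-ins shaped like the kernel's list_head API, not the kernel implementation itself.

#include <stdio.h>
#include <stddef.h>

#define BULK_SIZE 16

/* Minimal intrusive list, shaped like the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* 5.3-style queue, reduced to the members that matter for the flush list. */
struct bulk_queue {
	void *q[BULK_SIZE];
	struct list_head flush_node;
	unsigned int count;
};

/* Stage a frame; on the first frame, hook the queue onto the flush list.
 * (A fuller model would also flush when q fills up.) */
static void bq_enqueue(struct bulk_queue *bq, void *frame,
		       struct list_head *flush_list)
{
	if (bq->count == 0)
		list_add(&bq->flush_node, flush_list);
	bq->q[bq->count++] = frame;
}

/* Walk only the queues that linked themselves onto the flush list. */
static void flush_all(struct list_head *flush_list)
{
	while (flush_list->next != flush_list) {
		struct bulk_queue *bq = container_of(flush_list->next,
						     struct bulk_queue, flush_node);
		printf("flushing %u frame(s)\n", bq->count);
		bq->count = 0;
		list_del_init(&bq->flush_node);
	}
}

int main(void)
{
	struct list_head flush_list;
	struct bulk_queue a = { .count = 0 }, b = { .count = 0 };
	int f1, f2, f3;

	list_init(&flush_list);
	bq_enqueue(&a, &f1, &flush_list);
	bq_enqueue(&a, &f2, &flush_list);
	bq_enqueue(&b, &f3, &flush_list);
	flush_all(&flush_list);   /* visits a and b, and nothing else */
	return 0;
}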
5.4: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
5.8: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
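From 5.8 onward the report shows only the smaller cpumap-flavoured layout again: dev_rx is gone, q is back to eight untyped slots, and obj now points at a bpf_cpu_map_entry (most likely because the devmap variant was split off upstream under its own name, so it no longer appears here). For a layout-tracking report, the practical consequence of such churn is that sizes and member offsets move. The standalone sketch below illustrates that with sizeof/offsetof over stub copies of the 5.4 and 5.8 layouts; the *_v5_4/*_v5_8 names and the stub types are mine, and the exact numbers depend on the ABI you compile for, so treat them as illustrative rather than authoritative.

#include <stdio.h>
#include <stddef.h>

/* Forward declarations: only pointer members reference these types,
 * so incomplete types are enough for the layout comparison below. */
struct xdp_frame;
struct net_device;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

/* Same shape as the kernel's list head. */
struct list_head { struct list_head *next, *prev; };

/* 5.4-era layout (devmap flavour: typed q[16], dev_rx, dtab obj). */
struct xdp_bulk_queue_v5_4 {
	struct xdp_frame *q[16];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

/* 5.8-era layout (cpumap flavour: untyped q[8], cpumap obj). */
struct xdp_bulk_queue_v5_8 {
	void *q[8];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

int main(void)
{
	printf("5.4 layout: size=%zu, count at offset %zu\n",
	       sizeof(struct xdp_bulk_queue_v5_4),
	       offsetof(struct xdp_bulk_queue_v5_4, count));
	printf("5.8 layout: size=%zu, count at offset %zu\n",
	       sizeof(struct xdp_bulk_queue_v5_8),
	       offsetof(struct xdp_bulk_queue_v5_8, count));
	return 0;
}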
5.11: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
5.13: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
5.15: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
5.19: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
6.2: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
6.5: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
6.8: ✅struct xdp_bulk_queue {
void * q[8];
struct list_head flush_node;
struct bpf_cpu_map_entry *obj;
unsigned int count;
};
arm64: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
armhf: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
ppc64el: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
riscv64: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
aws: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
azure: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
gcp: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
lowlatency: ✅struct xdp_bulk_queue {
struct xdp_frame * q[16];
struct list_head flush_node;
struct net_device *dev_rx;
struct bpf_dtab_netdev *obj;
unsigned int count;
};
Regular: differences between adjacent releases, architectures, and flavours
4.15 and 4.18 ⚠️ added: struct net_device *dev_rx
changed: void * q[8] ➡️ struct xdp_frame * q[16]
4.18 and 5.0 ✅
5.0 and 5.3 ⚠️ added: struct list_head flush_node
added: struct bpf_dtab_netdev *obj
5.3 and 5.4 ✅
5.4 and 5.8 ⚠️ removed: struct net_device *dev_rx
changed: struct xdp_frame * q[16] ➡️ void * q[8]
changed: struct bpf_dtab_netdev *obj ➡️ struct bpf_cpu_map_entry *obj
5.8 and 5.11 ✅
5.11 and 5.13 ✅
5.13 and 5.15 ✅
5.15 and 5.19 ✅
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ✅
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅
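Because the layout drifts like this (dev_rx appearing and later vanishing, q changing element type and width, obj changing pointee), a BPF program that inspects this struct should probe the layout at load time rather than hard-code one version. The sketch below shows that pattern with libbpf CO-RE. It is a hypothetical illustration with several assumptions: it expects a vmlinux.h generated from a kernel whose BTF still carries the dev_rx and flush_node members (for example a 5.4-era one), and the kprobe on cpu_map_enqueue is only an example attach point, not something this report prescribes.

// Hypothetical CO-RE sketch: report at run time which xdp_bulk_queue
// layout the kernel you load onto actually has.
#include "vmlinux.h"              /* assumed: generated from BTF that has dev_rx/flush_node */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/cpu_map_enqueue")     /* example attach point only */
int probe_layout(struct pt_regs *ctx)
{
	struct xdp_bulk_queue *bq = NULL;   /* used only for its type */

	/* Per the table above, dev_rx exists only in the 4.18..5.4 layouts. */
	if (bpf_core_field_exists(bq->dev_rx))
		bpf_printk("xdp_bulk_queue: devmap-style layout (has dev_rx)");
	else
		bpf_printk("xdp_bulk_queue: cpumap-style layout (no dev_rx)");

	/* Per the table above, flush_node first appears in 5.3. */
	if (!bpf_core_field_exists(bq->flush_node))
		bpf_printk("pre-5.3 layout: no flush_node");

	return 0;
}

One caveat, also hedged: in the window where both the devmap and cpumap code defined a struct under this name, CO-RE's name-based matching can see more than one candidate, so the check above is most dependable on 5.8+ kernels where only the cpumap definition remains.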