xdp_sock
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: Absent ⚠️
4.18: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
struct xsk_queue *tx;
struct list_head list;
bool zc;
struct mutex mutex;
spinlock_t tx_completion_lock;
u64 rx_dropped;
};
5.0: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
struct xsk_queue *tx;
struct list_head list;
bool zc;
struct mutex mutex;
spinlock_t tx_completion_lock;
u64 rx_dropped;
};
5.3: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
};
5.4: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
5.8: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
5.11: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
5.13: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
5.15: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
5.19: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
6.2: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
6.5: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
6.8: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
bool sg;
enum (anon) state;
struct xsk_queue *tx;
struct list_head tx_list;
u32 tx_budget_spent;
spinlock_t rx_lock;
u64 rx_dropped;
u64 rx_queue_full;
struct sk_buff *skb;
struct list_head map_list;
spinlock_t map_list_lock;
struct mutex mutex;
struct xsk_queue *fq_tmp;
struct xsk_queue *cq_tmp;
};
arm64: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
armhf: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
ppc64el: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
riscv64: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
aws: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
azure: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
gcp: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
lowlatency: ✅struct xdp_sock {
struct sock sk;
struct xsk_queue *rx;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
bool zc;
enum (anon) state;
struct mutex mutex;
struct xsk_queue *tx;
struct list_head list;
spinlock_t tx_completion_lock;
spinlock_t rx_lock;
u64 rx_dropped;
struct list_head map_list;
spinlock_t map_list_lock;
};
Regular
4.18 and 5.0 ✅
5.0 and 5.3 ⚠️enum (anon) state
spinlock_t rx_lock
5.3 and 5.4 ⚠️struct list_head map_list
spinlock_t map_list_lock
5.4 and 5.8 ✅
5.8 and 5.11 ⚠️struct xsk_buff_pool *pool
struct list_head tx_list
u64 rx_queue_full
struct xsk_queue *fq_tmp
struct xsk_queue *cq_tmp
struct list_head list
spinlock_t tx_completion_lock
5.11 and 5.13 ✅
5.13 and 5.15 ✅
5.15 and 5.19 ✅
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ⚠️bool sg
u32 tx_budget_spent
struct sk_buff *skb
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅