xsk_buff_pool
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: Absent ⚠️
4.18: Absent ⚠️
5.0: Absent ⚠️
5.3: Absent ⚠️
5.4: Absent ⚠️
5.8: ✅
struct xsk_buff_pool {
    struct xsk_queue *fq;
    struct list_head free_list;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 heads_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 frame_len;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    struct device *dev;
    struct xdp_buff_xsk *free_heads[0];
};
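
The sizing fields above are related: for the default aligned mode, frame_len is what remains of a chunk once the fixed kernel reserve and the user-configured headroom are subtracted. A minimal sketch of that relationship, assuming the aligned-mode computation in net/xdp/xsk_buff_pool.c (frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM); the helper name pool_frame_len is hypothetical:

#include <linux/types.h>
#include <linux/bpf.h>          /* XDP_PACKET_HEADROOM (256) */

/* Sketch only: how frame_len relates to chunk_size and headroom for the
 * aligned (non-unaligned) chunk mode.
 */
static u32 pool_frame_len(u32 chunk_size, u32 user_headroom)
{
    /* usable payload per chunk = chunk - kernel reserve - user headroom */
    return chunk_size - XDP_PACKET_HEADROOM - user_headroom;
}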
5.11: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
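
By 5.11 the pool also carries the fill and completion queues (fq/cq) and the netdev binding, and zero-copy drivers allocate their Rx buffers from it. A minimal sketch of an Rx refill loop using the driver-side helpers xsk_buff_alloc() and xsk_buff_xdp_get_dma() from include/net/xdp_sock_drv.h; my_refill_rx_ring and my_post_rx_desc are hypothetical stand-ins for driver-specific code:

#include <net/xdp_sock_drv.h>

/* driver-specific: post one Rx descriptor to hardware (stub) */
static void my_post_rx_desc(dma_addr_t dma, struct xdp_buff *xdp) { }

/* Hypothetical refill loop: pull buffers from the pool (backed by the
 * fill queue and free_heads/free_list internally) and hand their DMA
 * addresses to hardware. Buffers are later returned via xsk_buff_free().
 */
static u32 my_refill_rx_ring(struct xsk_buff_pool *pool, u32 budget)
{
    u32 done;

    for (done = 0; done < budget; done++) {
        struct xdp_buff *xdp = xsk_buff_alloc(pool);

        if (!xdp)               /* fill queue empty */
            break;

        my_post_rx_desc(xsk_buff_xdp_get_dma(xdp), xdp);
    }
    return done;
}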
5.13: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
5.15: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
5.19: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    struct xdp_desc *tx_descs;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 chunk_shift;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
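
5.19 adds a pool-owned tx_descs array (used by the batched Tx helpers) and chunk_shift. For context, a minimal sketch of the simpler single-descriptor Tx path built on xsk_tx_peek_desc(), xsk_buff_raw_get_dma(), xsk_buff_raw_dma_sync_for_device() and xsk_tx_release() from include/net/xdp_sock_drv.h; my_xsk_xmit and my_xmit_frame are hypothetical driver-side names:

#include <net/xdp_sock_drv.h>

/* driver-specific: queue one frame for transmission (stub) */
static int my_xmit_frame(dma_addr_t dma, u32 len) { return 0; }

/* Hypothetical zero-copy Tx poll: peek descriptors from the AF_XDP Tx
 * ring, translate their umem addresses to DMA addresses through the
 * pool, and let xsk_tx_release() wake the completion side.
 */
static u32 my_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
{
    struct xdp_desc desc;
    u32 sent = 0;

    while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
        dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

        xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
        if (my_xmit_frame(dma, desc.len))
            break;
        sent++;
    }
    if (sent)
        xsk_tx_release(pool);
    return sent;
}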
6.2: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    struct xdp_desc *tx_descs;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 chunk_shift;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
6.5: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    struct xdp_desc *tx_descs;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 chunk_shift;
    u32 frame_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
6.8: ✅
struct xsk_buff_pool {
    struct device *dev;
    struct net_device *netdev;
    struct list_head xsk_tx_list;
    spinlock_t xsk_tx_list_lock;
    refcount_t users;
    struct xdp_umem *umem;
    struct work_struct work;
    struct list_head free_list;
    struct list_head xskb_list;
    u32 heads_cnt;
    u16 queue_id;
    struct xsk_queue *fq;
    struct xsk_queue *cq;
    dma_addr_t *dma_pages;
    struct xdp_buff_xsk *heads;
    struct xdp_desc *tx_descs;
    u64 chunk_mask;
    u64 addrs_cnt;
    u32 free_list_cnt;
    u32 dma_pages_cnt;
    u32 free_heads_cnt;
    u32 headroom;
    u32 chunk_size;
    u32 chunk_shift;
    u32 frame_len;
    u8 tx_metadata_len;
    u8 cached_need_wakeup;
    bool uses_need_wakeup;
    bool dma_need_sync;
    bool unaligned;
    bool tx_sw_csum;
    void *addrs;
    spinlock_t cq_lock;
    struct xdp_buff_xsk *free_heads[0];
};
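
The 6.8 additions back AF_XDP Tx metadata: tx_metadata_len records how many bytes of metadata the application reserved in front of each Tx frame, and tx_sw_csum selects the software checksum fallback. A hedged driver-side sketch, assuming the 6.8 layout of struct xsk_tx_metadata in <linux/if_xdp.h> and the xsk_buff_get_metadata() helper; my_want_tx_csum is a hypothetical name:

#include <net/xdp_sock_drv.h>
#include <linux/if_xdp.h>       /* struct xsk_tx_metadata, XDP_TXMD_FLAGS_* */

/* Hypothetical Tx-path check: if the application reserved a metadata
 * area (pool->tx_metadata_len != 0) and this frame requests checksum
 * offload, pick up the csum_start/csum_offset it filled in. Returns
 * true when the driver has to produce a checksum for the frame.
 */
static bool my_want_tx_csum(struct xsk_buff_pool *pool, u64 addr,
                            u16 *csum_start, u16 *csum_offset)
{
    struct xsk_tx_metadata *meta = xsk_buff_get_metadata(pool, addr);

    if (!meta || !(meta->flags & XDP_TXMD_FLAGS_CHECKSUM))
        return false;

    *csum_start = meta->request.csum_start;
    *csum_offset = meta->request.csum_offset;
    return true;
}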
arm64: Absent ⚠️
armhf: Absent ⚠️
ppc64el: Absent ⚠️
riscv64: Absent ⚠️
aws: Absent ⚠️
azure: Absent ⚠️
gcp: Absent ⚠️
lowlatency: Absent ⚠️
Regular (fields that differ between consecutive versions)
5.8 and 5.11 ⚠️
    struct net_device *netdev
    struct list_head xsk_tx_list
    spinlock_t xsk_tx_list_lock
    refcount_t users
    struct xdp_umem *umem
    struct work_struct work
    u16 queue_id
    struct xsk_queue *cq
    u8 cached_need_wakeup
    bool uses_need_wakeup
    spinlock_t cq_lock
5.11 and 5.13 ✅
5.13 and 5.15 ✅
5.15 and 5.19 ⚠️
    struct xdp_desc *tx_descs
    u32 chunk_shift
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ⚠️
    struct list_head xskb_list
    u8 tx_metadata_len
    bool tx_sw_csum
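
On the userspace side, the 6.8 tx_metadata_len field corresponds to the extended UMEM registration. A sketch under the assumption that struct xdp_umem_reg in the 6.8 uapi carries a tx_metadata_len member; register_umem is a hypothetical helper and the UMEM area is assumed to have been mapped earlier:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Hypothetical fragment: register a UMEM whose frames each reserve
 * sizeof(struct xsk_tx_metadata) bytes of Tx metadata in front of the
 * packet data. umem_area/umem_size come from an earlier allocation
 * step not shown here.
 */
static int register_umem(int xsk_fd, void *umem_area, __u64 umem_size)
{
    struct xdp_umem_reg reg;

    memset(&reg, 0, sizeof(reg));
    reg.addr = (__u64)(unsigned long)umem_area;
    reg.len = umem_size;
    reg.chunk_size = 4096;      /* matches libxdp's default frame size */
    reg.headroom = 0;
    reg.tx_metadata_len = sizeof(struct xsk_tx_metadata);

    return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));
}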