Qdisc
Regular
4.4: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct list_head list;
u32 handle;
u32 parent;
int (*reshape_fail)(struct sk_buff *, struct Qdisc *);
void *u32_node;
struct Qdisc *__parent;
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
struct Qdisc *next_sched;
struct sk_buff *gso_skb;
long unsigned int state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
unsigned int __state;
struct gnet_stats_queue qstats;
struct callback_head callback_head;
int padded;
atomic_t refcnt;
spinlock_t busylock;
};
4.8: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct list_head list;
u32 handle;
u32 parent;
void *u32_node;
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
struct sk_buff *gso_skb;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff *skb_bad_txq;
struct callback_head callback_head;
int padded;
atomic_t refcnt;
spinlock_t busylock;
};
4.10: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
void *u32_node;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
struct sk_buff *gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff *skb_bad_txq;
struct callback_head callback_head;
int padded;
atomic_t refcnt;
spinlock_t busylock;
};
4.13: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
void *u32_node;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
struct sk_buff *gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff *skb_bad_txq;
struct callback_head callback_head;
int padded;
refcount_t refcnt;
spinlock_t busylock;
};
4.15: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
struct sk_buff *gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff *skb_bad_txq;
int padded;
refcount_t refcnt;
spinlock_t busylock;
};
4.18: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
};
5.0: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
};
5.3: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
5.4: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
5.8: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
5.11: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
long int privdata[0];
};
5.13: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
long int privdata[0];
};
5.15: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
long int privdata[0];
};
5.19: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
long unsigned int state;
long unsigned int state2;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
netdevice_tracker dev_tracker;
long int privdata[0];
};
6.2: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
long unsigned int state;
long unsigned int state2;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
netdevice_tracker dev_tracker;
long int privdata[0];
};
6.5: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
long unsigned int state;
long unsigned int state2;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
netdevice_tracker dev_tracker;
long int privdata[0];
};
6.8: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
long unsigned int state;
long unsigned int state2;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
struct callback_head rcu;
netdevice_tracker dev_tracker;
long int privdata[0];
};
arm64: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
armhf: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
ppc64el: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
riscv64: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
aws: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
azure: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
gcp: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
lowlatency: ✅struct Qdisc {
int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
struct sk_buff * (*dequeue)(struct Qdisc *);
unsigned int flags;
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;
struct netdev_queue *dev_queue;
struct net_rate_estimator *rate_est;
struct gnet_stats_basic_cpu *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int padded;
refcount_t refcnt;
struct sk_buff_head gso_skb;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
long unsigned int state;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;
spinlock_t busylock;
spinlock_t seqlock;
bool empty;
struct callback_head rcu;
};
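Every version and flavour listed above keeps handle, parent, flags, limit, dev_queue, qstats and the q.qlen counter with the same names and types, so purely diagnostic code that sticks to those members needs no version guards. A minimal sketch (hypothetical helper name, kernel-module context assumed):

#include <linux/kernel.h>
#include <net/sch_generic.h>

/* Hypothetical diagnostic helper: touches only fields whose name and type
 * are identical in every layout listed above. Per-CPU counters in
 * cpu_qstats, if the qdisc uses them, are not folded in here. */
static void qdisc_dump_stable_fields(const struct Qdisc *q)
{
	pr_info("qdisc %08x parent %08x flags %#x limit %u qlen %u backlog %u drops %u\n",
		q->handle, q->parent, q->flags, q->limit,
		q->qstats.qlen, q->qstats.backlog, q->qstats.drops);
}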
Regular
4.4 and 4.8 ⚠️
  added in 4.8:   seqcount_t running
  added in 4.8:   struct sk_buff *skb_bad_txq
  removed in 4.8: int (*reshape_fail)(struct sk_buff *, struct Qdisc *)
  removed in 4.8: struct Qdisc *__parent
  removed in 4.8: unsigned int __state
  changed:        int (*enqueue)(struct sk_buff *, struct Qdisc *) ➡️ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **)
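The 4.4 ➡️ 4.8 enqueue change adds a third parameter that collects packets the qdisc decides to drop, so the caller can free them after the fact. A hedged sketch of a caller coping with both signatures (assuming, as the in-tree transmit path does, that the qdisc lock is already held):

#include <linux/version.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Sketch only: call q->enqueue() with whichever signature this kernel uses.
 * On >= 4.8 the qdisc appends dropped packets to *to_free and the caller
 * frees them afterwards, outside the qdisc lock. */
static int enqueue_compat(struct sk_buff *skb, struct Qdisc *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
	struct sk_buff *to_free = NULL;
	int rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;

	if (unlikely(to_free))
		kfree_skb_list(to_free);
	return rc;
#else
	return q->enqueue(skb, q) & NET_XMIT_MASK;
#endif
}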
4.8 and 4.10 ⚠️
  added in 4.10:   struct hlist_node hash
  removed in 4.10: struct list_head list
  changed:         struct gnet_stats_rate_est64 rate_est ➡️ struct net_rate_estimator *rate_est
  changed:         struct sk_buff_head q ➡️ struct qdisc_skb_head q
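Both struct sk_buff_head and struct qdisc_skb_head expose a qlen member, so queue-length reads survive the 4.8 ➡️ 4.10 type change unchanged; the in-tree qdisc_qlen() helper wraps exactly this. A sketch:

#include <net/sch_generic.h>

/* Works on both sides of the 4.8 ➡️ 4.10 change: sk_buff_head and
 * qdisc_skb_head both carry a qlen counter at q->q.qlen. */
static unsigned int qdisc_backlog_pkts(const struct Qdisc *q)
{
	return q->q.qlen;	/* equivalent to qdisc_qlen(q) */
}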
4.10 and 4.13 ⚠️
  changed: atomic_t refcnt ➡️ refcount_t refcnt
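The 4.10 ➡️ 4.13 switch from atomic_t to refcount_t changes the accessor, not the meaning. A read-only sketch:

#include <linux/version.h>
#include <net/sch_generic.h>

/* Sketch: report the current reference count, for debugging only. */
static unsigned int qdisc_refcount_peek(const struct Qdisc *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	return refcount_read(&q->refcnt);
#else
	return atomic_read(&q->refcnt);
#endif
}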
4.13 and 4.15 ⚠️
  removed in 4.15: void *u32_node
  removed in 4.15: struct callback_head callback_head
4.15 and 4.18 ⚠️
  added in 4.18: spinlock_t seqlock
  changed:       struct sk_buff *gso_skb ➡️ struct sk_buff_head gso_skb
  changed:       struct sk_buff *skb_bad_txq ➡️ struct sk_buff_head skb_bad_txq
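From 4.18 the requeue slots gso_skb and skb_bad_txq hold whole queues rather than a single pointer, so an "is anything requeued?" check changes shape. A sketch (ignoring locking):

#include <linux/version.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Sketch: does this qdisc currently hold a requeued (dequeued but not yet
 * transmitted) packet? */
static bool qdisc_has_requeued_skb(const struct Qdisc *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
	return !skb_queue_empty(&q->gso_skb);
#else
	return q->gso_skb != NULL;
#endif
}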
4.18 and 5.0 ⚠️
  added in 5.0: struct callback_head rcu
5.0 and 5.3 ⚠️
  added in 5.3: bool empty
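The empty flag only appears in the 5.3 through 5.13 listings above (it is gone again by 5.15), so any use of it needs both bounds; outside that window the packet count is the obvious fallback. A hedged sketch, with the version bounds taken from these listings (other kernel trees may have flipped at slightly different points):

#include <linux/version.h>
#include <linux/compiler.h>
#include <net/sch_generic.h>

/* Sketch: best-effort emptiness check. The empty flag exists only in the
 * 5.3..5.13 layouts shown above; elsewhere fall back to q->q.qlen, which
 * every listed version exposes. */
static bool qdisc_looks_empty(const struct Qdisc *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
	return READ_ONCE(q->empty);
#else
	return q->q.qlen == 0;
#endif
}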
5.3 and 5.4 ✅
5.4 and 5.8 ✅
5.8 and 5.11 ⚠️
  added in 5.11:   int pad
  added in 5.11:   long int privdata[0]
  removed in 5.11: int padded
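The pad/padded rename and the new privdata flexible array only matter if you compute the location of a qdisc's private area by hand; the in-tree qdisc_priv() helper already follows the layout, so going through it avoids any version check. A sketch with a hypothetical private struct:

#include <net/sch_generic.h>

/* Hypothetical per-qdisc private data, as a qdisc implementation would
 * size via Qdisc_ops.priv_size. */
struct my_sched_data {
	u32 marker;
};

static struct my_sched_data *my_qdisc_priv(struct Qdisc *q)
{
	/* qdisc_priv() tracks the padded/pad/privdata layout changes. */
	return qdisc_priv(q);
}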
5.11 and 5.13 ✅
5.13 and 5.15 ⚠️
  removed in 5.15: bool empty
5.15 and 5.19 ⚠️
  added in 5.19:   long unsigned int state2
  added in 5.19:   netdevice_tracker dev_tracker
  removed in 5.19: seqcount_t running
  changed:         struct gnet_stats_basic_cpu *cpu_bstats ➡️ struct gnet_stats_basic_sync *cpu_bstats
  changed:         struct gnet_stats_basic_packed bstats ➡️ struct gnet_stats_basic_sync bstats
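The 5.15 ➡️ 5.19 change replaces the plain counters of gnet_stats_basic_packed with u64_stats_t-wrapped ones in gnet_stats_basic_sync, so reads go through u64_stats_read(). A hedged sketch reading the byte counter; the version bound is taken from the two listings here (other trees changed at a different point), and the per-CPU cpu_bstats case and the 32-bit u64_stats retry loop are ignored:

#include <linux/version.h>
#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>

/* Sketch: total bytes accounted by this qdisc, without per-CPU folding. */
static u64 qdisc_bytes(const struct Qdisc *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
	return u64_stats_read(&q->bstats.bytes);
#else
	return q->bstats.bytes;
#endif
}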
5.19 and 6.2 ✅
6.2 and 6.5 ✅
6.5 and 6.8 ✅
amd64 and arm64 ✅
amd64 and armhf ✅
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅
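All of these differences are visible in the kernel's BTF, so BPF CO-RE programs can let the loader relocate field offsets and probe for optional members instead of relying on version checks. A minimal sketch, assuming a vmlinux.h whose struct Qdisc declares the empty member (otherwise a local ___flavoured definition would be needed) and __qdisc_run as the attach point:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/__qdisc_run")
int BPF_KPROBE(trace_qdisc_run, struct Qdisc *q)
{
	u32 handle = BPF_CORE_READ(q, handle);
	u32 qlen = BPF_CORE_READ(q, q.qlen);
	bool empty = false;

	/* The empty flag exists only in some of the layouts above; CO-RE lets
	 * us ask the running kernel instead of hard-coding a version range. */
	if (bpf_core_field_exists(q->empty))
		empty = BPF_CORE_READ(q, empty);

	bpf_printk("qdisc %x qlen %u empty %d", handle, qlen, empty);
	return 0;
}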