sock
Regular
4.4: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
__u32 sk_txhash;
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
atomic_t sk_drops;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
struct sk_buff_head sk_write_queue;
unsigned int sk_shutdown;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
u16 sk_gso_max_segs;
int sk_rcvlowat;
long unsigned int sk_lingertime;
struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
__u32 sk_priority;
__u32 sk_cgrp_prioidx;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
long int sk_sndtimeo;
struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
struct page_frag sk_frag;
struct sk_buff *sk_send_head;
__s32 sk_peek_off;
int sk_write_pending;
void *sk_security;
__u32 sk_mark;
u32 sk_classid;
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
};
4.8: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
__u32 sk_txhash;
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
atomic_t sk_drops;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
struct sk_buff_head sk_write_queue;
unsigned int sk_padding;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
u16 sk_gso_max_segs;
int sk_rcvlowat;
long unsigned int sk_lingertime;
struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
__u32 sk_priority;
__u32 sk_mark;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
long int sk_sndtimeo;
struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
struct page_frag sk_frag;
struct sk_buff *sk_send_head;
__s32 sk_peek_off;
int sk_write_pending;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
4.10: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
atomic_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
4.13: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
4.15: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
4.18: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
u32 sk_pacing_rate;
u32 sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
5.0: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct callback_head sk_rcu;
};
5.3: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.4: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.8: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_padding;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.11: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_padding;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.13: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_padding;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.15: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_padding;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
spinlock_t sk_peer_lock;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
int sk_bind_phc;
u8 sk_shutdown;
atomic_t sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
5.19: ✅struct sock {
struct sock_common __sk_common;
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
u32 sk_reserved_mem;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_gso_disabled;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_txrehash;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
atomic_t sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
netns_tracker ns_tracker;
};
6.2: ✅struct sock {
struct sock_common __sk_common;
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
u32 sk_reserved_mem;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_gso_disabled;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_txrehash;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
atomic_t sk_tskey;
atomic_t sk_zckey;
u32 sk_tsflags;
u8 sk_shutdown;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
bool sk_use_task_frag;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
netns_tracker ns_tracker;
struct hlist_node sk_bind2_node;
};
6.5: ✅struct sock {
struct sock_common __sk_common;
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
u32 sk_reserved_mem;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
int sk_wait_pending;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_gso_disabled;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_txrehash;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
atomic_t sk_tskey;
atomic_t sk_zckey;
u32 sk_tsflags;
u8 sk_shutdown;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
bool sk_use_task_frag;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
netns_tracker ns_tracker;
struct hlist_node sk_bind2_node;
};
6.8: ✅struct sock {
struct sock_common __sk_common;
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
u32 sk_reserved_mem;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
int sk_disconnects;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
u8 sk_gso_disabled;
u8 sk_kern_sock;
u8 sk_no_check_tx;
u8 sk_no_check_rx;
u8 sk_userlocks;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_txrehash;
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
atomic_t sk_tskey;
atomic_t sk_zckey;
u32 sk_tsflags;
u8 sk_shutdown;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
bool sk_use_task_frag;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_local_storage *sk_bpf_storage;
struct callback_head sk_rcu;
netns_tracker ns_tracker;
};
arm64: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
armhf: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
seqlock_t sk_stamp_seq;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
ppc64el: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
riscv64: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
aws: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
azure: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
gcp: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
lowlatency: ✅struct sock {
struct sock_common __sk_common;
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
struct (anon) sk_backlog;
int sk_forward_alloc;
unsigned int sk_ll_usec;
unsigned int sk_napi_id;
int sk_rcvbuf;
struct sk_filter *sk_filter;
struct socket_wq *sk_wq;
struct socket_wq *sk_wq_raw;
struct xfrm_policy * sk_policy[2];
struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
long unsigned int sk_tsq_flags;
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long int sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
long unsigned int sk_pacing_rate;
long unsigned int sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;
unsigned int __sk_flags_offset[0];
unsigned int sk_padding;
unsigned int sk_kern_sock;
unsigned int sk_no_check_tx;
unsigned int sk_no_check_rx;
unsigned int sk_userlocks;
unsigned int sk_protocol;
unsigned int sk_type;
u16 sk_gso_max_segs;
u8 sk_pacing_shift;
long unsigned int sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err;
int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long int sk_rcvtimeo;
ktime_t sk_stamp;
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
u8 sk_txtime_deadline_mode;
u8 sk_txtime_report_errors;
u8 sk_txtime_unused;
struct socket *sk_socket;
void *sk_user_data;
void *sk_security;
struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *);
void (*sk_data_ready)(struct sock *);
void (*sk_write_space)(struct sock *);
void (*sk_error_report)(struct sock *);
int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
void (*sk_destruct)(struct sock *);
struct sock_reuseport *sk_reuseport_cb;
struct bpf_sk_storage *sk_bpf_storage;
struct callback_head sk_rcu;
};
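The per-version listings above are the layout variation a tracing program has to absorb at run time. A minimal BPF CO-RE sketch follows; it assumes libbpf with a vmlinux.h generated from BTF and a target kernel whose BTF is available to the loader (shipped natively since 5.4, supplied externally for older releases). The tcp_v4_connect attach point and the trace_connect name are only illustrative choices. sk_rcvbuf and sk_sndbuf appear in every layout listed, so BPF_CORE_READ() can relocate their offsets on any of these kernels.
/* Minimal CO-RE sketch -- not the tool that produced the listings above. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(trace_connect, struct sock *sk)
{
	/* offsets of sk_rcvbuf/sk_sndbuf are relocated against the running kernel */
	int rcvbuf = BPF_CORE_READ(sk, sk_rcvbuf);
	int sndbuf = BPF_CORE_READ(sk, sk_sndbuf);

	bpf_printk("sk=%p rcvbuf=%d sndbuf=%d", sk, rcvbuf, sndbuf);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";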
Regular
4.4 and 4.8 ⚠️unsigned int sk_padding
struct sock_cgroup_data sk_cgrp_data
struct mem_cgroup *sk_memcg
struct sock_reuseport *sk_reuseport_cb
struct callback_head sk_rcu
__u32 sk_cgrp_prioidx
u32 sk_classid
struct cg_proto *sk_cgrp
unsigned int sk_shutdown ➡️ u8 sk_shutdown
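Between 4.4 and 4.8 the per-socket cgroup members (sk_cgrp_prioidx, sk_classid, sk_cgrp) are replaced by sk_cgrp_data, and sk_memcg, sk_reuseport_cb and sk_rcu are added. A hedged helper sketch for gating on a member that only exists from 4.8 on (sock_memcg is an illustrative name, same CO-RE assumptions as the sketch above):
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* bpf_core_field_exists() is resolved at load time, so the dead branch is
 * pruned before verification on kernels that lack the member. */
static __always_inline struct mem_cgroup *sock_memcg(struct sock *sk)
{
	if (bpf_core_field_exists(sk->sk_memcg))   /* present from 4.8 on, per the listings */
		return BPF_CORE_READ(sk, sk_memcg);
	return NULL;                               /* 4.4: member absent */
}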
4.8 and 4.10 ⚠️long unsigned int sk_tsq_flags
unsigned int __sk_flags_offset[0]
kuid_t sk_uid
4.10 and 4.13 ⚠️__u32 sk_dst_pending_confirm
u32 sk_pacing_status
unsigned int sk_kern_sock
atomic_t sk_wmem_alloc ➡️ refcount_t sk_wmem_alloc
4.13 and 4.15 ⚠️struct rb_root tcp_rtx_queue
u8 sk_pacing_shift
atomic_t sk_zckey
4.15 and 4.18 ⚠️netdev_features_t sk_route_forced_caps
struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *)
4.18 and 5.0 ⚠️u8 sk_clockid
u8 sk_txtime_deadline_mode
u8 sk_txtime_report_errors
u8 sk_txtime_unused
u32 sk_pacing_rate ➡️ long unsigned int sk_pacing_rate
u32 sk_max_pacing_rate ➡️ long unsigned int sk_max_pacing_rate
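Between 4.18 and 5.0, sk_pacing_rate and sk_max_pacing_rate widen from u32 to unsigned long. A width-aware read handles both layouts; a hedged helper sketch (sock_pacing_rate is an illustrative name, same CO-RE assumptions as above):
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

static __always_inline unsigned long sock_pacing_rate(struct sock *sk)
{
	unsigned long rate = 0;

	if (bpf_core_field_size(sk->sk_pacing_rate) == sizeof(u32)) {
		u32 rate32 = 0;                 /* <= 4.18 layout: 32-bit field */
		bpf_core_read(&rate32, sizeof(rate32), &sk->sk_pacing_rate);
		rate = rate32;
	} else {
		/* >= 5.0 layout: unsigned long field */
		bpf_core_read(&rate, sizeof(rate), &sk->sk_pacing_rate);
	}
	return rate;
}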
5.0 and 5.3 ⚠️struct sk_buff *sk_rx_skb_cache
struct sk_buff *sk_tx_skb_cache
struct bpf_sk_storage *sk_bpf_storage
5.3 and 5.4 ✅
5.4 and 5.8 ⚠️unsigned int __sk_flags_offset[0]
unsigned int sk_padding ➡️ u8 sk_padding
unsigned int sk_kern_sock ➡️ u8 sk_kern_sock
unsigned int sk_no_check_tx ➡️ u8 sk_no_check_tx
unsigned int sk_no_check_rx ➡️ u8 sk_no_check_rx
unsigned int sk_userlocks ➡️ u8 sk_userlocks
unsigned int sk_protocol ➡️ u16 sk_protocol
unsigned int sk_type ➡️ u16 sk_type
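Between the 5.4 and 5.8 columns, sk_type and sk_protocol change from members of a bitfield block to plain u16 fields (the __sk_flags_offset[0] marker disappears with them). A plain pointer read of sk_protocol therefore only works on the newer layouts; BPF_CORE_READ_BITFIELD_PROBED() uses CO-RE byte-offset/shift relocations and reads both layouts correctly. A hedged helper sketch (sock_protocol is an illustrative name):
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

static __always_inline u16 sock_protocol(struct sock *sk)
{
	/* works whether sk_protocol is a bitfield (<= 5.4 here) or a u16 (>= 5.8) */
	return BPF_CORE_READ_BITFIELD_PROBED(sk, sk_protocol);
}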
5.8 and 5.11 ⚠️u8 sk_prefer_busy_poll
u16 sk_busy_poll_budget
struct bpf_sk_storage *sk_bpf_storage ➡️ struct bpf_local_storage *sk_bpf_storage
5.11 and 5.13 ✅
5.13 and 5.15 ⚠️int sk_rx_dst_ifindex
u32 sk_rx_dst_cookie
spinlock_t sk_peer_lock
int sk_bind_phc
u32 sk_tskey ➡️ atomic_t sk_tskey
5.15 and 5.19 ⚠️u32 sk_reserved_mem
u8 sk_gso_disabled
u8 sk_txrehash
netns_tracker ns_tracker
struct sk_buff *sk_rx_skb_cache
struct sk_buff *sk_tx_skb_cache
netdev_features_t sk_route_nocaps
netdev_features_t sk_route_forced_caps
u8 sk_padding
5.19 and 6.2 ⚠️bool sk_use_task_frag
struct hlist_node sk_bind2_node
u16 sk_tsflags ➡️ u32 sk_tsflags
6.2 and 6.5 ⚠️int sk_wait_pending
6.5 and 6.8 ⚠️int sk_disconnects
int sk_wait_pending
struct hlist_node sk_bind2_node
amd64 and arm64 ✅
amd64 and armhf ⚠️seqlock_t sk_stamp_seq
amd64 and ppc64el ✅
amd64 and riscv64 ✅
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅
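The listings above can be reproduced for whatever kernel a machine is actually running by walking its BTF. A minimal userspace sketch, assuming libbpf development headers are installed and the kernel exposes /sys/kernel/btf/vmlinux; sock_layout.c is just an illustrative file name:
/* Build (assumption): cc -o sock_layout sock_layout.c -lbpf */
#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();   /* parses /sys/kernel/btf/vmlinux */
	if (!btf)
		return 1;

	int id = btf__find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (id < 0)
		return 1;

	const struct btf_type *t = btf__type_by_id(btf, id);
	const struct btf_member *m = btf_members(t);

	for (unsigned int i = 0; i < btf_vlen(t); i++, m++) {
		/* bit offset / 8 gives the byte offset; bitfield members share one */
		printf("%5u  %s\n", btf_member_bit_offset(t, i) / 8,
		       btf__name_by_offset(btf, m->name_off));
	}

	btf__free(btf);
	return 0;
}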