io_ring_ctx
Layout of struct io_ring_ctx, the core per-ring io_uring context, as captured from each kernel's debug info. io_uring was merged in 5.1, so the structure is absent from earlier releases; ✅ marks versions and flavors where a layout was extracted, ⚠️ marks absences and layout differences.
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: Absent ⚠️
4.18: Absent ⚠️
5.0: Absent ⚠️
5.3: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
struct io_sq_ring *sq_ring;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct workqueue_struct *sqo_wq;
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
struct io_cq_ring *cq_ring;
unsigned int cached_cq_tail;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
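The 5.3 layout keeps two separate ring structures (sq_ring/cq_ring) plus a third region for the SQE array (sq_sqes), which is why early userspace mmap(2)s the ring fd three times. A minimal setup sketch against the raw syscall interface (error handling trimmed; offsets and types come from the uapi header):

/* Map the three regions a 5.3-era ctx exposes: sq_ring, cq_ring, sq_sqes. */
#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    struct io_uring_params p;
    memset(&p, 0, sizeof(p));

    int fd = syscall(__NR_io_uring_setup, 8, &p); /* 8 SQ entries */
    if (fd < 0) {
        perror("io_uring_setup");
        return 1;
    }

    /* Submission ring: head/tail/mask and the SQ index array. */
    size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, IORING_OFF_SQ_RING);

    /* Completion ring: head/tail/mask plus the CQE array. */
    size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, IORING_OFF_CQ_RING);

    /* The SQE array (ctx->sq_sqes) is its own mapping. */
    void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
                      PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, IORING_OFF_SQES);

    printf("sq=%p cq=%p sqes=%p\n", sq, cq, sqes);
    return 0;
}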
5.4: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
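5.4 merged the separate io_sq_ring and io_cq_ring into the single struct io_rings *rings allocation, advertised to userspace as IORING_FEAT_SINGLE_MMAP. A hedged sketch of the feature check (the pre-5.4 fallback is only stubbed out):

/* With IORING_FEAT_SINGLE_MMAP (5.4+), SQ and CQ share one kernel
 * allocation (ctx->rings), so a single mmap at IORING_OFF_SQ_RING
 * covers both ring headers. */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

void *map_rings(int *ring_fd, struct io_uring_params *p)
{
    memset(p, 0, sizeof(*p));
    int fd = syscall(__NR_io_uring_setup, 8, p);
    if (fd < 0)
        return MAP_FAILED;
    *ring_fd = fd;

    if (!(p->features & IORING_FEAT_SINGLE_MMAP))
        return MAP_FAILED; /* pre-5.4: fall back to two ring mappings */

    size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
    size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
    return mmap(NULL, sq_sz > cq_sz ? sq_sz : cq_sz,
                PROT_READ | PROT_WRITE, MAP_SHARED, fd, IORING_OFF_SQ_RING);
}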
5.8: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
unsigned int compat;
unsigned int account_mem;
unsigned int cq_overflow_flushed;
unsigned int drain_next;
unsigned int eventfd_async;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
atomic_t cached_cq_overflow;
long unsigned int sq_check_overflow;
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
wait_queue_head_t inflight_wait;
struct io_uring_sqe *sq_sqes;
struct io_rings *rings;
struct io_wq *io_wq;
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct fixed_file_data *file_data;
unsigned int nr_user_files;
int ring_fd;
struct file *ring_file;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ref_comp;
struct completion sq_thread_comp;
struct io_kiocb *fallback_req;
struct socket *ring_sock;
struct idr io_buffer_idr;
struct idr personality_idr;
unsigned int cached_cq_tail;
unsigned int cq_entries;
unsigned int cq_mask;
atomic_t cq_timeouts;
long unsigned int cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
struct list_head poll_list;
struct hlist_head *cancel_hash;
unsigned int cancel_hash_bits;
bool poll_multi_file;
spinlock_t inflight_lock;
struct list_head inflight_list;
struct delayed_work file_put_work;
struct llist_head file_put_llist;
struct work_struct exit_work;
};
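eventfd_async is new in this layout; together with cq_ev_fd it backs eventfd-based completion notification. A minimal sketch using IORING_REGISTER_EVENTFD:

/* ctx->cq_ev_fd is set by IORING_REGISTER_EVENTFD; the kernel signals
 * it whenever a CQE is posted. */
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

int register_cq_eventfd(int ring_fd)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    if (efd < 0)
        return -1;
    /* IORING_REGISTER_EVENTFD_ASYNC instead sets ctx->eventfd_async,
     * limiting signaling to completions that happened out of line. */
    if (syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
        close(efd);
        return -1;
    }
    return efd;
}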
5.11: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
unsigned int compat;
unsigned int limit_mem;
unsigned int cq_overflow_flushed;
unsigned int drain_next;
unsigned int eventfd_async;
unsigned int restricted;
unsigned int sqo_dead;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
unsigned int cached_cq_overflow;
long unsigned int sq_check_overflow;
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
struct io_uring_sqe *sq_sqes;
struct io_rings *rings;
struct io_wq *io_wq;
struct task_struct *sqo_task;
struct mm_struct *mm_account;
struct cgroup_subsys_state *sqo_blkcg_css;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
struct fixed_file_data *file_data;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
kuid_t loginuid;
unsigned int sessionid;
struct completion ref_comp;
struct completion sq_thread_comp;
struct io_kiocb *fallback_req;
struct socket *ring_sock;
struct idr io_buffer_idr;
struct idr personality_idr;
unsigned int cached_cq_tail;
unsigned int cq_entries;
unsigned int cq_mask;
atomic_t cq_timeouts;
unsigned int cq_last_tm_flush;
long unsigned int cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
struct list_head iopoll_list;
struct hlist_head *cancel_hash;
unsigned int cancel_hash_bits;
bool poll_multi_file;
spinlock_t inflight_lock;
struct list_head inflight_list;
struct delayed_work file_put_work;
struct llist_head file_put_llist;
struct work_struct exit_work;
struct io_restriction restrictions;
};
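sq_data and sqd_list appear here because the SQPOLL submit thread became shareable between rings around this release. A sketch, assuming the IORING_SETUP_ATTACH_WQ path; wq_fd names the existing ring whose thread should be reused:

/* ctx->sq_data can be shared: a second SQPOLL ring created with
 * IORING_SETUP_ATTACH_WQ reuses the first ring's submit thread. */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int attach_sqpoll_ring(int existing_fd)
{
    struct io_uring_params p;
    memset(&p, 0, sizeof(p));
    p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
    p.sq_thread_idle = 1000;  /* ms before the SQ thread goes idle */
    p.wq_fd = existing_fd;    /* ring whose sq_data we want to share */
    return syscall(__NR_io_uring_setup, 8, &p);
}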
5.13: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
unsigned int compat;
unsigned int drain_next;
unsigned int eventfd_async;
unsigned int restricted;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
unsigned int cached_cq_overflow;
long unsigned int sq_check_overflow;
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
struct io_uring_sqe *sq_sqes;
struct mutex uring_lock;
wait_queue_head_t wait;
struct io_submit_state submit_state;
struct list_head locked_free_list;
unsigned int locked_free_nr;
struct io_rings *rings;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
struct io_rsrc_data *file_data;
struct io_file_table file_table;
unsigned int nr_user_files;
struct io_rsrc_data *buf_data;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct xarray io_buffers;
struct xarray personalities;
u32 pers_next;
unsigned int cached_cq_tail;
unsigned int cq_entries;
unsigned int cq_mask;
atomic_t cq_timeouts;
unsigned int cq_last_tm_flush;
unsigned int cq_extra;
long unsigned int cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
spinlock_t completion_lock;
struct list_head iopoll_list;
struct hlist_head *cancel_hash;
unsigned int cancel_hash_bits;
bool poll_multi_file;
struct delayed_work rsrc_put_work;
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
struct io_rsrc_node *rsrc_node;
struct io_rsrc_node *rsrc_backup_node;
struct io_mapped_ubuf *dummy_ubuf;
struct io_restriction restrictions;
struct socket *ring_sock;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct callback_head *exit_task_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
};
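user_bufs is now an array of pointers managed through buf_data (the generic rsrc machinery). The entries themselves are still created with IORING_REGISTER_BUFFERS; a minimal sketch:

/* Each registered iovec becomes one io_mapped_ubuf; the pages are
 * pinned up front and later used by IORING_OP_READ_FIXED et al. */
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

int register_fixed_buffer(int ring_fd, size_t len)
{
    void *buf;
    if (posix_memalign(&buf, 4096, len))
        return -1;
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    return syscall(__NR_io_uring_register, ring_fd,
                   IORING_REGISTER_BUFFERS, &iov, 1);
}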
5.15: ✅
struct io_ring_ctx {
struct percpu_ref refs;
struct io_rings *rings;
unsigned int flags;
unsigned int compat;
unsigned int drain_next;
unsigned int eventfd_async;
unsigned int restricted;
unsigned int off_timeout_used;
unsigned int drain_active;
struct mutex uring_lock;
u32 *sq_array;
struct io_uring_sqe *sq_sqes;
unsigned int cached_sq_head;
unsigned int sq_entries;
struct list_head defer_list;
struct io_rsrc_node *rsrc_node;
struct io_file_table file_table;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct io_submit_state submit_state;
struct list_head timeout_list;
struct list_head ltimeout_list;
struct list_head cq_overflow_list;
struct xarray io_buffers;
struct xarray personalities;
u32 pers_next;
unsigned int sq_thread_idle;
struct list_head locked_free_list;
unsigned int locked_free_nr;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
long unsigned int check_cq_overflow;
unsigned int cached_cq_tail;
unsigned int cq_entries;
struct eventfd_ctx *cq_ev_fd;
struct wait_queue_head poll_wait;
struct wait_queue_head cq_wait;
unsigned int cq_extra;
atomic_t cq_timeouts;
unsigned int cq_last_tm_flush;
spinlock_t completion_lock;
spinlock_t timeout_lock;
struct list_head iopoll_list;
struct hlist_head *cancel_hash;
unsigned int cancel_hash_bits;
bool poll_multi_queue;
struct io_restriction restrictions;
struct io_rsrc_node *rsrc_backup_node;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct delayed_work rsrc_put_work;
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
struct socket *ring_sock;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
};
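iowq_limits and iowq_limits_set store the per-ring io-wq worker caps added in 5.15. A sketch of setting them; the call also writes the previous limits back into the array:

/* ctx->iowq_limits[2] holds the caps set via
 * IORING_REGISTER_IOWQ_MAX_WORKERS (new in 5.15). */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

int cap_iowq_workers(int ring_fd)
{
    /* [0] = bounded workers, [1] = unbounded workers; 0 = leave as-is. */
    unsigned int limits[2] = { 4, 16 };
    return syscall(__NR_io_uring_register, ring_fd,
                   IORING_REGISTER_IOWQ_MAX_WORKERS, limits, 2);
}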
5.19: ✅
struct io_ring_ctx {
struct percpu_ref refs;
struct io_rings *rings;
unsigned int flags;
enum task_work_notify_mode notify_method;
unsigned int compat;
unsigned int drain_next;
unsigned int restricted;
unsigned int off_timeout_used;
unsigned int drain_active;
unsigned int drain_disabled;
unsigned int has_evfd;
unsigned int syscall_iopoll;
struct mutex uring_lock;
u32 *sq_array;
struct io_uring_sqe *sq_sqes;
unsigned int cached_sq_head;
unsigned int sq_entries;
struct list_head defer_list;
struct io_rsrc_node *rsrc_node;
int rsrc_cached_refs;
atomic_t cancel_seq;
struct io_file_table file_table;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct io_submit_state submit_state;
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct list_head io_buffers_cache;
struct list_head timeout_list;
struct list_head ltimeout_list;
struct list_head cq_overflow_list;
struct list_head apoll_cache;
struct xarray personalities;
u32 pers_next;
unsigned int sq_thread_idle;
struct io_wq_work_list locked_free_list;
unsigned int locked_free_nr;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
long unsigned int check_cq;
struct io_uring_cqe *cqe_cached;
struct io_uring_cqe *cqe_sentinel;
unsigned int cached_cq_tail;
unsigned int cq_entries;
struct io_ev_fd *io_ev_fd;
struct wait_queue_head cq_wait;
unsigned int cq_extra;
atomic_t cq_timeouts;
unsigned int cq_last_tm_flush;
spinlock_t completion_lock;
spinlock_t timeout_lock;
struct io_wq_work_list iopoll_list;
struct hlist_head *cancel_hash;
unsigned int cancel_hash_bits;
bool poll_multi_queue;
struct list_head io_buffers_comp;
struct io_restriction restrictions;
struct io_rsrc_node *rsrc_backup_node;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct delayed_work rsrc_put_work;
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
struct list_head io_buffers_pages;
struct socket *ring_sock;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
};
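io_bl and io_bl_xa track the ring-mapped provided-buffer groups that are new in 5.19. A sketch of registering one with IORING_REGISTER_PBUF_RING (field names from the 5.19 uapi; buffer publication and teardown omitted):

/* ctx->io_bl / io_bl_xa index buffer groups registered here and later
 * consumed via sqe->buf_group with IOSQE_BUFFER_SELECT. */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int register_buf_ring(int ring_fd, unsigned entries, unsigned short bgid)
{
    /* entries must be a power of two */
    size_t sz = entries * sizeof(struct io_uring_buf);
    void *mem = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (mem == MAP_FAILED)
        return -1;

    struct io_uring_buf_reg reg;
    memset(&reg, 0, sizeof(reg));
    reg.ring_addr = (unsigned long)mem;
    reg.ring_entries = entries;
    reg.bgid = bgid; /* buffer group id */
    return syscall(__NR_io_uring_register, ring_fd,
                   IORING_REGISTER_PBUF_RING, &reg, 1);
}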
6.2: ✅
struct io_ring_ctx {
struct percpu_ref refs;
struct io_rings *rings;
unsigned int flags;
enum task_work_notify_mode notify_method;
unsigned int compat;
unsigned int drain_next;
unsigned int restricted;
unsigned int off_timeout_used;
unsigned int drain_active;
unsigned int drain_disabled;
unsigned int has_evfd;
unsigned int syscall_iopoll;
unsigned int task_complete;
struct mutex uring_lock;
u32 *sq_array;
struct io_uring_sqe *sq_sqes;
unsigned int cached_sq_head;
unsigned int sq_entries;
struct io_rsrc_node *rsrc_node;
int rsrc_cached_refs;
atomic_t cancel_seq;
struct io_file_table file_table;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct io_submit_state submit_state;
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct list_head io_buffers_cache;
struct io_hash_table cancel_table_locked;
struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_wq_work_list locked_free_list;
unsigned int locked_free_nr;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
long unsigned int check_cq;
unsigned int file_alloc_start;
unsigned int file_alloc_end;
struct xarray personalities;
u32 pers_next;
struct io_uring_cqe *cqe_cached;
struct io_uring_cqe *cqe_sentinel;
unsigned int cached_cq_tail;
unsigned int cq_entries;
struct io_ev_fd *io_ev_fd;
struct wait_queue_head cq_wait;
unsigned int cq_extra;
spinlock_t completion_lock;
bool poll_multi_queue;
struct io_wq_work_list iopoll_list;
struct io_hash_table cancel_table;
struct llist_head work_llist;
struct list_head io_buffers_comp;
spinlock_t timeout_lock;
atomic_t cq_timeouts;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned int cq_last_tm_flush;
struct io_restriction restrictions;
struct task_struct *submitter_task;
struct io_rsrc_node *rsrc_backup_node;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct delayed_work rsrc_put_work;
struct callback_head rsrc_put_tw;
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
struct list_head io_buffers_pages;
struct socket *ring_sock;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
struct list_head defer_list;
unsigned int sq_thread_idle;
unsigned int evfd_last_cq_tail;
};
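task_complete, work_llist, and submitter_task back the single-issuer/deferred-task_work mode introduced in 6.0/6.1. A sketch of the setup flags; DEFER_TASKRUN requires SINGLE_ISSUER, and completions then only run when the submitting task enters io_uring_enter(2):

/* IORING_SETUP_SINGLE_ISSUER (6.0) pins ctx->submitter_task;
 * IORING_SETUP_DEFER_TASKRUN (6.1) queues completion work on
 * ctx->work_llist until that task reaps it. */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int setup_defer_taskrun(unsigned entries)
{
    struct io_uring_params p;
    memset(&p, 0, sizeof(p));
    p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
    return syscall(__NR_io_uring_setup, entries, &p);
}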
6.5: ✅
struct io_ring_ctx {
unsigned int flags;
unsigned int drain_next;
unsigned int restricted;
unsigned int off_timeout_used;
unsigned int drain_active;
unsigned int has_evfd;
unsigned int task_complete;
unsigned int syscall_iopoll;
unsigned int poll_activated;
unsigned int drain_disabled;
unsigned int compat;
enum task_work_notify_mode notify_method;
short unsigned int n_ring_pages;
short unsigned int n_sqe_pages;
struct page **ring_pages;
struct page **sqe_pages;
struct io_rings *rings;
struct task_struct *submitter_task;
struct percpu_ref refs;
struct mutex uring_lock;
u32 *sq_array;
struct io_uring_sqe *sq_sqes;
unsigned int cached_sq_head;
unsigned int sq_entries;
struct io_rsrc_node *rsrc_node;
atomic_t cancel_seq;
struct io_file_table file_table;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct io_submit_state submit_state;
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct list_head io_buffers_cache;
struct io_hash_table cancel_table_locked;
struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_wq_work_list locked_free_list;
unsigned int locked_free_nr;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
long unsigned int check_cq;
unsigned int file_alloc_start;
unsigned int file_alloc_end;
struct xarray personalities;
u32 pers_next;
struct io_uring_cqe *cqe_cached;
struct io_uring_cqe *cqe_sentinel;
unsigned int cached_cq_tail;
unsigned int cq_entries;
struct io_ev_fd *io_ev_fd;
struct wait_queue_head cq_wait;
unsigned int cq_extra;
spinlock_t completion_lock;
bool poll_multi_queue;
atomic_t cq_wait_nr;
struct io_wq_work_list iopoll_list;
struct io_hash_table cancel_table;
struct llist_head work_llist;
struct list_head io_buffers_comp;
spinlock_t timeout_lock;
atomic_t cq_timeouts;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned int cq_last_tm_flush;
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct list_head rsrc_ref_list;
struct io_alloc_cache rsrc_node_cache;
struct wait_queue_head rsrc_quiesce_wq;
unsigned int rsrc_quiesce;
struct list_head io_buffers_pages;
struct socket *ring_sock;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
struct callback_head poll_wq_task_work;
struct list_head defer_list;
unsigned int sq_thread_idle;
unsigned int evfd_last_cq_tail;
};
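n_ring_pages/ring_pages and n_sqe_pages/sqe_pages pin caller-supplied ring memory for IORING_SETUP_NO_MMAP, new in 6.5. A sketch under the assumption that the 6.5 interface passes the two regions via the user_addr offsets; huge pages are used because the kernel wants physically contiguous ranges:

#define _GNU_SOURCE /* for MAP_HUGETLB */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define HUGE_SZ (2UL * 1024 * 1024)

int setup_user_memory_ring(unsigned entries)
{
    /* The kernel pins these ranges into ring_pages/sqe_pages and expects
     * each to be physically contiguous; huge pages guarantee that. */
    void *rings = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
    void *sqes = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
    if (rings == MAP_FAILED || sqes == MAP_FAILED)
        return -1;

    struct io_uring_params p;
    memset(&p, 0, sizeof(p));
    p.flags = IORING_SETUP_NO_MMAP;
    p.sq_off.user_addr = (unsigned long)sqes;  /* SQE array memory */
    p.cq_off.user_addr = (unsigned long)rings; /* shared rings memory */
    return syscall(__NR_io_uring_setup, entries, &p);
}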
6.8: ✅
struct io_ring_ctx {
unsigned int flags;
unsigned int drain_next;
unsigned int restricted;
unsigned int off_timeout_used;
unsigned int drain_active;
unsigned int has_evfd;
unsigned int task_complete;
unsigned int lockless_cq;
unsigned int syscall_iopoll;
unsigned int poll_activated;
unsigned int drain_disabled;
unsigned int compat;
struct task_struct *submitter_task;
struct io_rings *rings;
struct percpu_ref refs;
enum task_work_notify_mode notify_method;
struct mutex uring_lock;
u32 *sq_array;
struct io_uring_sqe *sq_sqes;
unsigned int cached_sq_head;
unsigned int sq_entries;
struct io_rsrc_node *rsrc_node;
atomic_t cancel_seq;
struct io_file_table file_table;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
struct io_submit_state submit_state;
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct io_hash_table cancel_table_locked;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_wq_work_list iopoll_list;
bool poll_multi_queue;
struct hlist_head cancelable_uring_cmd;
struct io_uring_cqe *cqe_cached;
struct io_uring_cqe *cqe_sentinel;
unsigned int cached_cq_tail;
unsigned int cq_entries;
struct io_ev_fd *io_ev_fd;
unsigned int cq_extra;
struct llist_head work_llist;
long unsigned int check_cq;
atomic_t cq_wait_nr;
atomic_t cq_timeouts;
struct wait_queue_head cq_wait;
spinlock_t timeout_lock;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned int cq_last_tm_flush;
struct io_uring_cqe completion_cqes[16];
spinlock_t completion_lock;
struct io_wq_work_list locked_free_list;
unsigned int locked_free_nr;
struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
struct io_hash_table cancel_table;
struct hlist_head waitid_list;
struct hlist_head futex_list;
struct io_alloc_cache futex_cache;
const struct cred *sq_creds;
struct io_sq_data *sq_data;
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;
unsigned int file_alloc_start;
unsigned int file_alloc_end;
struct xarray personalities;
u32 pers_next;
struct list_head io_buffers_cache;
struct hlist_head io_buf_list;
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct list_head rsrc_ref_list;
struct io_alloc_cache rsrc_node_cache;
struct wait_queue_head rsrc_quiesce_wq;
unsigned int rsrc_quiesce;
struct io_wq_hash *hash_map;
struct user_struct *user;
struct mm_struct *mm_account;
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
u32 iowq_limits[2];
bool iowq_limits_set;
struct callback_head poll_wq_task_work;
struct list_head defer_list;
unsigned int sq_thread_idle;
unsigned int evfd_last_cq_tail;
short unsigned int n_ring_pages;
short unsigned int n_sqe_pages;
struct page **ring_pages;
struct page **sqe_pages;
};
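futex_list/futex_cache and waitid_list back the futex and waitid opcodes added in 6.7. A sketch of queueing a futex wait, assuming liburing >= 2.5 for the prep helper:

/* IORING_OP_FUTEX_WAIT completions are tracked on ctx->futex_list;
 * the helper names below come from liburing, not the kernel. */
#include <liburing.h>
#include <linux/futex.h>
#include <stdint.h>

int queue_futex_wait(struct io_uring *ring, uint32_t *futex_word)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    if (!sqe)
        return -1;
    /* Complete when *futex_word changes away from 0. */
    io_uring_prep_futex_wait(sqe, futex_word, 0, FUTEX_BITSET_MATCH_ANY,
                             FUTEX2_SIZE_U32, 0);
    return io_uring_submit(ring);
}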
arm64: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
armhf: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
ppc64el: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
riscv64: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
aws: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
azure: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
gcp: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
lowlatency: ✅
struct io_ring_ctx {
struct percpu_ref refs;
unsigned int flags;
bool compat;
bool account_mem;
u32 *sq_array;
unsigned int cached_sq_head;
unsigned int sq_entries;
unsigned int sq_mask;
unsigned int sq_thread_idle;
unsigned int cached_sq_dropped;
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
struct list_head timeout_list;
struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread;
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
unsigned int cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned int cq_entries;
unsigned int cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
struct io_rings *rings;
struct file **user_files;
unsigned int nr_user_files;
unsigned int nr_user_bufs;
struct io_mapped_ubuf *user_bufs;
struct user_struct *user;
const struct cred *creds;
struct completion ctx_done;
struct mutex uring_lock;
wait_queue_head_t wait;
spinlock_t completion_lock;
bool poll_multi_file;
struct list_head poll_list;
struct list_head cancel_list;
struct async_list pending_async[2];
struct socket *ring_sock;
};
Regular
Field-level differences between consecutive versions: fields present only in the newer version are listed under Added, fields present only in the older version under Removed, and retyped fields under Changed (old ➡️ new).
5.3 and 5.4: ⚠️
Added:
u32 *sq_array
unsigned int cached_sq_dropped
struct list_head timeout_list
atomic_t cached_cq_overflow
atomic_t cq_timeouts
struct io_rings *rings
const struct cred *creds
Removed:
struct io_sq_ring *sq_ring
struct io_cq_ring *cq_ring
Changed:
struct workqueue_struct *sqo_wq ➡️ struct workqueue_struct *sqo_wq[2]
5.4 and 5.8: ⚠️
Added:
unsigned int cq_overflow_flushed
unsigned int drain_next
unsigned int eventfd_async
long unsigned int sq_check_overflow
struct list_head cq_overflow_list
wait_queue_head_t inflight_wait
struct io_wq *io_wq
struct fixed_file_data *file_data
int ring_fd
struct file *ring_file
struct completion ref_comp
struct completion sq_thread_comp
struct io_kiocb *fallback_req
struct idr io_buffer_idr
struct idr personality_idr
long unsigned int cq_check_overflow
struct hlist_head *cancel_hash
unsigned int cancel_hash_bits
spinlock_t inflight_lock
struct list_head inflight_list
struct delayed_work file_put_work
struct llist_head file_put_llist
struct work_struct exit_work
Removed:
struct workqueue_struct *sqo_wq[2]
struct completion sqo_thread_started
struct file **user_files
struct completion ctx_done
struct list_head cancel_list
struct async_list pending_async[2]
Changed:
bool compat ➡️ unsigned int compat
bool account_mem ➡️ unsigned int account_mem
5.8 and 5.11: ⚠️
Added:
unsigned int limit_mem
unsigned int restricted
unsigned int sqo_dead
struct task_struct *sqo_task
struct mm_struct *mm_account
struct cgroup_subsys_state *sqo_blkcg_css
struct io_sq_data *sq_data
struct wait_queue_head sqo_sq_wait
struct list_head sqd_list
kuid_t loginuid
unsigned int sessionid
unsigned int cq_last_tm_flush
struct list_head iopoll_list
struct io_restriction restrictions
Removed:
unsigned int account_mem
wait_queue_head_t inflight_wait
struct task_struct *sqo_thread
struct mm_struct *sqo_mm
wait_queue_head_t sqo_wait
int ring_fd
struct file *ring_file
struct list_head poll_list
Changed:
atomic_t cached_cq_overflow ➡️ unsigned int cached_cq_overflow
5.11 and 5.13: ⚠️
Added:
struct io_submit_state submit_state
struct list_head locked_free_list
unsigned int locked_free_nr
const struct cred *sq_creds
struct io_file_table file_table
struct io_rsrc_data *buf_data
struct xarray io_buffers
struct xarray personalities
u32 pers_next
unsigned int cq_extra
struct delayed_work rsrc_put_work
struct llist_head rsrc_put_llist
struct list_head rsrc_ref_list
spinlock_t rsrc_ref_lock
struct io_rsrc_node *rsrc_node
struct io_rsrc_node *rsrc_backup_node
struct io_mapped_ubuf *dummy_ubuf
struct io_wq_hash *hash_map
struct callback_head *exit_task_work
struct list_head tctx_list
Removed:
unsigned int limit_mem
unsigned int cq_overflow_flushed
unsigned int sqo_dead
struct io_wq *io_wq
struct task_struct *sqo_task
struct cgroup_subsys_state *sqo_blkcg_css
const struct cred *creds
kuid_t loginuid
unsigned int sessionid
struct completion sq_thread_comp
struct io_kiocb *fallback_req
struct idr io_buffer_idr
struct idr personality_idr
spinlock_t inflight_lock
struct list_head inflight_list
struct delayed_work file_put_work
struct llist_head file_put_llist
Changed:
struct fixed_file_data *file_data ➡️ struct io_rsrc_data *file_data
struct io_mapped_ubuf *user_bufs ➡️ struct io_mapped_ubuf **user_bufs
5.13 and 5.15: ⚠️
Added:
unsigned int off_timeout_used
unsigned int drain_active
struct list_head ltimeout_list
long unsigned int check_cq_overflow
struct wait_queue_head poll_wait
spinlock_t timeout_lock
bool poll_multi_queue
struct llist_head fallback_llist
struct delayed_work fallback_work
u32 iowq_limits[2]
bool iowq_limits_set
Removed:
unsigned int sq_mask
unsigned int cached_sq_dropped
unsigned int cached_cq_overflow
long unsigned int sq_check_overflow
wait_queue_head_t wait
unsigned int cq_mask
long unsigned int cq_check_overflow
struct fasync_struct *cq_fasync
bool poll_multi_file
struct callback_head *exit_task_work
5.15 and 5.19: ⚠️
Added:
enum task_work_notify_mode notify_method
unsigned int drain_disabled
unsigned int has_evfd
unsigned int syscall_iopoll
int rsrc_cached_refs
atomic_t cancel_seq
struct io_buffer_list *io_bl
struct xarray io_bl_xa
struct list_head io_buffers_cache
struct list_head apoll_cache
long unsigned int check_cq
struct io_uring_cqe *cqe_cached
struct io_uring_cqe *cqe_sentinel
struct io_ev_fd *io_ev_fd
struct list_head io_buffers_comp
struct list_head io_buffers_pages
Removed:
unsigned int eventfd_async
struct xarray io_buffers
long unsigned int check_cq_overflow
struct eventfd_ctx *cq_ev_fd
struct wait_queue_head poll_wait
Changed:
struct list_head locked_free_list ➡️ struct io_wq_work_list locked_free_list
struct list_head iopoll_list ➡️ struct io_wq_work_list iopoll_list
5.19 and 6.2: ⚠️
Added:
unsigned int task_complete
struct io_hash_table cancel_table_locked
struct io_alloc_cache netmsg_cache
unsigned int file_alloc_start
unsigned int file_alloc_end
struct io_hash_table cancel_table
struct llist_head work_llist
struct task_struct *submitter_task
struct callback_head rsrc_put_tw
unsigned int evfd_last_cq_tail
Removed:
struct hlist_head *cancel_hash
unsigned int cancel_hash_bits
Changed:
struct list_head apoll_cache ➡️ struct io_alloc_cache apoll_cache
6.2 and 6.5: ⚠️
Added:
unsigned int poll_activated
short unsigned int n_ring_pages
short unsigned int n_sqe_pages
struct page **ring_pages
struct page **sqe_pages
atomic_t cq_wait_nr
struct wait_queue_head poll_wq
struct io_alloc_cache rsrc_node_cache
struct wait_queue_head rsrc_quiesce_wq
unsigned int rsrc_quiesce
struct callback_head poll_wq_task_work
Removed:
int rsrc_cached_refs
struct io_rsrc_node *rsrc_backup_node
struct delayed_work rsrc_put_work
struct callback_head rsrc_put_tw
struct llist_head rsrc_put_llist
spinlock_t rsrc_ref_lock
6.5 and 6.8: ⚠️
Added:
unsigned int lockless_cq
struct hlist_head cancelable_uring_cmd
struct io_uring_cqe completion_cqes[16]
struct hlist_head waitid_list
struct hlist_head futex_list
struct io_alloc_cache futex_cache
struct hlist_head io_buf_list
Removed:
struct list_head io_buffers_pages
struct socket *ring_sock
amd64 and arm64: ✅
amd64 and armhf: ✅
amd64 and ppc64el: ✅
amd64 and riscv64: ✅
generic and aws: ✅
generic and azure: ✅
generic and gcp: ✅
generic and lowlatency: ✅