kioctx
Regular
4.4: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct work_struct free_work;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
4.8: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct work_struct free_work;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
4.10: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct work_struct free_work;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
4.13: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct work_struct free_work;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
4.15: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct callback_head free_rcu;
struct work_struct free_work;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
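4.15 brings the first layout change in this series: a callback_head named free_rcu appears next to the existing free_work, so the context can be torn down in two RCU-deferred stages. The sketch below shows that pattern, loosely following fs/aio.c of that era; it is simplified rather than a verbatim copy, and it assumes the struct kioctx definition listed above.

/* Two-stage teardown enabled by free_rcu + free_work (roughly the 4.15 scheme). */
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static void free_ioctx(struct work_struct *work)
{
        struct kioctx *ctx = container_of(work, struct kioctx, free_work);

        /* Sleeping work: unmap the ring, drop the percpu refs, free ctx. */
}

static void free_ioctx_rcufn(struct rcu_head *head)
{
        /* free_rcu is reported as callback_head above; rcu_head is the same type. */
        struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);

        /* RCU callbacks cannot sleep, so punt the real teardown to a workqueue. */
        INIT_WORK(&ctx->free_work, free_ioctx);
        schedule_work(&ctx->free_work);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

        /* Wait a grace period so lockless lookups can no longer observe ctx. */
        call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}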
4.18: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
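From 4.18 onward the free_rcu/free_work pair is collapsed into a single rcu_work field, free_rwork, because the workqueue API gained queue_rcu_work(), which waits an RCU grace period and then queues the embedded work item. A minimal sketch of the consolidated teardown, again simplified from fs/aio.c rather than copied:

#include <linux/workqueue.h>

static void free_ioctx(struct work_struct *work)
{
        struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, free_rwork);

        /* Sleeping work: unmap the ring, drop the percpu refs, free ctx. */
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

        /*
         * One field now does what free_rcu + free_work did separately:
         * queue_rcu_work() defers by an RCU grace period, then runs
         * free_ioctx() from workqueue context, where it may sleep.
         */
        INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
        queue_rcu_work(system_wq, &ctx->free_rwork);
}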
5.0: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.3: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.4: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.8: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.11: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.13: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.15: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
5.19: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
6.2: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
6.5: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
6.8: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
arm64: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
armhf: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
ppc64el: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
riscv64: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
aws: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
azure: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
gcp: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
lowlatency: ✅
struct kioctx {
struct percpu_ref users;
atomic_t dead;
struct percpu_ref reqs;
long unsigned int user_id;
struct kioctx_cpu *cpu;
unsigned int req_batch;
unsigned int max_reqs;
unsigned int nr_events;
long unsigned int mmap_base;
long unsigned int mmap_size;
struct page **ring_pages;
long int nr_pages;
struct rcu_work free_rwork;
struct ctx_rq_wait *rq_wait;
atomic_t reqs_available;
spinlock_t ctx_lock;
struct list_head active_reqs;
struct mutex ring_lock;
wait_queue_head_t wait;
unsigned int tail;
unsigned int completed_events;
spinlock_t completion_lock;
struct page * internal_pages[8];
struct file *aio_ring_file;
unsigned int id;
};
Regular
4.4
and 4.8
✅
4.8
and 4.10
✅
4.10
and 4.13
✅
4.13
and 4.15
⚠️ struct callback_head free_rcu (added in 4.15)
4.15
and 4.18
⚠️ struct rcu_work free_rwork (added in 4.18)
struct callback_head free_rcu (removed in 4.18)
struct work_struct free_work (removed in 4.18)
4.18
and 5.0
✅
5.0
and 5.3
✅
5.3
and 5.4
✅
5.4
and 5.8
✅
5.8
and 5.11
✅
5.11
and 5.13
✅
5.13
and 5.15
✅
5.15
and 5.19
✅
5.19
and 6.2
✅
6.2
and 6.5
✅
6.5
and 6.8
✅
amd64
and arm64
✅
amd64
and armhf
✅
amd64
and ppc64el
✅
amd64
and riscv64
✅
generic
and aws
✅
generic
and azure
✅
generic
and gcp
✅
generic
and lowlatency
✅
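Apart from the free_rcu/free_work/free_rwork region, the layout above is identical from 4.4 through 6.8 and across the listed architectures and flavors. If this table is being used to judge whether one compiled object can cover all of these kernels, the hypothetical BPF CO-RE sketch below illustrates how: it assumes a vmlinux.h generated from a BTF-enabled kernel new enough to declare free_rwork plus libbpf's bpf_core_read.h, the helper names are illustrative rather than part of any existing tool, and kernels without native BTF would need BTF supplied externally (for example via libbpf's btf_custom_path option).

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* nr_events has the same type and position in every layout listed above,
 * so a plain CO-RE read relocates cleanly on all of them. */
static __always_inline unsigned int kioctx_nr_events(struct kioctx *ctx)
{
        return BPF_CORE_READ(ctx, nr_events);
}

/* The only divergent region: true on 4.18+ layouts, false on 4.13/4.15,
 * where free_rwork is absent and free_rcu/free_work exist instead. */
static __always_inline bool kioctx_uses_rcu_work(struct kioctx *ctx)
{
        return bpf_core_field_exists(ctx->free_rwork);
}

char LICENSE[] SEC("license") = "GPL";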