request_queue
Regular
4.4: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
unsigned int flush_flags;
unsigned int flush_not_queueable;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
bool mq_sysfs_init_done;
};
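These listings are mainly of interest to out-of-tree modules and debug tooling that touch queue fields directly. As a minimal sketch against a 4.4-era tree (the helper name is ours): `queue_lock` at this point is a `spinlock_t *`, usually aimed at the driver's own lock (falling back to the embedded `__queue_lock`), so locked accesses dereference the pointer:

#include <linux/blkdev.h>

/* Sketch for a 4.4-era kernel: queue_lock is a spinlock_t *,
 * typically pointing at the driver's own lock (or __queue_lock). */
static unsigned long my_read_nr_requests(struct request_queue *q)
{
        unsigned long nr;

        spin_lock_irq(q->queue_lock);
        nr = q->nr_requests;
        spin_unlock_irq(q->queue_lock);
        return nr;
}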
4.8: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
bool mq_sysfs_init_done;
};
4.10: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
struct rq_wb *rq_wb;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
struct blk_rq_stat rq_stats[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
int poll_nsec;
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
bool mq_sysfs_init_done;
};
4.13: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
atomic_t shared_hctx_restart;
struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
init_rq_fn *init_rq_fn;
exit_rq_fn *exit_rq_fn;
void (*initialize_rq_fn)(struct request *);
const struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
void *rq_alloc_data;
struct work_struct release_work;
u64 write_hints[5];
};
4.15: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
atomic_t shared_hctx_restart;
struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
poll_q_fn *poll_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
init_rq_fn *init_rq_fn;
exit_rq_fn *exit_rq_fn;
void (*initialize_rq_fn)(struct request *);
const struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
void *rq_alloc_data;
struct work_struct release_work;
u64 write_hints[5];
};
4.18: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
int nr_rqs[2];
int nr_rqs_elvpriv;
atomic_t shared_hctx_restart;
struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
poll_q_fn *poll_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
init_rq_fn *init_rq_fn;
exit_rq_fn *exit_rq_fn;
void (*initialize_rq_fn)(struct request *);
const struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
sector_t end_sector;
struct request *boundary_rq;
struct delayed_work delay_work;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
int id;
gfp_t bounce_gfp;
spinlock_t __queue_lock;
spinlock_t *queue_lock;
struct kobject kobj;
struct kobject mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
unsigned int nr_sorted;
unsigned int in_flight[2];
unsigned int request_fn_active;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
void *rq_alloc_data;
struct work_struct release_work;
u64 write_hints[5];
};
5.0: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
atomic_t mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
5.3: ✅struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
5.4: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
5.8: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_keyslot_manager *ksm;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *conv_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
5.11: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_keyslot_manager *ksm;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_sbitmap;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *conv_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int max_open_zones;
unsigned int max_active_zones;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct mutex debugfs_mutex;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
u64 write_hints[5];
};
5.13: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_keyslot_manager *ksm;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_sbitmap;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *conv_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int max_open_zones;
unsigned int max_active_zones;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct mutex debugfs_mutex;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
u64 write_hints[5];
};
5.15: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
spinlock_t queue_lock;
struct gendisk *disk;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_keyslot_manager *ksm;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_sbitmap;
struct sbitmap_queue sched_bitmap_tags;
struct sbitmap_queue sched_breserved_tags;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *conv_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int max_open_zones;
unsigned int max_active_zones;
int node;
struct mutex debugfs_mutex;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
u64 write_hints[5];
};
5.19: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct xarray hctx_table;
unsigned int nr_hw_queues;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
spinlock_t queue_lock;
struct gendisk *disk;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat *poll_stat;
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *conv_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int max_open_zones;
unsigned int max_active_zones;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
int quiesce_depth;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
struct blk_independent_access_ranges *ia_ranges;
struct srcu_struct srcu[0];
};
6.2: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct xarray hctx_table;
unsigned int nr_hw_queues;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
spinlock_t queue_lock;
struct gendisk *disk;
refcount_t refs;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat *poll_stat;
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
int quiesce_depth;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
};
6.5: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
struct mutex rq_qos_mutex;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int queue_depth;
struct xarray hctx_table;
unsigned int nr_hw_queues;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
spinlock_t queue_lock;
struct gendisk *disk;
refcount_t refs;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
long unsigned int nr_requests;
unsigned int dma_pad_mask;
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
unsigned int rq_timeout;
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct mutex blkcg_mutex;
struct queue_limits limits;
unsigned int required_elevator_features;
int node;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head flush_list;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
int quiesce_depth;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
};
6.8: ✅struct request_queue {
void *queuedata;
struct elevator_queue *elevator;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
long unsigned int queue_flags;
unsigned int rq_timeout;
unsigned int queue_depth;
refcount_t refs;
unsigned int nr_hw_queues;
struct xarray hctx_table;
struct percpu_ref q_usage_counter;
struct request *last_merge;
spinlock_t queue_lock;
int quiesce_depth;
struct gendisk *disk;
struct kobject *mq_kobj;
struct queue_limits limits;
struct blk_integrity integrity;
struct device *dev;
enum rpm_status rpm_status;
atomic_t pm_only;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
struct mutex rq_qos_mutex;
int id;
unsigned int dma_pad_mask;
long unsigned int nr_requests;
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
unsigned int required_elevator_features;
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct mutex blkcg_mutex;
int node;
spinlock_t requeue_lock;
struct list_head requeue_list;
struct delayed_work requeue_work;
struct blk_trace *blk_trace;
struct blk_flush_queue *fq;
struct list_head flush_list;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
};
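Note that 6.8 reshuffles the field order without renaming anything, so code that accesses members by name compiles unchanged; only consumers of raw offsets (pre-CO-RE BPF, crash/drgn scripts, hard-coded kprobe offsets) have to track each layout above. A quick sanity check one can build against any of these trees:

#include <linux/blkdev.h>
#include <linux/stddef.h>

/* Offsets move between versions even when field names persist. */
static void my_dump_offsets(void)
{
        pr_info("queuedata at %zu, queue_flags at %zu\n",
                offsetof(struct request_queue, queuedata),
                offsetof(struct request_queue, queue_flags));
}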
arm64: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
armhf: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
ppc64el: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
riscv64: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
aws: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
azure: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
gcp: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
lowlatency: ✅struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
make_request_fn *make_request_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
unsigned int queue_depth;
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
void *queuedata;
long unsigned int queue_flags;
atomic_t pm_only;
int id;
gfp_t bounce_gfp;
spinlock_t queue_lock;
struct kobject kobj;
struct kobject *mq_kobj;
struct blk_integrity integrity;
struct device *dev;
int rpm_status;
unsigned int nr_pending;
long unsigned int nr_requests;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
unsigned int rq_timeout;
int poll_nsec;
struct blk_stat_callback *poll_cb;
struct blk_rq_stat poll_stat[16];
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head icq_list;
long unsigned int blkcg_pols[1];
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
struct queue_limits limits;
unsigned int required_elevator_features;
unsigned int nr_zones;
long unsigned int *seq_zones_bitmap;
long unsigned int *seq_zones_wlock;
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct blk_trace *blk_trace;
struct mutex blk_trace_mutex;
struct blk_flush_queue *fq;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct delayed_work requeue_work;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
struct list_head unused_hctx_list;
spinlock_t unused_hctx_lock;
int mq_freeze_depth;
struct bsg_class_device bsg_dev;
struct throtl_data *td;
struct callback_head callback_head;
wait_queue_head_t mq_freeze_wq;
struct mutex mq_freeze_lock;
struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
bool mq_sysfs_init_done;
size_t cmd_size;
struct work_struct release_work;
u64 write_hints[5];
};
Regular — field differences between consecutive versions (⚠️: layout changed, with added/removed fields listed below and ➡️ marking a type change; ✅: identical)
4.4 and 4.8: ⚠️
struct work_struct timeout_work
unsigned int flush_flags
unsigned int flush_not_queueable
4.8 and 4.10: ⚠️
struct rq_wb *rq_wb
unsigned int queue_depth
struct blk_rq_stat rq_stats[2]
int poll_nsec
struct work_struct requeue_work ➡️ struct delayed_work requeue_work
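The `requeue_work` switch from `work_struct` to `delayed_work` (upstream in 4.9, which this table brackets as 4.8→4.10) is source-visible to anything flushing that work. A hedged sketch; `my_cancel_requeue` is a hypothetical helper:

#include <linux/version.h>
#include <linux/blkdev.h>

static void my_cancel_requeue(struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
        cancel_delayed_work_sync(&q->requeue_work);   /* delayed_work */
#else
        cancel_work_sync(&q->requeue_work);           /* plain work_struct */
#endif
}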
4.10 and 4.13: ⚠️
atomic_t shared_hctx_restart
struct blk_queue_stats *stats
init_rq_fn *init_rq_fn
exit_rq_fn *exit_rq_fn
void (*initialize_rq_fn)(struct request *)
struct blk_stat_callback *poll_cb
struct blk_rq_stat poll_stat[16]
struct dentry *debugfs_dir
struct dentry *sched_debugfs_dir
size_t cmd_size
void *rq_alloc_data
struct work_struct release_work
u64 write_hints[5]
struct blk_rq_stat rq_stats[2]
int bsg_job_size
struct blk_mq_ops *mq_ops ➡️ const struct blk_mq_ops *mq_ops
struct backing_dev_info backing_dev_info ➡️ struct backing_dev_info *backing_dev_info
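`backing_dev_info` going from embedded struct to pointer (it became dynamically allocated and refcounted, upstream in 4.12) changes every access site. A sketch of the usual guard:

#include <linux/version.h>
#include <linux/blkdev.h>

static struct backing_dev_info *my_queue_bdi(struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        return q->backing_dev_info;     /* pointer from 4.12 on */
#else
        return &q->backing_dev_info;    /* embedded before that */
#endif
}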
4.13 and 4.15: ⚠️
poll_q_fn *poll_fn
struct mutex blk_trace_mutex
4.15 and 4.18: ⚠️
unsigned int nr_zones
long unsigned int *seq_zones_bitmap
long unsigned int *seq_zones_wlock
struct list_head tag_busy_list
struct bio_set *bio_split ➡️ struct bio_set bio_split
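`bio_split` flipping from `struct bio_set *` to an embedded `struct bio_set` follows the 4.18 `bioset_init()` rework; callers passing the queue's bioset adjust accordingly. Sketch:

#include <linux/version.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *my_split(struct request_queue *q, struct bio *bio,
                            int sectors)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
        return bio_split(bio, sectors, GFP_NOIO, &q->bio_split);
#else
        return bio_split(bio, sectors, GFP_NOIO, q->bio_split);
#endif
}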
4.18 and 5.0: ⚠️
struct rq_qos *rq_qos
atomic_t pm_only
struct dentry *rqos_debugfs_dir
int nr_rqs[2]
int nr_rqs_elvpriv
atomic_t shared_hctx_restart
struct rq_wb *rq_wb
struct request_list root_rl
request_fn_proc *request_fn
poll_q_fn *poll_fn
prep_rq_fn *prep_rq_fn
unprep_rq_fn *unprep_rq_fn
softirq_done_fn *softirq_done_fn
rq_timed_out_fn *rq_timed_out_fn
lld_busy_fn *lld_busy_fn
init_rq_fn *init_rq_fn
exit_rq_fn *exit_rq_fn
void (*initialize_rq_fn)(struct request *)
unsigned int *mq_map
sector_t end_sector
struct request *boundary_rq
struct delayed_work delay_work
spinlock_t __queue_lock
unsigned int nr_congestion_on
unsigned int nr_congestion_off
unsigned int nr_batching
struct blk_queue_tag *queue_tags
unsigned int nr_sorted
unsigned int in_flight[2]
unsigned int request_fn_active
struct list_head timeout_list
int bypass_depth
bsg_job_fn *bsg_job_fn
void *rq_alloc_data
spinlock_t *queue_lock ➡️ spinlock_t queue_lock
struct kobject mq_kobj ➡️ struct kobject *mq_kobj
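This is the largest break in the table: 5.0 dropped the legacy single-queue machinery (`request_fn`, the elevator request lists, `bsg_job_fn`, and friends), embedded `queue_lock` in the queue itself, and made `mq_kobj` a pointer. For code taking the queue lock, the pointer-to-embedded change looks like this sketch:

#include <linux/version.h>
#include <linux/blkdev.h>

static void my_with_queue_lock(struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
        spin_lock_irq(&q->queue_lock);   /* embedded spinlock_t */
        /* ... inspect queue state ... */
        spin_unlock_irq(&q->queue_lock);
#else
        spin_lock_irq(q->queue_lock);    /* spinlock_t * before 5.0 */
        /* ... */
        spin_unlock_irq(q->queue_lock);
#endif
}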
5.0 and 5.3: ⚠️
struct list_head unused_hctx_list
spinlock_t unused_hctx_lock
struct mutex mq_freeze_lock
struct list_head all_q_node
atomic_t mq_freeze_depth ➡️ int mq_freeze_depth
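`mq_freeze_depth` can shrink from `atomic_t` to plain `int` because the new `mq_freeze_lock` serializes freeze/unfreeze; the exact upstream point lies inside this table's 5.0→5.3 window, so the guard below follows the table's granularity. A sketch for debug-only readers (this is not a driver API):

#include <linux/version.h>
#include <linux/blkdev.h>

static bool my_queue_frozen(struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
        return READ_ONCE(q->mq_freeze_depth) > 0;      /* int, mq_freeze_lock */
#else
        return atomic_read(&q->mq_freeze_depth) > 0;   /* atomic_t */
#endif
}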
5.3 and 5.4: ⚠️
unsigned int required_elevator_features
struct mutex sysfs_dir_lock
struct list_head queue_head
5.4 and 5.8: ⚠️
struct blk_keyslot_manager *ksm
long unsigned int *conv_zones_bitmap
dma_drain_needed_fn *dma_drain_needed
unsigned int nr_queues
unsigned int dma_drain_size
void *dma_drain_buffer
long unsigned int *seq_zones_bitmap
5.8 and 5.11: ⚠️
atomic_t nr_active_requests_shared_sbitmap
unsigned int max_open_zones
unsigned int max_active_zones
struct mutex debugfs_mutex
make_request_fn *make_request_fn
struct mutex blk_trace_mutex
struct work_struct release_work
int rpm_status ➡️ enum rpm_status rpm_status
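The removal of `make_request_fn` here reflects the 5.9 move of bio submission into `struct block_device_operations`: bio-based drivers supply `.submit_bio` instead of wiring a function into the queue. A minimal sketch for the 5.9–5.15 window (the hook returns `void` from 5.16 onward; all names here are ours):

#include <linux/module.h>
#include <linux/blkdev.h>

static blk_qc_t my_submit_bio(struct bio *bio)
{
        /* ... route or complete the bio ... */
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static const struct block_device_operations my_fops = {
        .owner      = THIS_MODULE,
        .submit_bio = my_submit_bio,
};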
5.11 and 5.13: ⚠️
gfp_t bounce_gfp
unsigned int nr_pending
5.13 and 5.15: ⚠️
struct gendisk *disk
struct sbitmap_queue sched_bitmap_tags
struct sbitmap_queue sched_breserved_tags
struct backing_dev_info *backing_dev_info
unsigned int sg_timeout
unsigned int sg_reserved_size
struct bsg_class_device bsg_dev
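`backing_dev_info` leaves the queue because 5.15 moved the bdi into `struct gendisk`; the new `disk` back-pointer is how queue-side code reaches it now. Sketch:

#include <linux/version.h>
#include <linux/blkdev.h>

static struct backing_dev_info *my_bdi(struct request_queue *q)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
        return q->disk ? q->disk->bdi : NULL;  /* bdi lives in gendisk now */
#else
        return q->backing_dev_info;
#endif
}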
5.15 and 5.19: ⚠️
struct xarray hctx_table
struct blk_crypto_profile *crypto_profile
struct kobject *crypto_kobject
atomic_t nr_active_requests_shared_tags
struct blk_mq_tags *sched_shared_tags
int quiesce_depth
struct blk_independent_access_ranges *ia_ranges
struct srcu_struct srcu[0]
struct blk_mq_hw_ctx **queue_hw_ctx
struct blk_keyslot_manager *ksm
atomic_t nr_active_requests_shared_sbitmap
struct sbitmap_queue sched_bitmap_tags
struct sbitmap_queue sched_breserved_tags
size_t cmd_size
u64 write_hints[5]
struct blk_rq_stat poll_stat[16] ➡️ struct blk_rq_stat *poll_stat
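`queue_hw_ctx` becoming the `hctx_table` xarray (upstream in 5.18) is invisible to code that sticks to the iterator macro, which was updated in lockstep:

#include <linux/blk-mq.h>

/* queue_for_each_hw_ctx() hides whether hctxs live in an array
 * (older kernels) or in the hctx_table xarray (5.18 onward). */
static void my_count_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                pr_info("hctx %lu: queue_num=%u\n", i, hctx->queue_num);
}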
5.19 and 6.2: ⚠️
refcount_t refs
struct kobject kobj
unsigned int dma_alignment
unsigned int nr_zones
long unsigned int *conv_zones_bitmap
long unsigned int *seq_zones_wlock
unsigned int max_open_zones
unsigned int max_active_zones
struct bio_set bio_split
struct blk_independent_access_ranges *ia_ranges
struct srcu_struct srcu[0]
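6.2 swaps the queue's embedded `kobject` for a bare `refcount_t refs`, but the long-standing helpers wrap whichever mechanism is current, so reference-holding code spans this break unchanged:

#include <linux/blkdev.h>

/* blk_get_queue()/blk_put_queue() abstract the refcount change
 * (kobject up to 5.19 in this table, plain refcount_t from 6.2). */
static void my_hold_queue(struct request_queue *q)
{
        if (!blk_get_queue(q))
                return;         /* queue already dying */
        /* ... use q ... */
        blk_put_queue(q);
}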
6.2 and 6.5: ⚠️
struct mutex rq_qos_mutex
struct mutex blkcg_mutex
struct list_head flush_list
int poll_nsec
struct blk_stat_callback *poll_cb
struct blk_rq_stat *poll_stat
6.5 and 6.8: ✅
amd64 and arm64: ✅
amd64 and armhf: ✅
amd64 and ppc64el: ✅
amd64 and riscv64: ✅
generic and aws: ✅
generic and azure: ✅
generic and gcp: ✅
generic and lowlatency: ✅