pglist_data
Regular
4.4
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_max_order;
enum zone_type classzone_idx;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
};
4.8
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
unsigned int inactive_ratio;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[26];
};
4.10
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
unsigned int inactive_ratio;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[26];
};
4.13
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
unsigned int inactive_ratio;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[27];
};
4.15
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[27];
};
4.18
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
spinlock_t numabalancing_migrate_lock;
long unsigned int numabalancing_migrate_next_window;
long unsigned int numabalancing_migrate_nr_pages;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[28];
};
5.0
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[30];
};
5.3
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
spinlock_t split_queue_lock;
struct list_head split_queue;
long unsigned int split_queue_len;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[30];
};
5.4
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
5.8
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec __lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[33];
};
5.11
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
struct deferred_split deferred_split_queue;
struct lruvec __lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[38];
};
5.13
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
struct deferred_split deferred_split_queue;
struct lruvec __lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[39];
};
5.15
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
struct deferred_split deferred_split_queue;
struct lruvec __lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[39];
};
5.19
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
wait_queue_head_t reclaim_wait[4];
atomic_t nr_writeback_throttled;
long unsigned int nr_reclaim_start;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
struct deferred_split deferred_split_queue;
struct lruvec __lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[41];
};
6.2
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
wait_queue_head_t reclaim_wait[4];
atomic_t nr_writeback_throttled;
long unsigned int nr_reclaim_start;
struct mutex kswapd_lock;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct cacheline_padding _pad1_;
struct deferred_split deferred_split_queue;
unsigned int nbp_rl_start;
long unsigned int nbp_rl_nr_cand;
unsigned int nbp_threshold;
unsigned int nbp_th_start;
long unsigned int nbp_th_nr_cand;
struct lruvec __lruvec;
long unsigned int flags;
struct lru_gen_mm_walk mm_walk;
struct cacheline_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[43];
struct memory_tier *memtier;
};
6.5
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
wait_queue_head_t reclaim_wait[4];
atomic_t nr_writeback_throttled;
long unsigned int nr_reclaim_start;
struct mutex kswapd_lock;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct cacheline_padding _pad1_;
struct deferred_split deferred_split_queue;
unsigned int nbp_rl_start;
long unsigned int nbp_rl_nr_cand;
unsigned int nbp_threshold;
unsigned int nbp_th_start;
long unsigned int nbp_th_nr_cand;
struct lruvec __lruvec;
long unsigned int flags;
struct lru_gen_mm_walk mm_walk;
struct lru_gen_memcg memcg_lru;
struct cacheline_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[43];
struct memory_tier *memtier;
struct memory_failure_stats mf_stats;
};
6.8
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
wait_queue_head_t reclaim_wait[4];
atomic_t nr_writeback_throttled;
long unsigned int nr_reclaim_start;
struct mutex kswapd_lock;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct cacheline_padding _pad1_;
struct deferred_split deferred_split_queue;
unsigned int nbp_rl_start;
long unsigned int nbp_rl_nr_cand;
unsigned int nbp_threshold;
unsigned int nbp_th_start;
long unsigned int nbp_th_nr_cand;
struct lruvec __lruvec;
long unsigned int flags;
struct lru_gen_mm_walk mm_walk;
struct lru_gen_memcg memcg_lru;
struct cacheline_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[46];
struct memory_tier *memtier;
struct memory_failure_stats mf_stats;
};
arm64
: ✅struct pglist_data {
struct zone node_zones[3];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
armhf
: ✅struct pglist_data {
struct zone node_zones[3];
struct zonelist node_zonelists[1];
int nr_zones;
struct page *node_mem_map;
struct page_ext *node_page_ext;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
ppc64el
: ✅struct pglist_data {
struct zone node_zones[3];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
riscv64
: ✅struct pglist_data {
struct zone node_zones[3];
struct zonelist node_zonelists[1];
int nr_zones;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
aws
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
azure
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
gcp
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
lowlatency
: ✅struct pglist_data {
struct zone node_zones[5];
struct zonelist node_zonelists[2];
int nr_zones;
spinlock_t node_size_lock;
long unsigned int node_start_pfn;
long unsigned int node_present_pages;
long unsigned int node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd;
int kswapd_order;
enum zone_type kswapd_classzone_idx;
int kswapd_failures;
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
long unsigned int totalreserve_pages;
long unsigned int min_unmapped_pages;
long unsigned int min_slab_pages;
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct deferred_split deferred_split_queue;
struct lruvec lruvec;
long unsigned int flags;
struct zone_padding _pad2_;
struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[32];
};
Regular (version-to-version differences)
4.4 and 4.8
⚠️int kswapd_order
enum zone_type kswapd_classzone_idx
int kcompactd_max_order
enum zone_type kcompactd_classzone_idx
wait_queue_head_t kcompactd_wait
struct task_struct *kcompactd
long unsigned int totalreserve_pages
long unsigned int min_unmapped_pages
long unsigned int min_slab_pages
struct zone_padding _pad1_
spinlock_t lru_lock
spinlock_t split_queue_lock
struct list_head split_queue
long unsigned int split_queue_len
struct lruvec lruvec
unsigned int inactive_ratio
long unsigned int flags
struct zone_padding _pad2_
struct per_cpu_nodestat *per_cpu_nodestats
atomic_long_t vm_stat[26]
int kswapd_max_order
enum zone_type classzone_idx
4.8 and 4.10
✅
4.10 and 4.13
⚠️int kswapd_failures
atomic_long_t vm_stat[26]
➡️ atomic_long_t vm_stat[27]
4.13 and 4.15
⚠️unsigned int inactive_ratio
4.15 and 4.18
⚠️atomic_long_t vm_stat[27]
➡️ atomic_long_t vm_stat[28]
4.18 and 5.0
⚠️spinlock_t numabalancing_migrate_lock
long unsigned int numabalancing_migrate_next_window
long unsigned int numabalancing_migrate_nr_pages
atomic_long_t vm_stat[28]
➡️ atomic_long_t vm_stat[30]
5.0 and 5.3
✅
5.3 and 5.4
⚠️struct deferred_split deferred_split_queue
spinlock_t split_queue_lock
struct list_head split_queue
long unsigned int split_queue_len
atomic_long_t vm_stat[30]
➡️ atomic_long_t vm_stat[32]
5.4 and 5.8
⚠️enum zone_type kswapd_highest_zoneidx
enum zone_type kcompactd_highest_zoneidx
struct lruvec __lruvec
enum zone_type kswapd_classzone_idx
enum zone_type kcompactd_classzone_idx
struct lruvec lruvec
atomic_long_t vm_stat[32]
➡️ atomic_long_t vm_stat[33]
5.8 and 5.11
⚠️spinlock_t lru_lock
atomic_long_t vm_stat[33]
➡️ atomic_long_t vm_stat[38]
5.11 and 5.13
⚠️atomic_long_t vm_stat[38]
➡️ atomic_long_t vm_stat[39]
5.13 and 5.15
⚠️bool proactive_compact_trigger
5.15 and 5.19
⚠️wait_queue_head_t reclaim_wait[4]
atomic_t nr_writeback_throttled
long unsigned int nr_reclaim_start
atomic_long_t vm_stat[39]
➡️ atomic_long_t vm_stat[41]
5.19 and 6.2
⚠️struct mutex kswapd_lock
unsigned int nbp_rl_start
long unsigned int nbp_rl_nr_cand
unsigned int nbp_threshold
unsigned int nbp_th_start
long unsigned int nbp_th_nr_cand
struct lru_gen_mm_walk mm_walk
struct memory_tier *memtier
struct zone_padding _pad1_
➡️ struct cacheline_padding _pad1_
struct zone_padding _pad2_
➡️ struct cacheline_padding _pad2_
atomic_long_t vm_stat[41]
➡️ atomic_long_t vm_stat[43]
6.2 and 6.5
⚠️struct lru_gen_memcg memcg_lru
struct memory_failure_stats mf_stats
6.5 and 6.8
⚠️atomic_long_t vm_stat[43]
➡️ atomic_long_t vm_stat[46]
amd64 and arm64
⚠️struct zone node_zones[5]
➡️ struct zone node_zones[3]
amd64 and armhf
⚠️struct page *node_mem_map
struct page_ext *node_page_ext
spinlock_t node_size_lock
long unsigned int min_unmapped_pages
long unsigned int min_slab_pages
struct deferred_split deferred_split_queue
struct zone node_zones[5]
➡️ struct zone node_zones[3]
struct zonelist node_zonelists[2]
➡️ struct zonelist node_zonelists[1]
amd64 and ppc64el
⚠️struct zone node_zones[5]
➡️ struct zone node_zones[3]
amd64 and riscv64
⚠️spinlock_t node_size_lock
long unsigned int min_unmapped_pages
long unsigned int min_slab_pages
struct deferred_split deferred_split_queue
struct zone node_zones[5]
➡️ struct zone node_zones[3]
struct zonelist node_zonelists[2]
➡️ struct zonelist node_zonelists[1]
generic and aws
✅
generic and azure
✅
generic and gcp
✅
generic and lowlatency
✅