mem_cgroup (Regular)
4.4: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct page_counter memory;
struct page_counter memsw;
struct page_counter kmem;
long unsigned int low;
long unsigned int high;
long unsigned int soft_limit;
struct vmpressure vmpressure;
int initialized;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
atomic_t moving_account;
spinlock_t move_lock;
struct task_struct *move_lock_task;
long unsigned int move_lock_flags;
struct mem_cgroup_stat_cpu *stat;
struct cg_proto tcp_mem;
int kmemcg_id;
bool kmem_acct_activated;
bool kmem_acct_active;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
4.8: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int low;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
atomic_t moving_account;
spinlock_t move_lock;
struct task_struct *move_lock_task;
long unsigned int move_lock_flags;
struct mem_cgroup_stat_cpu *stat;
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
4.10: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int low;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
atomic_t moving_account;
spinlock_t move_lock;
struct task_struct *move_lock_task;
long unsigned int move_lock_flags;
struct mem_cgroup_stat_cpu *stat;
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
4.13: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int low;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
atomic_t moving_account;
spinlock_t move_lock;
struct task_struct *move_lock_task;
long unsigned int move_lock_flags;
struct mem_cgroup_stat_cpu *stat;
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
4.15: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int low;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
atomic_t moving_account;
spinlock_t move_lock;
struct task_struct *move_lock_task;
long unsigned int move_lock_flags;
struct mem_cgroup_stat_cpu *stat;
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
4.18: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct mem_cgroup_stat_cpu *stat_cpu;
struct memcg_padding _pad2_;
atomic_long_t stat[34];
atomic_long_t events[85];
atomic_long_t memory_events[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.0: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct mem_cgroup_stat_cpu *stat_cpu;
struct memcg_padding _pad2_;
atomic_long_t stat[36];
atomic_long_t events[85];
atomic_long_t memory_events[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.3: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[36];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.4: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.8: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[36];
atomic_long_t vmevents[92];
atomic_long_t memory_events[8];
atomic_long_t memory_events_local[8];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.11: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_long_t vmstats[41];
atomic_long_t vmevents[96];
atomic_long_t memory_events[8];
atomic_long_t memory_events_local[8];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct memcg_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.13: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
struct memcg_vmstats vmstats;
atomic_long_t memory_events[8];
atomic_long_t memory_events_local[8];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct memcg_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.15: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
struct memcg_vmstats vmstats;
atomic_long_t memory_events[8];
atomic_long_t memory_events_local[8];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct memcg_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
5.19: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int zswap_max;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
struct memcg_vmstats vmstats;
atomic_long_t memory_events[9];
atomic_long_t memory_events_local[9];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct memcg_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
6.2: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int zswap_max;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct cacheline_padding _pad1_;
struct memcg_vmstats *vmstats;
atomic_long_t memory_events[9];
atomic_long_t memory_events_local[9];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct cacheline_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct lru_gen_mm_list mm_list;
struct mem_cgroup_per_node * nodeinfo[0];
};
6.5: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int zswap_max;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct cacheline_padding _pad1_;
struct memcg_vmstats *vmstats;
atomic_long_t memory_events[9];
atomic_long_t memory_events_local[9];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
struct obj_cgroup *objcg;
struct list_head objcg_list;
struct cacheline_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct lru_gen_mm_list mm_list;
struct mem_cgroup_per_node * nodeinfo[0];
};
6.8: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
struct work_struct high_work;
long unsigned int zswap_max;
bool zswap_writeback;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct cacheline_padding _pad1_;
struct memcg_vmstats *vmstats;
atomic_long_t memory_events[9];
atomic_long_t memory_events_local[9];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
struct obj_cgroup *objcg;
struct obj_cgroup *orig_objcg;
struct list_head objcg_list;
struct cacheline_padding _pad2_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_percpu;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct lru_gen_mm_list mm_list;
struct mem_cgroup_per_node * nodeinfo[0];
};
arm64: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[81];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
armhf: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[59];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
ppc64el: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[78];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
riscv64: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[61];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct mem_cgroup_per_node * nodeinfo[0];
};
aws: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
azure: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
gcp: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
lowlatency: ✅struct mem_cgroup {
struct cgroup_subsys_state css;
struct mem_cgroup_id id;
struct page_counter memory;
struct page_counter swap;
struct page_counter memsw;
struct page_counter kmem;
struct page_counter tcpmem;
long unsigned int high;
struct work_struct high_work;
long unsigned int soft_limit;
struct vmpressure vmpressure;
bool use_hierarchy;
bool oom_group;
bool oom_lock;
int under_oom;
int swappiness;
int oom_kill_disable;
struct cgroup_file events_file;
struct cgroup_file events_local_file;
struct cgroup_file swap_events_file;
struct mutex thresholds_lock;
struct mem_cgroup_thresholds thresholds;
struct mem_cgroup_thresholds memsw_thresholds;
struct list_head oom_notify;
long unsigned int move_charge_at_immigrate;
spinlock_t move_lock;
long unsigned int move_lock_flags;
struct memcg_padding _pad1_;
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu *vmstats_local;
struct memcg_vmstats_percpu *vmstats_percpu;
struct memcg_padding _pad2_;
atomic_long_t vmstats[38];
atomic_long_t vmevents[85];
atomic_long_t memory_events[7];
atomic_long_t memory_events_local[7];
long unsigned int socket_pressure;
bool tcpmem_active;
int tcpmem_pressure;
int kmemcg_id;
enum memcg_kmem_state kmem_state;
struct list_head kmem_caches;
int last_scanned_node;
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
struct memcg_cgwb_frn cgwb_frn[4];
struct list_head event_list;
spinlock_t event_list_lock;
struct deferred_split deferred_split_queue;
struct mem_cgroup_per_node * nodeinfo[0];
};
Regular
4.4 and 4.8 ⚠️struct mem_cgroup_id id
struct page_counter swap
struct page_counter tcpmem
struct work_struct high_work
long unsigned int socket_pressure
bool tcpmem_active
int tcpmem_pressure
enum memcg_kmem_state kmem_state
int initialized
struct cg_proto tcp_mem
bool kmem_acct_activated
bool kmem_acct_active
4.8 and 4.10 ✅
4.10 and 4.13 ⚠️struct list_head kmem_caches
4.13 and 4.15 ✅
4.15 and 4.18 ⚠️struct cgroup_file swap_events_file
struct memcg_padding _pad1_
struct mem_cgroup_stat_cpu *stat_cpu
struct memcg_padding _pad2_
atomic_long_t events[85]
atomic_long_t memory_events[7]
long unsigned int low
struct mem_cgroup_stat_cpu *stat ➡️ atomic_long_t stat[34]
4.18 and 5.0 ⚠️bool oom_group
atomic_long_t stat[34] ➡️ atomic_long_t stat[36]
5.0 and 5.3 ⚠️struct cgroup_file events_local_file
struct memcg_vmstats_percpu *vmstats_local
struct memcg_vmstats_percpu *vmstats_percpu
atomic_long_t vmstats[36]
atomic_long_t vmevents[85]
atomic_long_t memory_events_local[7]
struct mem_cgroup_stat_cpu *stat_cpu
atomic_long_t stat[36]
atomic_long_t events[85]
5.3 and 5.4 ⚠️struct memcg_cgwb_frn cgwb_frn[4]
struct deferred_split deferred_split_queue
atomic_long_t vmstats[36] ➡️ atomic_long_t vmstats[38]
5.4 and 5.8 ⚠️long unsigned int high
int last_scanned_node
nodemask_t scan_nodes
atomic_t numainfo_events
atomic_t numainfo_updating
atomic_long_t vmstats[38] ➡️ atomic_long_t vmstats[36]
atomic_long_t vmevents[85] ➡️ atomic_long_t vmevents[92]
atomic_long_t memory_events[7] ➡️ atomic_long_t memory_events[8]
atomic_long_t memory_events_local[7] ➡️ atomic_long_t memory_events_local[8]
5.8 and 5.11 ⚠️struct obj_cgroup *objcg
struct list_head objcg_list
bool use_hierarchy
struct list_head kmem_caches
atomic_long_t vmstats[36] ➡️ atomic_long_t vmstats[41]
atomic_long_t vmevents[92] ➡️ atomic_long_t vmevents[96]
5.11 and 5.13 ⚠️atomic_long_t vmevents[96]
struct memcg_vmstats_percpu *vmstats_local
atomic_long_t vmstats[41] ➡️ struct memcg_vmstats vmstats
5.13 and 5.15 ✅
5.15 and 5.19 ⚠️long unsigned int zswap_max
enum memcg_kmem_state kmem_state
atomic_long_t memory_events[8] ➡️ atomic_long_t memory_events[9]
atomic_long_t memory_events_local[8] ➡️ atomic_long_t memory_events_local[9]
5.19 and 6.2 ⚠️struct lru_gen_mm_list mm_list
struct memcg_padding _pad1_ ➡️ struct cacheline_padding _pad1_
struct memcg_vmstats vmstats ➡️ struct memcg_vmstats *vmstats
struct memcg_padding _pad2_ ➡️ struct cacheline_padding _pad2_
6.2 and 6.5 ✅
6.5 and 6.8 ⚠️bool zswap_writeback
struct obj_cgroup *orig_objcg
amd64 and arm64 ⚠️atomic_long_t vmevents[85] ➡️ atomic_long_t vmevents[81]
amd64 and armhf ⚠️nodemask_t scan_nodes
atomic_t numainfo_events
atomic_t numainfo_updating
struct deferred_split deferred_split_queue
atomic_long_t vmevents[85] ➡️ atomic_long_t vmevents[59]
amd64 and ppc64el ⚠️atomic_long_t vmevents[85] ➡️ atomic_long_t vmevents[78]
amd64 and riscv64 ⚠️nodemask_t scan_nodes
atomic_t numainfo_events
atomic_t numainfo_updating
struct deferred_split deferred_split_queue
atomic_long_t vmevents[85] ➡️ atomic_long_t vmevents[61]
generic and aws ✅
generic and azure ✅
generic and gcp ✅
generic and lowlatency ✅