struct mem_cgroup_per_node — layout across kernel versions, architectures, and kernel flavors (✅ = present/identical, ⚠️ = fields differ, ➡️ = field renamed or retyped)
Regular
4.4
: ✅struct mem_cgroup_per_node {
struct mem_cgroup_per_zone zoneinfo[5];
};
4.8
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
long unsigned int lru_size[5];
struct mem_cgroup_reclaim_iter iter[13];
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
4.10
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
4.13
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
4.15
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
4.18
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[28];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
5.0
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[30];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
5.3
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[30];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
5.4
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
5.8
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[33];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
5.11
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[38];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
5.13
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct batched_lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[39];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
5.15
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stats_percpu *lruvec_stats_percpu;
struct lruvec_stats lruvec_stats;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
5.19
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stats_percpu *lruvec_stats_percpu;
struct lruvec_stats lruvec_stats;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
6.2
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stats_percpu *lruvec_stats_percpu;
struct lruvec_stats lruvec_stats;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
6.5
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stats_percpu *lruvec_stats_percpu;
struct lruvec_stats lruvec_stats;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
6.8
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stats_percpu *lruvec_stats_percpu;
struct lruvec_stats lruvec_stats;
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter;
struct shrinker_info *shrinker_info;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
struct mem_cgroup *memcg;
};
arm64
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[15];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
armhf
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[15];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
ppc64el
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[15];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
riscv64
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[15];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
aws
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
azure
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
gcp
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
lowlatency
: ✅struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat *lruvec_stat_local;
struct lruvec_stat *lruvec_stat_cpu;
atomic_long_t lruvec_stat[32];
long unsigned int lru_zone_size[25];
struct mem_cgroup_reclaim_iter iter[13];
struct memcg_shrinker_map *shrinker_map;
struct rb_node tree_node;
long unsigned int usage_in_excess;
bool on_tree;
bool congested;
struct mem_cgroup *memcg;
};
Regular — pairwise field differences between adjacent versions
4.4
and 4.8
⚠️struct lruvec lruvec
long unsigned int lru_size[5]
struct mem_cgroup_reclaim_iter iter[13]
struct rb_node tree_node
long unsigned int usage_in_excess
bool on_tree
struct mem_cgroup *memcg
struct mem_cgroup_per_zone zoneinfo[5]
4.8
and 4.10
⚠️long unsigned int lru_zone_size[25]
long unsigned int lru_size[5]
4.10
and 4.13
⚠️struct lruvec_stat *lruvec_stat
4.13
and 4.15
✅
4.15
and 4.18
⚠️struct lruvec_stat *lruvec_stat_cpu
bool congested
struct lruvec_stat *lruvec_stat
➡️ atomic_long_t lruvec_stat[28]
4.18
and 5.0
⚠️struct memcg_shrinker_map *shrinker_map
atomic_long_t lruvec_stat[28]
➡️ atomic_long_t lruvec_stat[30]
5.0
and 5.3
⚠️struct lruvec_stat *lruvec_stat_local
5.3
and 5.4
⚠️atomic_long_t lruvec_stat[30]
➡️ atomic_long_t lruvec_stat[32]
5.4
and 5.8
⚠️bool congested
atomic_long_t lruvec_stat[32]
➡️ atomic_long_t lruvec_stat[33]
struct mem_cgroup_reclaim_iter iter[13]
➡️ struct mem_cgroup_reclaim_iter iter
5.8
and 5.11
⚠️atomic_long_t lruvec_stat[33]
➡️ atomic_long_t lruvec_stat[38]
5.11
and 5.13
⚠️struct shrinker_info *shrinker_info
struct memcg_shrinker_map *shrinker_map
struct lruvec_stat *lruvec_stat_cpu
➡️ struct batched_lruvec_stat *lruvec_stat_cpu
atomic_long_t lruvec_stat[38]
➡️ atomic_long_t lruvec_stat[39]
5.13
and 5.15
⚠️struct lruvec_stats_percpu *lruvec_stats_percpu
struct lruvec_stats lruvec_stats
struct lruvec_stat *lruvec_stat_local
struct batched_lruvec_stat *lruvec_stat_cpu
atomic_long_t lruvec_stat[39]
5.15
and 5.19
✅
5.19
and 6.2
✅
6.2
and 6.5
✅
6.5
and 6.8
✅
amd64
and arm64
⚠️long unsigned int lru_zone_size[25]
➡️ long unsigned int lru_zone_size[15]
amd64
and armhf
⚠️long unsigned int lru_zone_size[25]
➡️ long unsigned int lru_zone_size[15]
amd64
and ppc64el
⚠️long unsigned int lru_zone_size[25]
➡️ long unsigned int lru_zone_size[15]
amd64
and riscv64
⚠️long unsigned int lru_zone_size[25]
➡️ long unsigned int lru_zone_size[15]
generic
and aws
✅
generic
and azure
✅
generic
and gcp
✅
generic
and lowlatency
✅