kvm_mmu
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: Absent ⚠️
4.18: Absent ⚠️
5.0: Absent ⚠️
5.3: Absent ⚠️
5.4: Absent ⚠️
5.8: Absent ⚠️
5.11: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, gpa_t, u32, bool);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    gpa_t (*translate_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *);
    void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t);
    void (*update_pte)(struct kvm_vcpu *, struct kvm_mmu_page *, u64 *, const void *);
    hpa_t root_hpa;
    gpa_t root_pgd;
    union kvm_mmu_role mmu_role;
    u8 root_level;
    u8 shadow_root_level;
    u8 ept_ad;
    bool direct_map;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u32 pkru_mask;
    u64 *pae_root;
    u64 *lm_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u8 last_nonleaf_level;
    bool nx;
    u64 pdptrs[4];
};
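For orientation, a minimal sketch of how these fields are reached in-kernel, assuming an x86 build in the 5.11 range; dump_active_mmu is a hypothetical debug helper, not part of KVM, and relies only on the fact that the active MMU context hangs off each vCPU as vcpu->arch.mmu.

#include <linux/kvm_host.h>   /* struct kvm_vcpu, struct kvm_mmu (x86) */
#include <linux/printk.h>

/* Hypothetical debug helper: print a few members of the active MMU context. */
static void dump_active_mmu(struct kvm_vcpu *vcpu)
{
    struct kvm_mmu *mmu = vcpu->arch.mmu;   /* points at root_mmu or guest_mmu */

    pr_info("kvm_mmu: root_hpa=%llx root_level=%d direct_map=%d\n",
            mmu->root_hpa, mmu->root_level, mmu->direct_map);
}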
5.13: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, gpa_t, u32, bool);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    gpa_t (*translate_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *);
    void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t);
    hpa_t root_hpa;
    gpa_t root_pgd;
    union kvm_mmu_role mmu_role;
    u8 root_level;
    u8 shadow_root_level;
    u8 ept_ad;
    bool direct_map;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u32 pkru_mask;
    u64 *pae_root;
    u64 *pml4_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u8 last_nonleaf_level;
    bool nx;
    u64 pdptrs[4];
};
5.15: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, gpa_t, u32, bool);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    gpa_t (*translate_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *);
    int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *);
    void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t);
    hpa_t root_hpa;
    gpa_t root_pgd;
    union kvm_mmu_role mmu_role;
    u8 root_level;
    u8 shadow_root_level;
    u8 ept_ad;
    bool direct_map;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u32 pkru_mask;
    u64 *pae_root;
    u64 *pml4_root;
    u64 *pml5_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u64 pdptrs[4];
};
5.19: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *);
    int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *);
    void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t);
    struct kvm_mmu_root_info root;
    union kvm_cpu_role cpu_role;
    union kvm_mmu_page_role root_role;
    u32 pkru_mask;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u64 *pae_root;
    u64 *pml4_root;
    u64 *pml5_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u64 pdptrs[4];
};
6.2: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *);
    int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *);
    void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t);
    struct kvm_mmu_root_info root;
    union kvm_cpu_role cpu_role;
    union kvm_mmu_page_role root_role;
    u32 pkru_mask;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u64 *pae_root;
    u64 *pml4_root;
    u64 *pml5_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u64 pdptrs[4];
};
6.5: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *);
    int (*sync_spte)(struct kvm_vcpu *, struct kvm_mmu_page *, int);
    struct kvm_mmu_root_info root;
    union kvm_cpu_role cpu_role;
    union kvm_mmu_page_role root_role;
    u32 pkru_mask;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u64 *pae_root;
    u64 *pml4_root;
    u64 *pml5_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u64 pdptrs[4];
};
6.8: ✅
struct kvm_mmu {
    long unsigned int (*get_guest_pgd)(struct kvm_vcpu *);
    u64 (*get_pdptr)(struct kvm_vcpu *, int);
    int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *);
    void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *);
    gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *);
    int (*sync_spte)(struct kvm_vcpu *, struct kvm_mmu_page *, int);
    struct kvm_mmu_root_info root;
    union kvm_cpu_role cpu_role;
    union kvm_mmu_page_role root_role;
    u32 pkru_mask;
    struct kvm_mmu_root_info prev_roots[3];
    u8 permissions[16];
    u64 *pae_root;
    u64 *pml4_root;
    u64 *pml5_root;
    struct rsvd_bits_validate shadow_zero_check;
    struct rsvd_bits_validate guest_rsvd_check;
    u64 pdptrs[4];
};
arm64: Absent ⚠️
armhf: Absent ⚠️
ppc64el: Absent ⚠️
riscv64: Absent ⚠️
aws: Absent ⚠️
azure: Absent ⚠️
gcp: Absent ⚠️
lowlatency: Absent ⚠️
Regular
Differences between adjacent versions:

5.11 and 5.13 ⚠️
  Added:   u64 *pml4_root
  Removed: void (*update_pte)(struct kvm_vcpu *, struct kvm_mmu_page *, u64 *, const void *)
  Removed: u64 *lm_root
  (a version-switch sketch follows below)
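A minimal compile-time accessor sketch for this change, assuming (as the two layouts above suggest) that the scratch table 5.11 exposes as lm_root is the one 5.13 exposes as pml4_root; mmu_scratch_root is a hypothetical helper, and the cutover is taken from the sampled releases rather than the exact commit.

#include <linux/version.h>
#include <linux/kvm_host.h>

/* Hypothetical helper: return the shadow-paging scratch root under either name. */
static u64 *mmu_scratch_root(struct kvm_mmu *mmu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
    return mmu->pml4_root;   /* name in the 5.13 sample onward */
#else
    return mmu->lm_root;     /* name in the 5.11 sample */
#endif
}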
5.13 and 5.15 ⚠️
  Added:   u64 *pml5_root
  Removed: u8 last_nonleaf_level
  Removed: bool nx
  (a guard sketch follows below)
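pml5_root only appears from the 5.15 sample onward (presumably for 5-level paging hosts), so any out-of-tree reference needs a guard; a minimal sketch with a hypothetical helper, cutover again taken from the sampled versions:

#include <linux/version.h>
#include <linux/kvm_host.h>

/* Hypothetical helper: report whether a 5-level scratch root is allocated. */
static bool mmu_has_pml5_scratch(struct kvm_mmu *mmu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
    return mmu->pml5_root != NULL;
#else
    return false;   /* field not present in the 5.13-and-earlier samples */
#endif
}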
5.15 and 5.19 ⚠️
  Added:   struct kvm_mmu_root_info root
  Added:   union kvm_cpu_role cpu_role
  Added:   union kvm_mmu_page_role root_role
  Removed: gpa_t (*translate_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *)
  Removed: hpa_t root_hpa
  Removed: gpa_t root_pgd
  Removed: union kvm_mmu_role mmu_role
  Removed: u8 root_level
  Removed: u8 shadow_root_level
  Removed: u8 ept_ad
  Removed: bool direct_map
  Changed: int (*page_fault)(struct kvm_vcpu *, gpa_t, u32, bool)
           ➡️ int (*page_fault)(struct kvm_vcpu *, struct kvm_page_fault *)
  Changed: gpa_t (*gva_to_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *)
           ➡️ gpa_t (*gva_to_gpa)(struct kvm_vcpu *, struct kvm_mmu *, gpa_t, u64, struct x86_exception *)
  (a root-accessor sketch follows below)
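From the 5.19 sample onward, the standalone root_hpa/root_pgd pair is folded into the struct kvm_mmu_root_info member named root, and mmu_role gives way to the cpu_role/root_role split. A minimal sketch for reading the current root HPA across that boundary, assuming kvm_mmu_root_info carries hpa/pgd members (as its existing use for prev_roots implies); mmu_current_root_hpa is a hypothetical helper:

#include <linux/version.h>
#include <linux/kvm_host.h>

/* Hypothetical helper: current root HPA on either side of the 5.19 reshuffle. */
static hpa_t mmu_current_root_hpa(struct kvm_mmu *mmu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
    return mmu->root.hpa;    /* consolidated kvm_mmu_root_info member */
#else
    return mmu->root_hpa;    /* standalone field in the 5.15-and-earlier samples */
#endif
}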
5.19 and 6.2 ✅ (no differences)
6.2 and 6.5 ⚠️
  Added:   int (*sync_spte)(struct kvm_vcpu *, struct kvm_mmu_page *, int)
  Removed: int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *)
  Removed: void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t)
  (a hook-check sketch follows below)
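A minimal sketch for the 6.5 callback change, assuming the caller only needs to know whether a sync hook is registered; mmu_has_sync_hook is hypothetical, and the per-entry reading of sync_spte is inferred from its extra index argument rather than stated by this listing:

#include <linux/version.h>
#include <linux/kvm_host.h>

/* Hypothetical helper: is a sync callback installed for this MMU context? */
static bool mmu_has_sync_hook(struct kvm_mmu *mmu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
    return mmu->sync_spte != NULL;   /* per-entry hook in the 6.5+ samples */
#else
    return mmu->sync_page != NULL;   /* per-page hook up to the 6.2 sample */
#endif
}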
6.5 and 6.8 ✅ (no differences)
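Where rebuilding per kernel is impractical, the same layout differences can be resolved at load time from BTF instead of with LINUX_VERSION_CODE checks. A minimal BPF CO-RE sketch for the root_hpa ➡️ root.hpa move, assuming a vmlinux.h generated from a 6.x kernel and libbpf's CO-RE macros; the kvm_mmu___pre519 local type and read_root_hpa helper are illustrative only, and the probe that would call the helper is omitted.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Pre-5.19 shape of the one field we need, declared locally so this object
 * still compiles against a newer vmlinux.h; libbpf strips the ___pre519
 * suffix and matches the access against the running kernel's BTF. */
struct kvm_mmu___pre519 {
    u64 root_hpa;
} __attribute__((preserve_access_index));

/* Pick the right field at load time based on which layout the kernel has. */
static __always_inline u64 read_root_hpa(struct kvm_mmu *mmu)
{
    if (bpf_core_field_exists(mmu->root))               /* 5.19+ layout */
        return BPF_CORE_READ(mmu, root.hpa);

    return BPF_CORE_READ((struct kvm_mmu___pre519 *)mmu, root_hpa);
}

char LICENSE[] SEC("license") = "GPL";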