kvm_x86_ops
Regular
4.4: Absent ⚠️
4.8: Absent ⚠️
4.10: Absent ⚠️
4.13: Absent ⚠️
4.15: Absent ⚠️
4.18: Absent ⚠️
5.0: Absent ⚠️
5.3: Absent ⚠️
5.4: Absent ⚠️
5.8: Absent ⚠️
5.11: Absent ⚠️
5.13: ✅
struct kvm_x86_ops {
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*cpu_has_accelerated_tpr)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_guest_switch)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
void (*tlb_flush_all)(struct kvm_vcpu *);
void (*tlb_flush_current)(struct kvm_vcpu *);
int (*tlb_remote_flush)(struct kvm *);
int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *);
void (*tlb_flush_gva)(struct kvm_vcpu *, gva_t);
void (*tlb_flush_guest)(struct kvm_vcpu *);
enum exit_fastpath_completion (*run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*set_irq)(struct kvm_vcpu *);
void (*set_nmi)(struct kvm_vcpu *);
void (*queue_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(ulong);
void (*pre_update_apicv_exec_ctrl)(struct kvm *, bool);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(struct kvm_vcpu *, int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
int (*deliver_posted_interrupt)(struct kvm_vcpu *, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u64 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*write_l1_tsc_offset)(struct kvm_vcpu *, u64);
void (*get_exit_info)(struct kvm_vcpu *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_pmu_ops *pmu_ops;
const struct kvm_x86_nested_ops *nested_ops;
int (*pre_block)(struct kvm_vcpu *);
void (*post_block)(struct kvm_vcpu *);
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*update_pi_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*start_assignment)(struct kvm *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*pre_enter_smm)(struct kvm_vcpu *, char *);
int (*pre_leave_smm)(struct kvm_vcpu *, const char *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_op)(struct kvm *, void *);
int (*mem_enc_reg_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unreg_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*get_msr_feature)(struct kvm_msr_entry *);
bool (*can_emulate_instruction)(struct kvm_vcpu *, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_direct_tlbflush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
};
5.15: ✅
struct kvm_x86_ops {
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*cpu_has_accelerated_tpr)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_guest_switch)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
bool (*get_if_flag)(struct kvm_vcpu *);
void (*tlb_flush_all)(struct kvm_vcpu *);
void (*tlb_flush_current)(struct kvm_vcpu *);
int (*tlb_remote_flush)(struct kvm *);
int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *);
void (*tlb_flush_gva)(struct kvm_vcpu *, gva_t);
void (*tlb_flush_guest)(struct kvm_vcpu *);
enum exit_fastpath_completion (*run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*set_irq)(struct kvm_vcpu *);
void (*set_nmi)(struct kvm_vcpu *);
void (*queue_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(ulong);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(struct kvm_vcpu *, int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
int (*deliver_posted_interrupt)(struct kvm_vcpu *, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u64 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *);
void (*write_tsc_offset)(struct kvm_vcpu *, u64);
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64);
void (*get_exit_info)(struct kvm_vcpu *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_pmu_ops *pmu_ops;
const struct kvm_x86_nested_ops *nested_ops;
int (*pre_block)(struct kvm_vcpu *);
void (*post_block)(struct kvm_vcpu *);
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*update_pi_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*start_assignment)(struct kvm *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*enter_smm)(struct kvm_vcpu *, char *);
int (*leave_smm)(struct kvm_vcpu *, const char *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_op)(struct kvm *, void *);
int (*mem_enc_reg_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unreg_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*get_msr_feature)(struct kvm_msr_entry *);
bool (*can_emulate_instruction)(struct kvm_vcpu *, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_direct_tlbflush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
};
5.19: ✅
struct kvm_x86_ops {
const char *name;
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_switch_to_guest)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
bool (*get_if_flag)(struct kvm_vcpu *);
void (*flush_tlb_all)(struct kvm_vcpu *);
void (*flush_tlb_current)(struct kvm_vcpu *);
int (*tlb_remote_flush)(struct kvm *);
int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *);
void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t);
void (*flush_tlb_guest)(struct kvm_vcpu *);
int (*vcpu_pre_run)(struct kvm_vcpu *);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*inject_irq)(struct kvm_vcpu *);
void (*inject_nmi)(struct kvm_vcpu *);
void (*queue_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(struct kvm_vcpu *, int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
void (*deliver_interrupt)(struct kvm_lapic *, int, int, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u64 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *);
void (*write_tsc_offset)(struct kvm_vcpu *, u64);
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64);
void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_x86_nested_ops *nested_ops;
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*pi_start_assignment)(struct kvm *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*enter_smm)(struct kvm_vcpu *, char *);
int (*leave_smm)(struct kvm_vcpu *, const char *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_ioctl)(struct kvm *, void *);
int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*vm_move_enc_context_from)(struct kvm *, unsigned int);
void (*guest_memory_reclaimed)(struct kvm *);
int (*get_msr_feature)(struct kvm_msr_entry *);
bool (*can_emulate_instruction)(struct kvm_vcpu *, int, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_direct_tlbflush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *);
};
6.2: ✅
struct kvm_x86_ops {
const char *name;
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_precreate)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_switch_to_guest)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
bool (*get_if_flag)(struct kvm_vcpu *);
void (*flush_tlb_all)(struct kvm_vcpu *);
void (*flush_tlb_current)(struct kvm_vcpu *);
int (*tlb_remote_flush)(struct kvm *);
int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *);
void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t);
void (*flush_tlb_guest)(struct kvm_vcpu *);
int (*vcpu_pre_run)(struct kvm_vcpu *);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*inject_irq)(struct kvm_vcpu *, bool);
void (*inject_nmi)(struct kvm_vcpu *);
void (*inject_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
void (*deliver_interrupt)(struct kvm_lapic *, int, int, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u8 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *);
void (*write_tsc_offset)(struct kvm_vcpu *, u64);
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64);
void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_x86_nested_ops *nested_ops;
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*pi_start_assignment)(struct kvm *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*enter_smm)(struct kvm_vcpu *, union kvm_smram *);
int (*leave_smm)(struct kvm_vcpu *, const union kvm_smram *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_ioctl)(struct kvm *, void *);
int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*vm_move_enc_context_from)(struct kvm *, unsigned int);
void (*guest_memory_reclaimed)(struct kvm *);
int (*get_msr_feature)(struct kvm_msr_entry *);
bool (*can_emulate_instruction)(struct kvm_vcpu *, int, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_l2_tlb_flush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *);
};
6.5: ✅
struct kvm_x86_ops {
const char *name;
int (*check_processor_compatibility)();
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_precreate)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_switch_to_guest)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
bool (*is_valid_cr0)(struct kvm_vcpu *, long unsigned int);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
bool (*get_if_flag)(struct kvm_vcpu *);
void (*flush_tlb_all)(struct kvm_vcpu *);
void (*flush_tlb_current)(struct kvm_vcpu *);
int (*flush_remote_tlbs)(struct kvm *);
int (*flush_remote_tlbs_range)(struct kvm *, gfn_t, gfn_t);
void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t);
void (*flush_tlb_guest)(struct kvm_vcpu *);
int (*vcpu_pre_run)(struct kvm_vcpu *);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*inject_irq)(struct kvm_vcpu *, bool);
void (*inject_nmi)(struct kvm_vcpu *);
void (*inject_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
bool (*is_vnmi_pending)(struct kvm_vcpu *);
bool (*set_vnmi_pending)(struct kvm_vcpu *);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit);
const long unsigned int required_apicv_inhibits;
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
void (*deliver_interrupt)(struct kvm_lapic *, int, int, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u8 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *);
void (*write_tsc_offset)(struct kvm_vcpu *, u64);
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64);
void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_x86_nested_ops *nested_ops;
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*pi_start_assignment)(struct kvm *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*enter_smm)(struct kvm_vcpu *, union kvm_smram *);
int (*leave_smm)(struct kvm_vcpu *, const union kvm_smram *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_ioctl)(struct kvm *, void *);
int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*vm_move_enc_context_from)(struct kvm *, unsigned int);
void (*guest_memory_reclaimed)(struct kvm *);
int (*get_msr_feature)(struct kvm_msr_entry *);
bool (*can_emulate_instruction)(struct kvm_vcpu *, int, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_l2_tlb_flush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *);
};
6.8: ✅
struct kvm_x86_ops {
const char *name;
int (*check_processor_compatibility)();
int (*hardware_enable)();
void (*hardware_disable)();
void (*hardware_unsetup)();
bool (*has_emulated_msr)(struct kvm *, u32);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *);
unsigned int vm_size;
int (*vm_init)(struct kvm *);
void (*vm_destroy)(struct kvm *);
int (*vcpu_precreate)(struct kvm *);
int (*vcpu_create)(struct kvm_vcpu *);
void (*vcpu_free)(struct kvm_vcpu *);
void (*vcpu_reset)(struct kvm_vcpu *, bool);
void (*prepare_switch_to_guest)(struct kvm_vcpu *);
void (*vcpu_load)(struct kvm_vcpu *, int);
void (*vcpu_put)(struct kvm_vcpu *);
void (*update_exception_bitmap)(struct kvm_vcpu *);
int (*get_msr)(struct kvm_vcpu *, struct msr_data *);
int (*set_msr)(struct kvm_vcpu *, struct msr_data *);
u64 (*get_segment_base)(struct kvm_vcpu *, int);
void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
int (*get_cpl)(struct kvm_vcpu *);
void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int);
void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *);
bool (*is_valid_cr0)(struct kvm_vcpu *, long unsigned int);
void (*set_cr0)(struct kvm_vcpu *, long unsigned int);
void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int);
bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int);
void (*set_cr4)(struct kvm_vcpu *, long unsigned int);
int (*set_efer)(struct kvm_vcpu *, u64);
void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *);
void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *);
void (*set_dr7)(struct kvm_vcpu *, long unsigned int);
void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg);
long unsigned int (*get_rflags)(struct kvm_vcpu *);
void (*set_rflags)(struct kvm_vcpu *, long unsigned int);
bool (*get_if_flag)(struct kvm_vcpu *);
void (*flush_tlb_all)(struct kvm_vcpu *);
void (*flush_tlb_current)(struct kvm_vcpu *);
int (*flush_remote_tlbs)(struct kvm *);
int (*flush_remote_tlbs_range)(struct kvm *, gfn_t, gfn_t);
void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t);
void (*flush_tlb_guest)(struct kvm_vcpu *);
int (*vcpu_pre_run)(struct kvm_vcpu *);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *);
int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion);
int (*skip_emulated_instruction)(struct kvm_vcpu *);
void (*update_emulated_instruction)(struct kvm_vcpu *);
void (*set_interrupt_shadow)(struct kvm_vcpu *, int);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *);
void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *);
void (*inject_irq)(struct kvm_vcpu *, bool);
void (*inject_nmi)(struct kvm_vcpu *);
void (*inject_exception)(struct kvm_vcpu *);
void (*cancel_injection)(struct kvm_vcpu *);
int (*interrupt_allowed)(struct kvm_vcpu *, bool);
int (*nmi_allowed)(struct kvm_vcpu *, bool);
bool (*get_nmi_mask)(struct kvm_vcpu *);
void (*set_nmi_mask)(struct kvm_vcpu *, bool);
bool (*is_vnmi_pending)(struct kvm_vcpu *);
bool (*set_vnmi_pending)(struct kvm_vcpu *);
void (*enable_nmi_window)(struct kvm_vcpu *);
void (*enable_irq_window)(struct kvm_vcpu *);
void (*update_cr8_intercept)(struct kvm_vcpu *, int, int);
bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit);
const long unsigned int required_apicv_inhibits;
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *);
void (*hwapic_irr_update)(struct kvm_vcpu *, int);
void (*hwapic_isr_update)(int);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *);
void (*load_eoi_exitmap)(struct kvm_vcpu *, u64 *);
void (*set_virtual_apic_mode)(struct kvm_vcpu *);
void (*set_apic_access_page_addr)(struct kvm_vcpu *);
void (*deliver_interrupt)(struct kvm_lapic *, int, int, int);
int (*sync_pir_to_irr)(struct kvm_vcpu *);
int (*set_tss_addr)(struct kvm *, unsigned int);
int (*set_identity_map_addr)(struct kvm *, u64);
u8 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool);
void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int);
bool (*has_wbinvd_exit)();
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *);
void (*write_tsc_offset)(struct kvm_vcpu *);
void (*write_tsc_multiplier)(struct kvm_vcpu *);
void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *);
int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *);
void (*handle_exit_irqoff)(struct kvm_vcpu *);
void (*request_immediate_exit)(struct kvm_vcpu *);
void (*sched_in)(struct kvm_vcpu *, int);
int cpu_dirty_log_size;
void (*update_cpu_dirty_logging)(struct kvm_vcpu *);
const struct kvm_x86_nested_ops *nested_ops;
void (*vcpu_blocking)(struct kvm_vcpu *);
void (*vcpu_unblocking)(struct kvm_vcpu *);
int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool);
void (*pi_start_assignment)(struct kvm *);
void (*apicv_pre_state_restore)(struct kvm_vcpu *);
void (*apicv_post_state_restore)(struct kvm_vcpu *);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *);
int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *);
void (*cancel_hv_timer)(struct kvm_vcpu *);
void (*setup_mce)(struct kvm_vcpu *);
int (*smi_allowed)(struct kvm_vcpu *, bool);
int (*enter_smm)(struct kvm_vcpu *, union kvm_smram *);
int (*leave_smm)(struct kvm_vcpu *, const union kvm_smram *);
void (*enable_smi_window)(struct kvm_vcpu *);
int (*mem_enc_ioctl)(struct kvm *, void *);
int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *);
int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *);
int (*vm_copy_enc_context_from)(struct kvm *, unsigned int);
int (*vm_move_enc_context_from)(struct kvm *, unsigned int);
void (*guest_memory_reclaimed)(struct kvm *);
int (*get_msr_feature)(struct kvm_msr_entry *);
int (*check_emulate_instruction)(struct kvm_vcpu *, int, void *, int);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *);
int (*enable_l2_tlb_flush)(struct kvm_vcpu *);
void (*migrate_timers)(struct kvm_vcpu *);
void (*msr_filter_changed)(struct kvm_vcpu *);
int (*complete_emulated_msr)(struct kvm_vcpu *, int);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8);
long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *);
gva_t (*get_untagged_addr)(struct kvm_vcpu *, gva_t, unsigned int);
};
arm64
: Absent ⚠️
armhf
: Absent ⚠️
ppc64el
: Absent ⚠️
riscv64
: Absent ⚠️
aws
: Absent ⚠️
azure
: Absent ⚠️
gcp
: Absent ⚠️
lowlatency
: Absent ⚠️
Regular
5.13
and 5.15
⚠️bool (*get_if_flag)(struct kvm_vcpu *)
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *)
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *)
void (*write_tsc_offset)(struct kvm_vcpu *, u64)
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64)
int (*enter_smm)(struct kvm_vcpu *, char *)
int (*leave_smm)(struct kvm_vcpu *, const char *)
void (*pre_update_apicv_exec_ctrl)(struct kvm *, bool)
u64 (*write_l1_tsc_offset)(struct kvm_vcpu *, u64)
int (*pre_enter_smm)(struct kvm_vcpu *, char *)
int (*pre_leave_smm)(struct kvm_vcpu *, const char *)
5.15
and 5.19
⚠️const char *name
void (*prepare_switch_to_guest)(struct kvm_vcpu *)
void (*post_set_cr3)(struct kvm_vcpu *, long unsigned int)
void (*flush_tlb_all)(struct kvm_vcpu *)
void (*flush_tlb_current)(struct kvm_vcpu *)
void (*flush_tlb_gva)(struct kvm_vcpu *, gva_t)
void (*flush_tlb_guest)(struct kvm_vcpu *)
int (*vcpu_pre_run)(struct kvm_vcpu *)
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *)
void (*inject_irq)(struct kvm_vcpu *)
void (*inject_nmi)(struct kvm_vcpu *)
void (*deliver_interrupt)(struct kvm_lapic *, int, int, int)
int (*pi_update_irte)(struct kvm *, unsigned int, uint32_t, bool)
void (*pi_start_assignment)(struct kvm *)
int (*mem_enc_ioctl)(struct kvm *, void *)
int (*mem_enc_register_region)(struct kvm *, struct kvm_enc_region *)
int (*mem_enc_unregister_region)(struct kvm *, struct kvm_enc_region *)
int (*vm_move_enc_context_from)(struct kvm *, unsigned int)
void (*guest_memory_reclaimed)(struct kvm *)
long unsigned int (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *)
bool (*cpu_has_accelerated_tpr)()
void (*prepare_guest_switch)(struct kvm_vcpu *)
void (*tlb_flush_all)(struct kvm_vcpu *)
void (*tlb_flush_current)(struct kvm_vcpu *)
void (*tlb_flush_gva)(struct kvm_vcpu *, gva_t)
void (*tlb_flush_guest)(struct kvm_vcpu *)
enum exit_fastpath_completion (*run)(struct kvm_vcpu *)
void (*set_irq)(struct kvm_vcpu *)
void (*set_nmi)(struct kvm_vcpu *)
int (*deliver_posted_interrupt)(struct kvm_vcpu *, int)
const struct kvm_pmu_ops *pmu_ops
int (*pre_block)(struct kvm_vcpu *)
void (*post_block)(struct kvm_vcpu *)
int (*update_pi_irte)(struct kvm *, unsigned int, uint32_t, bool)
void (*start_assignment)(struct kvm *)
int (*mem_enc_op)(struct kvm *, void *)
int (*mem_enc_reg_region)(struct kvm *, struct kvm_enc_region *)
int (*mem_enc_unreg_region)(struct kvm *, struct kvm_enc_region *)
bool (*check_apicv_inhibit_reasons)(ulong)
➡️ bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit)
void (*get_exit_info)(struct kvm_vcpu *, u64 *, u64 *, u32 *, u32 *)
➡️ void (*get_exit_info)(struct kvm_vcpu *, u32 *, u64 *, u64 *, u32 *, u32 *)
bool (*can_emulate_instruction)(struct kvm_vcpu *, void *, int)
➡️ bool (*can_emulate_instruction)(struct kvm_vcpu *, int, void *, int)
5.19
and 6.2
⚠️int (*vcpu_precreate)(struct kvm *)
void (*inject_exception)(struct kvm_vcpu *)
int (*enable_l2_tlb_flush)(struct kvm_vcpu *)
void (*queue_exception)(struct kvm_vcpu *)
int (*enable_direct_tlbflush)(struct kvm_vcpu *)
void (*inject_irq)(struct kvm_vcpu *)
➡️ void (*inject_irq)(struct kvm_vcpu *, bool)
void (*hwapic_isr_update)(struct kvm_vcpu *, int)
➡️ void (*hwapic_isr_update)(int)
u64 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool)
➡️ u8 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool)
int (*enter_smm)(struct kvm_vcpu *, char *)
➡️ int (*enter_smm)(struct kvm_vcpu *, union kvm_smram *)
int (*leave_smm)(struct kvm_vcpu *, const char *)
➡️ int (*leave_smm)(struct kvm_vcpu *, const union kvm_smram *)
6.2
and 6.5
⚠️int (*check_processor_compatibility)()
bool (*is_valid_cr0)(struct kvm_vcpu *, long unsigned int)
int (*flush_remote_tlbs)(struct kvm *)
int (*flush_remote_tlbs_range)(struct kvm *, gfn_t, gfn_t)
bool (*is_vnmi_pending)(struct kvm_vcpu *)
bool (*set_vnmi_pending)(struct kvm_vcpu *)
const long unsigned int required_apicv_inhibits
bool allow_apicv_in_x2apic_without_x2apic_virtualization
int (*tlb_remote_flush)(struct kvm *)
int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *)
6.5
and 6.8
⚠️void (*apicv_pre_state_restore)(struct kvm_vcpu *)
int (*check_emulate_instruction)(struct kvm_vcpu *, int, void *, int)
gva_t (*get_untagged_addr)(struct kvm_vcpu *, gva_t, unsigned int)
bool (*can_emulate_instruction)(struct kvm_vcpu *, int, void *, int)
void (*write_tsc_offset)(struct kvm_vcpu *, u64)
➡️ void (*write_tsc_offset)(struct kvm_vcpu *)
void (*write_tsc_multiplier)(struct kvm_vcpu *, u64)
➡️ void (*write_tsc_multiplier)(struct kvm_vcpu *)