pv_lock_ops
Regular
4.4
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
};
4.8
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
};
4.10
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
4.13
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
4.15
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
4.18
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.0
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.3
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.4
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.8
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.11
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.13
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.15
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
5.19
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
6.2
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
6.5
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
6.8
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
arm64
: Absent ⚠️
armhf
: Absent ⚠️
ppc64el
: Absent ⚠️
riscv64
: Absent ⚠️
aws
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
azure
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
gcp
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
lowlatency
: ✅struct pv_lock_ops {
void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
struct paravirt_callee_save queued_spin_unlock;
void (*wait)(u8 *, u8);
void (*kick)(int);
struct paravirt_callee_save vcpu_is_preempted;
};
Regular
4.4
and 4.8
✅
4.8
and 4.10
⚠️ member added: struct paravirt_callee_save vcpu_is_preempted;
4.10
and 4.13
✅
4.13
and 4.15
✅
4.15
and 4.18
✅
4.18
and 5.0
✅
5.0
and 5.3
✅
5.3
and 5.4
✅
5.4
and 5.8
✅
5.8
and 5.11
✅
5.11
and 5.13
✅
5.13
and 5.15
✅
5.15
and 5.19
✅
5.19
and 6.2
✅
6.2
and 6.5
✅
6.5
and 6.8
✅
generic
and aws
✅
generic
and azure
✅
generic
and gcp
✅
generic
and lowlatency
✅