mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
Revert "x86: kvm: introduce periodic global clock updates"
This reverts commit 332967a3ea. Commit 332967a3ea ("x86: kvm: introduce periodic global clock updates") introduced a work item run at a 300s interval to sync NTP corrections across all vCPUs. Since commit 53fafdbb8b ("KVM: x86: switch KVMCLOCK base to monotonic raw clock"), kvmclock has been based on the monotonic raw clock, so NTP corrections can no longer be taken into consideration. Signed-off-by: Lei Chen <lei.chen@smartx.com> Link: https://patch.msgid.link/20250819152027.1687487-2-lei.chen@smartx.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
committed by
Sean Christopherson
parent
a091fe60c2
commit
43ddbf16ed
@@ -1457,7 +1457,6 @@ struct kvm_arch {
|
||||
u64 master_kernel_ns;
|
||||
u64 master_cycle_now;
|
||||
struct delayed_work kvmclock_update_work;
|
||||
struct delayed_work kvmclock_sync_work;
|
||||
|
||||
#ifdef CONFIG_KVM_HYPERV
|
||||
struct kvm_hv hyperv;
|
||||
|
||||
@@ -159,9 +159,6 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs);
|
||||
unsigned int min_timer_period_us = 200;
|
||||
module_param(min_timer_period_us, uint, 0644);
|
||||
|
||||
static bool __read_mostly kvmclock_periodic_sync = true;
|
||||
module_param(kvmclock_periodic_sync, bool, 0444);
|
||||
|
||||
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
|
||||
static u32 __read_mostly tsc_tolerance_ppm = 250;
|
||||
module_param(tsc_tolerance_ppm, uint, 0644);
|
||||
@@ -3558,20 +3555,6 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
|
||||
KVMCLOCK_UPDATE_DELAY);
|
||||
}
|
||||
|
||||
#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
|
||||
|
||||
static void kvmclock_sync_fn(struct work_struct *work)
|
||||
{
|
||||
struct delayed_work *dwork = to_delayed_work(work);
|
||||
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
|
||||
kvmclock_sync_work);
|
||||
struct kvm *kvm = container_of(ka, struct kvm, arch);
|
||||
|
||||
schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
|
||||
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
|
||||
KVMCLOCK_SYNC_PERIOD);
|
||||
}
|
||||
|
||||
/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
|
||||
static bool is_mci_control_msr(u32 msr)
|
||||
{
|
||||
@@ -12757,8 +12740,6 @@ fail_mmu_destroy:
|
||||
|
||||
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
if (mutex_lock_killable(&vcpu->mutex))
|
||||
return;
|
||||
vcpu_load(vcpu);
|
||||
@@ -12769,10 +12750,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.msr_kvm_poll_control = 1;
|
||||
|
||||
mutex_unlock(&vcpu->mutex);
|
||||
|
||||
if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
|
||||
schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
|
||||
KVMCLOCK_SYNC_PERIOD);
|
||||
}
|
||||
|
||||
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
@@ -13187,7 +13164,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
#endif
|
||||
|
||||
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
|
||||
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
|
||||
|
||||
kvm_apicv_init(kvm);
|
||||
kvm_hv_init_vm(kvm);
|
||||
@@ -13295,7 +13271,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
|
||||
* is unsafe, i.e. will lead to use-after-free. The PIT also needs to
|
||||
* be stopped before IRQ routing is freed.
|
||||
*/
|
||||
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
|
||||
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
|
||||
|
||||
#ifdef CONFIG_KVM_IOAPIC
|
||||
|
||||
Reference in New Issue
Block a user