Revert "x86: kvm: rate-limit global clock updates"

This reverts commit 7e44e4495a.

Commit 7e44e4495a ("x86: kvm: rate-limit global clock updates")
intends to use kvmclock_update_work to sync the NTP correction
across all vCPUs' kvmclocks, building on commit 0061d53daf
("KVM: x86: limit difference between kvmclock updates").

Since kvmclock has been switched to mono raw (CLOCK_MONOTONIC_RAW),
which is not subject to NTP correction, this commit can be
reverted.

Signed-off-by: Lei Chen <lei.chen@smartx.com>
Link: https://patch.msgid.link/20250819152027.1687487-3-lei.chen@smartx.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Author:    Lei Chen <lei.chen@smartx.com>
Date:      2025-08-19 23:20:26 +08:00
Committer: Sean Christopherson <seanjc@google.com>
parent 43ddbf16ed
commit 446fcce2a5
2 files changed, 4 insertions(+), 26 deletions(-)
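
For context, the mechanism this revert removes can be sketched outside the
kernel. The following is a hypothetical userspace analog, not the kernel
implementation: like the reverted kvmclock_update_fn()/KVMCLOCK_UPDATE_DELAY
pair, it coalesces a burst of per-vCPU triggers into a single deferred
fan-out, so the all-vCPU update runs at most once per 100 ms window. The
thread-and-mutex plumbing stands in for the kernel's delayed workqueue.

/*
 * Illustrative userspace sketch only; NOT the kernel implementation.
 * A burst of kvm_gen_kvmclock_update() calls yields one deferred fan-out,
 * mirroring how schedule_delayed_work() coalesces already-pending work.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NVCPUS          4
#define UPDATE_DELAY_MS 100     /* stand-in for KVMCLOCK_UPDATE_DELAY */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_pending;       /* true while a deferred update is queued */

/* Analog of kvmclock_update_fn(): one deferred update for every vCPU. */
static void *kvmclock_update_fn(void *unused)
{
        (void)unused;
        usleep(UPDATE_DELAY_MS * 1000);         /* the rate-limit window */

        pthread_mutex_lock(&lock);
        work_pending = false;
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < NVCPUS; i++)
                printf("KVM_REQ_CLOCK_UPDATE -> vcpu %d\n", i);
        return NULL;
}

/* Analog of kvm_gen_kvmclock_update(): update @vcpu now, defer the rest. */
static void kvm_gen_kvmclock_update(int vcpu)
{
        pthread_t worker;
        bool need_schedule;

        printf("KVM_REQ_CLOCK_UPDATE -> vcpu %d (requester)\n", vcpu);

        pthread_mutex_lock(&lock);
        need_schedule = !work_pending;  /* coalesce concurrent triggers */
        work_pending = true;
        pthread_mutex_unlock(&lock);

        if (need_schedule) {
                pthread_create(&worker, NULL, kvmclock_update_fn, NULL);
                pthread_detach(worker);
        }
}

int main(void)
{
        /* Three triggers in quick succession produce a single fan-out. */
        for (int v = 0; v < 3; v++)
                kvm_gen_kvmclock_update(v);

        usleep(2 * UPDATE_DELAY_MS * 1000);     /* let deferred work run */
        return 0;
}

Built with cc -pthread, the three requester lines print immediately and the
four-vCPU fan-out prints once, roughly 100 ms later. After this revert,
kvm_gen_kvmclock_update() instead requests the update from every vCPU
synchronously, as the restored function in the diff below shows.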

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h

@@ -1456,7 +1456,6 @@ struct kvm_arch {
 	bool use_master_clock;
 	u64 master_kernel_ns;
 	u64 master_cycle_now;
-	struct delayed_work kvmclock_update_work;
 
 #ifdef CONFIG_KVM_HYPERV
 	struct kvm_hv hyperv;

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -3523,22 +3523,14 @@ uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
 	 * the others.
 	 *
 	 * So in those cases, request a kvmclock update for all vcpus.
-	 * We need to rate-limit these requests though, as they can
-	 * considerably slow guests that have a large number of vcpus.
-	 * The time for a remote vcpu to update its kvmclock is bound
-	 * by the delay we use to rate-limit the updates.
+	 * The worst case for a remote vcpu to update its kvmclock
+	 * is then bounded by maximum nohz sleep latency.
 	 */
-
-#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
-
-static void kvmclock_update_fn(struct work_struct *work)
+static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 {
 	unsigned long i;
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
-					   kvmclock_update_work);
-	struct kvm *kvm = container_of(ka, struct kvm, arch);
 	struct kvm_vcpu *vcpu;
+	struct kvm *kvm = v->kvm;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -3546,15 +3538,6 @@ static void kvmclock_update_fn(struct work_struct *work)
 	}
 }
 
-static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
-{
-	struct kvm *kvm = v->kvm;
-
-	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
-			      KVMCLOCK_UPDATE_DELAY);
-}
-
 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */
 static bool is_mci_control_msr(u32 msr)
 {
@@ -13163,8 +13146,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.hv_root_tdp = INVALID_PAGE;
 #endif
 
-	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
-
 	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
 	kvm_xen_init_vm(kvm);
@@ -13271,8 +13252,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 	 * is unsafe, i.e. will lead to use-after-free.  The PIT also needs to
 	 * be stopped before IRQ routing is freed.
 	 */
-	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
-
 #ifdef CONFIG_KVM_IOAPIC
 	kvm_free_pit(kvm);
 #endif