Disable support for flushing the L1 data cache to mitigate L1TF if CPU
mitigations are disabled for the entire kernel. KVM's mitigation of L1TF
is in no way special enough to justify ignoring CONFIG_CPU_MITIGATIONS=n.

Deliberately use CPU_MITIGATIONS instead of the more precise
MITIGATION_L1TF, as MITIGATION_L1TF only controls the default behavior,
i.e. CONFIG_MITIGATION_L1TF=n doesn't completely disable L1TF mitigations
in the kernel.

Keep the vmentry_l1d_flush module param to avoid breaking existing
setups, and leverage the .set path to alert the user to the fact that
vmentry_l1d_flush will be ignored. Don't bother validating the incoming
value; if an admin misconfigures vmentry_l1d_flush, the fact that the bad
configuration won't be detected when running with CONFIG_CPU_MITIGATIONS=n
is likely the least of their worries.

Reviewed-by: Brendan Jackman <jackmanb@google.com>
Link: https://patch.msgid.link/20251113233746.1703361-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
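
A minimal sketch of the .set path described above, for illustration only:
the helper names (vmentry_l1d_flush_parse(), vmx_setup_l1d_flush()) and
the warning text are assumptions modeled on arch/x86/kvm/vmx/vmx.c; the
authoritative change is in the linked patch.

	static int vmentry_l1d_flush_set(const char *s,
					 const struct kernel_param *kp)
	{
		int l1tf;

		/*
		 * Mitigations are compiled out: accept any value without
		 * validating it, but warn once that the param is ignored.
		 */
		if (!IS_ENABLED(CONFIG_CPU_MITIGATIONS)) {
			pr_warn_once("vmentry_l1d_flush is ignored, CPU mitigations are disabled\n");
			return 0;
		}

		l1tf = vmentry_l1d_flush_parse(s);
		if (l1tf < 0)
			return l1tf;

		return vmx_setup_l1d_flush(l1tf);
	}

	static const struct kernel_param_ops vmentry_l1d_flush_ops = {
		.set = vmentry_l1d_flush_set,
		.get = vmentry_l1d_flush_get,
	};
	module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);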
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>

typedef struct {
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
	u8 kvm_cpu_l1tf_flush_l1d;
#endif
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
#if IS_ENABLED(CONFIG_KVM)
	unsigned int kvm_posted_intr_ipis;
	unsigned int kvm_posted_intr_wakeup_ipis;
	unsigned int kvm_posted_intr_nested_ipis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
#endif
	unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
	unsigned int irq_deferred_error_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	unsigned int irq_hv_callback_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	unsigned int irq_hv_reenlightenment_count;
	unsigned int hyperv_stimer0_count;
#endif
#ifdef CONFIG_X86_POSTED_MSI
	unsigned int posted_msi_notification_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#ifdef CONFIG_X86_POSTED_MSI
DECLARE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
#endif

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
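
/*
 * Illustrative usage (not part of this header): interrupt handlers bump
 * their per-CPU counters via inc_irq_stat(). For instance, the local
 * APIC timer handler does:
 *
 *	inc_irq_stat(apic_timer_irqs);
 *
 * which expands to this_cpu_inc(irq_stat.apic_timer_irqs).
 */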

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

DECLARE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
#define local_softirq_pending_ref	__softirq_pending
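
/*
 * Illustrative sketch (not part of this header), assuming the
 * irq_stats() per-CPU accessor from arch/x86/kernel/irq.c:
 * arch_irq_stat_cpu() sums the counters above for /proc/stat, roughly:
 *
 *	u64 arch_irq_stat_cpu(unsigned int cpu)
 *	{
 *		u64 sum = irq_stats(cpu)->__nmi_count;
 *
 *	#ifdef CONFIG_X86_LOCAL_APIC
 *		sum += irq_stats(cpu)->apic_timer_irqs;
 *		sum += irq_stats(cpu)->irq_spurious_count;
 *	#endif
 *		...
 *		return sum;
 *	}
 */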

#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * This function is called from noinstr interrupt contexts
 * and must be inlined to not get instrumentation.
 */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
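
/*
 * Illustrative usage (not part of this header), modeled on
 * vmx_l1d_flush() in arch/x86/kvm/vmx/vmx.c: with the conditional flush
 * mode enabled, the VM-entry path consumes and clears the per-CPU flag,
 * roughly:
 *
 *	if (static_branch_likely(&vmx_l1d_flush_cond)) {
 *		bool flush_l1d = kvm_get_cpu_l1tf_flush_l1d();
 *
 *		kvm_clear_cpu_l1tf_flush_l1d();
 *		if (!flush_l1d)
 *			return;
 *	}
 */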
#else /* !CONFIG_CPU_MITIGATIONS || !CONFIG_KVM_INTEL */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* CONFIG_CPU_MITIGATIONS && CONFIG_KVM_INTEL */

#endif /* _ASM_X86_HARDIRQ_H */