KVM: x86/pmu: Move PMU_CAP_{FW_WRITES,LBR_FMT} into msr-index.h header
Move PMU_CAP_{FW_WRITES,LBR_FMT} into msr-index.h and rename them with a
PERF_CAP prefix to keep them consistent with the other perf capability macros.
No functional change intended.
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-24-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Committed by: Sean Christopherson
Parent: 1e24bece26
Commit: cdfed9370b
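To illustrate the rename, here is a minimal stand-alone user-space sketch (not kernel code) that defines the two macros with the values shown in the diffs below and tests an IA32_PERF_CAPABILITIES value the way the KVM checks do. The BIT_ULL() fallback and the sample perf_cap value are assumptions made only so the example compiles and runs on its own.

/*
 * Stand-alone sketch of the renamed capability macros.  BIT_ULL() is
 * redefined here only so the example builds outside the kernel tree;
 * in the kernel it comes from <linux/bits.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))

/* Formerly PMU_CAP_FW_WRITES / PMU_CAP_LBR_FMT in vmx/capabilities.h. */
#define PERF_CAP_FW_WRITES	BIT_ULL(13)	/* full-width counter writes  */
#define PERF_CAP_LBR_FMT	0x3f		/* multi-bit LBR format field */

int main(void)
{
	/* Hypothetical IA32_PERF_CAPABILITIES value, for illustration only. */
	uint64_t perf_cap = PERF_CAP_FW_WRITES | 0x05;

	/* Single-bit capability: tested the way fw_writes_is_enabled() does. */
	if (perf_cap & PERF_CAP_FW_WRITES)
		printf("full-width counter writes supported\n");

	/* LBR_FMT is a field, not a flag: mask out the format value. */
	printf("LBR format: 0x%llx\n",
	       (unsigned long long)(perf_cap & PERF_CAP_LBR_FMT));

	return 0;
}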
arch/x86/include/asm/msr-index.h
@@ -315,12 +315,15 @@
 #define PERF_CAP_PT_IDX			16
 
 #define MSR_PEBS_LD_LAT_THRESHOLD	0x000003f6
+
+#define PERF_CAP_LBR_FMT		0x3f
 #define PERF_CAP_PEBS_TRAP		BIT_ULL(6)
 #define PERF_CAP_ARCH_REG		BIT_ULL(7)
 #define PERF_CAP_PEBS_FORMAT		0xf00
+#define PERF_CAP_FW_WRITES		BIT_ULL(13)
 #define PERF_CAP_PEBS_BASELINE		BIT_ULL(14)
 #define PERF_CAP_PEBS_MASK	(PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
 				 PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
 
 #define MSR_IA32_RTIT_CTL		0x00000570
 #define RTIT_CTL_TRACEEN		BIT(0)
arch/x86/kvm/vmx/capabilities.h
@@ -20,9 +20,6 @@ extern int __read_mostly pt_mode;
 #define PT_MODE_SYSTEM		0
 #define PT_MODE_HOST_GUEST	1
 
-#define PMU_CAP_FW_WRITES	(1ULL << 13)
-#define PMU_CAP_LBR_FMT		0x3f
-
 struct nested_vmx_msrs {
 	/*
 	 * We only store the "true" versions of the VMX capability MSRs. We
arch/x86/kvm/vmx/pmu_intel.c
@@ -138,7 +138,7 @@ static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
 
 static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
 {
-	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
+	return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
 }
 
 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -588,7 +588,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
 	if (intel_pmu_lbr_is_compatible(vcpu) &&
-	    (perf_capabilities & PMU_CAP_LBR_FMT))
+	    (perf_capabilities & PERF_CAP_LBR_FMT))
 		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
 	else
 		lbr_desc->records.nr = 0;
arch/x86/kvm/vmx/vmx.c
@@ -2127,7 +2127,7 @@ u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
 	    (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
 		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
 
-	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
+	if ((kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT) &&
 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 
@@ -2412,9 +2412,9 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			vmx->pt_desc.guest.addr_a[index / 2] = data;
 		break;
 	case MSR_IA32_PERF_CAPABILITIES:
-		if (data & PMU_CAP_LBR_FMT) {
-			if ((data & PMU_CAP_LBR_FMT) !=
-			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
+		if (data & PERF_CAP_LBR_FMT) {
+			if ((data & PERF_CAP_LBR_FMT) !=
+			    (kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT))
 				return 1;
 			if (!cpuid_model_is_consistent(vcpu))
 				return 1;
@@ -7810,7 +7810,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 static __init u64 vmx_get_perf_capabilities(void)
 {
-	u64 perf_cap = PMU_CAP_FW_WRITES;
+	u64 perf_cap = PERF_CAP_FW_WRITES;
 	u64 host_perf_cap = 0;
 
 	if (!enable_pmu)
@@ -7830,7 +7830,7 @@ static __init u64 vmx_get_perf_capabilities(void)
 		if (!vmx_lbr_caps.has_callstack)
 			memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
 		else if (vmx_lbr_caps.nr)
-			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+			perf_cap |= host_perf_cap & PERF_CAP_LBR_FMT;
 	}
 
 	if (vmx_pebs_supported()) {