x86/boot: Move boot_*msr helpers to asm/shared/msr.h
The boot_{rdmsr,wrmsr}() helpers provide *just* the bare-bones MSR access
functionality, without any of the tracing or exception handling glue used in
the kernel proper.
Move these helpers to asm/shared/msr.h and rename them to raw_{rdmsr,wrmsr}()
to indicate what they are.
[ bp: Correct the reason why those helpers exist. I should've caught that in
the original patch that added them:
176db62257 ("x86/boot: Introduce helpers for MSR reads/writes")
but oh well...
- fixup include path delimiters to <> ]
Signed-off-by: John Allen <john.allen@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://patch.msgid.link/all/20250924200852.4452-2-john.allen@amd.com
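For illustration, a minimal sketch of what the rename means for a caller. The
function below is hypothetical; the helper names, the MSR constant and the
struct msr layout are taken from the diff that follows:

    #include <asm/shared/msr.h>         /* struct msr, raw_{rdmsr,wrmsr}() */

    /* Hypothetical boot-path caller, shown before and after the rename. */
    static u64 read_sev_status(void)
    {
            struct msr m;

            /* was: boot_rdmsr(MSR_AMD64_SEV, &m); */
            raw_rdmsr(MSR_AMD64_SEV, &m);

            return m.q;                 /* full 64-bit MSR value */
    }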
commit 9249bcdea0 (parent dcb6fa37fd)
committed by Borislav Petkov (AMD)
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -14,6 +14,7 @@
 #include <asm/bootparam.h>
 #include <asm/pgtable_types.h>
+#include <asm/shared/msr.h>
 #include <asm/sev.h>
 #include <asm/trapnr.h>
 #include <asm/trap_pf.h>
@@ -397,7 +398,7 @@ void sev_enable(struct boot_params *bp)
 	}
 
 	/* Set the SME mask if this is an SEV guest. */
-	boot_rdmsr(MSR_AMD64_SEV, &m);
+	raw_rdmsr(MSR_AMD64_SEV, &m);
 	sev_status = m.q;
 	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
 		return;
@@ -446,7 +447,7 @@ u64 sev_get_status(void)
 	if (sev_check_cpu_support() < 0)
 		return 0;
 
-	boot_rdmsr(MSR_AMD64_SEV, &m);
+	raw_rdmsr(MSR_AMD64_SEV, &m);
 	return m.q;
 }
 
@@ -496,7 +497,7 @@ bool early_is_sevsnp_guest(void)
 			struct msr m;
 
 			/* Obtain the address of the calling area to use */
-			boot_rdmsr(MSR_SVSM_CAA, &m);
+			raw_rdmsr(MSR_SVSM_CAA, &m);
 			boot_svsm_caa_pa = m.q;
 
 			/*
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -10,7 +10,7 @@
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
-#include "../msr.h"
+#include <asm/shared/msr.h>
 
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 sev_get_status(void);
@@ -20,7 +20,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
 {
 	struct msr m;
 
-	boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+	raw_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
 
 	return m.q;
 }
@@ -30,7 +30,7 @@ static inline void sev_es_wr_ghcb_msr(u64 val)
 	struct msr m;
 
 	m.q = val;
-	boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+	raw_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
 }
 
 #else
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -26,9 +26,9 @@
 #include <asm/intel-family.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
+#include <asm/shared/msr.h>
 
 #include "string.h"
-#include "msr.h"
 
 static u32 err_flags[NCAPINTS];
@@ -134,9 +134,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 
 		struct msr m;
 
-		boot_rdmsr(MSR_K7_HWCR, &m);
+		raw_rdmsr(MSR_K7_HWCR, &m);
 		m.l &= ~(1 << 15);
-		boot_wrmsr(MSR_K7_HWCR, &m);
+		raw_wrmsr(MSR_K7_HWCR, &m);
 
 		get_cpuflags();	/* Make sure it really did something */
 		err = check_cpuflags();
@@ -148,9 +148,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 
 		struct msr m;
 
-		boot_rdmsr(MSR_VIA_FCR, &m);
+		raw_rdmsr(MSR_VIA_FCR, &m);
 		m.l |= (1 << 1) | (1 << 7);
-		boot_wrmsr(MSR_VIA_FCR, &m);
+		raw_wrmsr(MSR_VIA_FCR, &m);
 
 		set_bit(X86_FEATURE_CX8, cpu.flags);
 		err = check_cpuflags();
@@ -160,14 +160,14 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 		struct msr m, m_tmp;
 		u32 level = 1;
 
-		boot_rdmsr(0x80860004, &m);
+		raw_rdmsr(0x80860004, &m);
 		m_tmp = m;
 		m_tmp.l = ~0;
-		boot_wrmsr(0x80860004, &m_tmp);
+		raw_wrmsr(0x80860004, &m_tmp);
 		asm("cpuid"
 		    : "+a" (level), "=d" (cpu.flags[0])
 		    : : "ecx", "ebx");
-		boot_wrmsr(0x80860004, &m);
+		raw_wrmsr(0x80860004, &m);
 
 		err = check_cpuflags();
 	} else if (err == 0x01 &&
--- a/arch/x86/boot/msr.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Helpers/definitions related to MSR access.
- */
-
-#ifndef BOOT_MSR_H
-#define BOOT_MSR_H
-
-#include <asm/shared/msr.h>
-
-/*
- * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
- * boot kernel since they rely on tracepoint/exception handling infrastructure
- * that's not available here.
- */
-static inline void boot_rdmsr(unsigned int reg, struct msr *m)
-{
-	asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
-}
-
-static inline void boot_wrmsr(unsigned int reg, const struct msr *m)
-{
-	asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
-}
-
-#endif /* BOOT_MSR_H */
--- a/arch/x86/include/asm/shared/msr.h
+++ b/arch/x86/include/asm/shared/msr.h
@@ -12,4 +12,19 @@ struct msr {
 	};
 };
 
+/*
+ * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
+ * boot kernel since they rely on tracepoint/exception handling infrastructure
+ * that's not available here.
+ */
+static inline void raw_rdmsr(unsigned int reg, struct msr *m)
+{
+	asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
+}
+
+static inline void raw_wrmsr(unsigned int reg, const struct msr *m)
+{
+	asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
+}
+
 #endif /* _ASM_X86_SHARED_MSR_H */
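As a usage note, struct msr wraps an anonymous union, so callers can work with
the low and high 32-bit halves (m.l, m.h) or the whole 64-bit value (m.q). A
minimal sketch of the read-modify-write pattern the boot code uses; the MSR
number parameter and bit 15 below are placeholders for illustration, not part
of this patch:

    #include <asm/shared/msr.h>

    /*
     * Hypothetical: clear one bit in an MSR, as cpucheck.c does for
     * MSR_K7_HWCR. 'msr_nr' and bit 15 are illustrative only.
     */
    static void clear_bit15(unsigned int msr_nr)
    {
            struct msr m;

            raw_rdmsr(msr_nr, &m);      /* EDX:EAX -> m.h:m.l */
            m.l &= ~(1U << 15);         /* touch only the low half */
            raw_wrmsr(msr_nr, &m);      /* write both halves back */
    }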