author     Sean Christopherson <seanjc@google.com>    2021-04-21 19:11:16 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>        2021-04-26 05:27:14 -0400
commit     4daf2a1c45ace29e3bacabdef0d4c7920e1f1ea4 (patch)
tree       bb6dc185f6dd9782649f226d5b5332f76a31692d
parent     013380782d4d675d4f8d9891ca7d010795152dc8 (diff)
x86/sev: Drop redundant and potentially misleading 'sev_enabled'
Drop the sev_enabled flag and switch its one user over to sev_active().

sev_enabled was made redundant with the introduction of sev_status in
commit b57de6cd1639 ("x86/sev-es: Add SEV-ES Feature Detection").
sev_enabled and sev_active() are guaranteed to be equivalent, as each is
true iff 'sev_status & MSR_AMD64_SEV_ENABLED' is true, and are only ever
written in tandem (ignoring compressed boot's version of sev_status).

Removing sev_enabled avoids confusion over whether it refers to the guest
or the host, and will also allow KVM to usurp "sev_enabled" for its own
purposes.

No functional change intended.

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210422021125.3417167-7-seanjc@google.com>
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
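To make the equivalence claim above concrete, the following standalone sketch (userspace C, not part of the patch; the MSR bit value and the C-bit position used for sme_me_mask are illustrative assumptions) shows why the two checks always agree: sev_enabled was only ever set on the path that also sets the SEV bit in sev_status.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative value; in the kernel this is bit 0 of the SEV status MSR. */
#define MSR_AMD64_SEV_ENABLED	(1ULL << 0)

static uint64_t sev_status;	/* stand-in for the kernel's sev_status  */
static uint64_t sme_me_mask;	/* stand-in for the kernel's sme_me_mask */
static bool sev_enabled;	/* the flag this patch removes           */

static bool sev_active(void)
{
	return sev_status & MSR_AMD64_SEV_ENABLED;
}

/* Pre-patch sme_active(): relies on the redundant flag. */
static bool sme_active_old(void)
{
	return sme_me_mask && !sev_enabled;
}

/* Post-patch sme_active(): derives the same answer from sev_status. */
static bool sme_active_new(void)
{
	return sme_me_mask && !sev_active();
}

int main(void)
{
	/* Simulate an SEV guest: the flag and the status bit are written in tandem. */
	sme_me_mask = 1ULL << 47;	/* illustrative C-bit position */
	sev_status |= MSR_AMD64_SEV_ENABLED;
	sev_enabled = true;
	printf("SEV guest:      old=%d new=%d\n", sme_active_old(), sme_active_new());

	/* Simulate bare-metal SME: only the encryption mask is set. */
	sev_status = 0;
	sev_enabled = false;
	printf("bare-metal SME: old=%d new=%d\n", sme_active_old(), sme_active_new());
	return 0;
}

In both configurations the old and new helpers report the same result, which is the "no functional change intended" property the message relies on.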
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h  |  1
-rw-r--r--  arch/x86/mm/mem_encrypt.c           | 10
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c  |  1
3 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 31c4df123aa0..9c80c68d75b5 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -20,7 +20,6 @@
extern u64 sme_me_mask;
extern u64 sev_status;
-extern bool sev_enabled;
void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ae78cef79980..a2d492595213 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -44,8 +44,6 @@ EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
-bool sev_enabled __section(".data");
-
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
@@ -373,14 +371,14 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
* up under SME the trampoline area cannot be encrypted, whereas under SEV
* the trampoline area must be encrypted.
*/
-bool sme_active(void)
+bool sev_active(void)
{
- return sme_me_mask && !sev_enabled;
+ return sev_status & MSR_AMD64_SEV_ENABLED;
}
-bool sev_active(void)
+bool sme_active(void)
{
- return sev_status & MSR_AMD64_SEV_ENABLED;
+ return sme_me_mask && !sev_active();
}
EXPORT_SYMBOL_GPL(sev_active);
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 6c5eb6f3f14f..0c2759b7f03a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -545,7 +545,6 @@ void __init sme_enable(struct boot_params *bp)
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
- sev_enabled = true;
physical_mask &= ~sme_me_mask;
return;
}