Diffstat (limited to 'arch/x86/kvm/vmx/vmx_ops.h')
-rw-r--r--  arch/x86/kvm/vmx/vmx_ops.h  22  +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
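The diff below replaces every open-coded static_branch_unlikely(&enable_evmcs) check in the VMCS field accessors with the kvm_is_using_evmcs() helper. For orientation, a minimal sketch of what such a helper amounts to is given here; the diff itself only shows the call sites, so the key name and include used below are assumptions, not taken from this patch. The expectation is that the wrapper still resolves to a static branch, so the readability improvement comes at no codegen cost.

/*
 * Sketch only: a kvm_is_using_evmcs()-style wrapper, assuming it hides
 * the existing eVMCS static key behind a readable predicate.  The key
 * name below is assumed for illustration.
 */
#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);

static __always_inline bool kvm_is_using_evmcs(void)
{
	return static_branch_unlikely(&__kvm_is_using_evmcs);
}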
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index db95bde52998..ce47dc265f89 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -147,7 +147,7 @@ do_exception:
static __always_inline u16 vmcs_read16(unsigned long field)
{
vmcs_check16(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_read16(field);
return __vmcs_readl(field);
}
@@ -155,7 +155,7 @@ static __always_inline u16 vmcs_read16(unsigned long field)
static __always_inline u32 vmcs_read32(unsigned long field)
{
vmcs_check32(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_read32(field);
return __vmcs_readl(field);
}
@@ -163,7 +163,7 @@ static __always_inline u32 vmcs_read32(unsigned long field)
static __always_inline u64 vmcs_read64(unsigned long field)
{
vmcs_check64(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_read64(field);
#ifdef CONFIG_X86_64
return __vmcs_readl(field);
@@ -175,7 +175,7 @@ static __always_inline u64 vmcs_read64(unsigned long field)
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
vmcs_checkl(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_read64(field);
return __vmcs_readl(field);
}
@@ -222,7 +222,7 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
vmcs_check16(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write16(field, value);

__vmcs_writel(field, value);
@@ -231,7 +231,7 @@ static __always_inline void vmcs_write16(unsigned long field, u16 value)
static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
vmcs_check32(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write32(field, value);

__vmcs_writel(field, value);
@@ -240,7 +240,7 @@ static __always_inline void vmcs_write32(unsigned long field, u32 value)
static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
vmcs_check64(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write64(field, value);

__vmcs_writel(field, value);
@@ -252,7 +252,7 @@ static __always_inline void vmcs_write64(unsigned long field, u64 value)
static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
vmcs_checkl(field);
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write64(field, value);

__vmcs_writel(field, value);
@@ -262,7 +262,7 @@ static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
"vmcs_clear_bits does not support 64-bit fields");
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write32(field, evmcs_read32(field) & ~mask);

__vmcs_writel(field, __vmcs_readl(field) & ~mask);
@@ -272,7 +272,7 @@ static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
"vmcs_set_bits does not support 64-bit fields");
- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_write32(field, evmcs_read32(field) | mask);

__vmcs_writel(field, __vmcs_readl(field) | mask);
@@ -289,7 +289,7 @@ static inline void vmcs_load(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);

- if (static_branch_unlikely(&enable_evmcs))
+ if (kvm_is_using_evmcs())
return evmcs_load(phys_addr);

vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
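For reference, the two branches in each accessor differ in mechanism: the evmcs_read*/evmcs_write* path accesses fields of the enlightened VMCS, an ordinary memory page shared with the Hyper-V hypervisor, while the __vmcs_readl()/__vmcs_writel() path executes the VMREAD/VMWRITE instructions against the hardware VMCS. A bare-bones, illustrative VMREAD wrapper is sketched below; it is not the kernel's actual __vmcs_readl(), which also reports VM-instruction errors and recovers from faults (see the do_exception label in the first hunk's context).

/*
 * Illustrative only: a stripped-down VMREAD wrapper.  The real
 * __vmcs_readl() additionally reports VM-instruction errors and
 * handles faults.
 */
static __always_inline unsigned long sketch_vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("vmread %[field], %[value]"
		     : [value] "=rm" (value)
		     : [field] "r" (field)
		     : "cc");
	return value;
}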