diff --git a/proto/hypervisor.c b/proto/hypervisor.c
index a57cf83223efc09523fa4ad44d6a91c0816eee20..cca764df5d1bf8d3de376e0e8858d69175d3fc85 100644
--- a/proto/hypervisor.c
+++ b/proto/hypervisor.c
@@ -1,10 +1,13 @@
+#include "asm/msr.h"
 #include "linux/kern_levels.h"
+#include "linux/printk.h"
 #include <linux/init.h>   /* Needed for the macros */
 #include <linux/module.h> /* Needed by all modules */
 
-#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1 << 2)
-#define FEATURE_CONTROL_LOCKED (1 << 0)
-#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define IA32_FEATURE_CONTROL_LOCK_BIT (1 << 0)
+#define IA32_FEATURE_CONTROL_VMXON_IN_SMX (1 << 1)
+#define IA32_FEATURE_CONTROL_VMXON_OUTSIDE_SMX (1 << 2)
+#define IA32_FEATURE_CONTROL_MSR (0x3a)
 
 /*asm ( assembler template*/
 /* : output operands (optional)*/
@@ -12,50 +15,12 @@
 /* : clobbered registers list (optional)*/
 /* );*/
 
-static bool getVmxOperation(void) {
-  /*
-   * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
-   * Bit 0: Lock bit. If clear, VMXON causes a #GP.
-   * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
-   * outside of SMX causes a #GP.
-   */
-
-  unsigned long cr0, cr4;
-  unsigned long long required, feature_control;
-  unsigned long low1 = 0;
-  required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
-  required |= FEATURE_CONTROL_LOCKED;
-  feature_control = __rdmsr(MSR_IA32_FEATURE_CONTROL);
-  printk(KERN_INFO "RDMS output is %ld\n", (long)feature_control);
-
-  if ((feature_control & required) != required) {
-    wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required, low1);
-  }
-
-  /*
-   * Ensure bits in CR0 and CR4 are valid in VMX operation:
-   * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
-   * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
-   */
-  __asm__ volatile("mov %%cr0, %0" : "=r"(cr0) : : "memory");
-  cr0 &= __rdmsr(MSR_IA32_VMX_CR0_FIXED1);
-  cr0 |= __rdmsr(MSR_IA32_VMX_CR0_FIXED0);
-  __asm__ volatile("mov %0, %%cr0" : : "r"(cr0) : "memory");
-
-  __asm__ volatile("mov %%cr4, %0" : "=r"(cr4) : : "memory");
-  cr4 &= __rdmsr(MSR_IA32_VMX_CR4_FIXED1);
-  cr4 |= __rdmsr(MSR_IA32_VMX_CR4_FIXED0);
-  __asm__ volatile("mov %0, %%cr4" : : "r"(cr4) : "memory");
-
-  return true;
-}
-
 static void enable_vmx(void) {
   unsigned long cr4;
 
-  __asm__ volatile("mov %%cr4, %0" : "=r"(cr4)::"memory");
-  cr4 |= (1 << 13);
-  __asm__ volatile("mov %0, %%cr4" ::"r"(cr4) : "memory");
+  __asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
+  cr4 |= (1UL << 13);
+  __asm__ volatile("mov %0, %%cr4" ::"r"(cr4));
 }
 
 static bool vmx_supported(void) {
@@ -68,6 +33,25 @@ static bool vmx_supported(void) {
   return (ecx >> 5) & 1;
 }
 
+static bool ia32_feature_control_flags(void) {
+  int msr_value = __rdmsr(IA32_FEATURE_CONTROL_MSR);
+
+  if (!(msr_value & IA32_FEATURE_CONTROL_LOCK_BIT)) {
+    printk(KERN_INFO "Writing to the IA32_FEATURE_CONTROL MSR\n");
+    __wrmsr(IA32_FEATURE_CONTROL_MSR,
+            IA32_FEATURE_CONTROL_LOCK_BIT |
+                IA32_FEATURE_CONTROL_VMXON_OUTSIDE_SMX,
+            0);
+  }
+
+  if (!(msr_value & IA32_FEATURE_CONTROL_VMXON_OUTSIDE_SMX)) {
+    printk(KERN_INFO "Virtualization isn't available\n");
+    return false;
+  }
+
+  return true;
+}
+
 static int my_init(void) {
   if (!vmx_supported()) {
     printk(KERN_INFO "VMX isn't supported\n");
@@ -75,20 +59,24 @@ static int my_init(void) {
   }
   printk(KERN_INFO "VMX is supported!\n");
 
-  enable_vmx();
-  printk(KERN_INFO "VMX has been successfully enabled!\n");
-
-  if (!getVmxOperation()) {
-    printk(KERN_INFO "VMX operation couldn't correctly setup\n");
+  printk(KERN_INFO
+         "Check the necessary flags of the IA32_FEATURE_CONTROL_MSR\n");
+
+  if (!ia32_feature_control_flags()) {
+    printk(KERN_INFO "The flags of the IA32_FEATURE_CONTROL MSR do not "
+                     "permit virtualization\n");
     return -1;
   }
 
+  printk(KERN_INFO "IA32_FEATURE_CONTROL MSR flags allow virtualization\n");
+
   return 0;
 }
 
-static void my_exit(void) { printk(KERN_INFO "Goodbye world.\n"); }
+static void my_exit(void) { printk(KERN_INFO "Hypervisor has exited\n"); }
 
 module_init(my_init);
 module_exit(my_exit);