Commit effd9649 authored by iliya.saroukha

fix: moved vmx functions to separate translation unit

parent 1affe68b
@@ -20,6 +20,7 @@
#include "msr/msr.h"
#include "region/vxmon.h"
#include "vcpu/vcpu.h"
#include "vmx/vmx.h"
#define NULL ((void *)0)
@@ -31,146 +32,7 @@
static struct vcpu_t vcpus[1];
static int cr4_enable_vmx(void) {
unsigned long cr4;
/*BUG: Need to somehow switch to a CPU whose VMXE bit hasn't been modified*/
/*NOTE: seems like this bit is always set on every processor, weird..*/
/*if ((__read_cr4() >> 13) & 1) {*/
/* return -EBUSY;*/
/*}*/
__asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
cr4 |= (1UL << 13);
__asm__ volatile("mov %0, %%cr4" ::"r"(cr4));
DEBUG_FMT("CR4[13].VMXE set? %s\n",
(__read_cr4() >> 13) & 1 ? "true" : "false");
return 0;
}
static bool vmx_support_cpuid(void) {
unsigned int eax, ebx, ecx, edx;
cpuid(0, &eax, &ebx, &ecx, &edx);
/* EBX, EDX, ECX hold the 12-byte vendor string; it is not NUL-terminated */
unsigned int ret_str[3] = {ebx, edx, ecx};
DEBUG_FMT("Vendor ID = %.12s\n", (char *)ret_str);
unsigned int ecx_vmx = cpuid_ecx(1);
/*__asm__ volatile("mov $1, %rax");*/
/*__asm__ volatile("cpuid");*/
/*__asm__ volatile("mov %%ecx , %0\n\t" : "=r"(ecx));*/
unsigned int addr_width = cpuid_eax(0x80000008);
DEBUG_FMT("Physical address width = %d\n", addr_width & 0xff);
return (ecx_vmx >> 5) & 1;
}
/*
* https://elixir.bootlin.com/linux/v6.12.4/source/tools/testing/selftests/kvm/include/x86_64/vmx.h#L297
*/
static unsigned char vmxon(unsigned long long pa) {
unsigned char ret;
// setna: "Set byte if not above" (CF=1 or ZF=1)
__asm__ volatile("vmxon %[pa]; setna %[ret]"
: [ret] "=rm"(ret)
: [pa] "m"(pa)
: "cc", "memory");
/*asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, % l[fault])*/
/* :*/
/* : [vmxon_pointer] "m"(pa)*/
/* :*/
/* : fault);*/
unsigned long long rflags;
__asm__ volatile("pushfq; popq %0" : "=r"(rflags));
DEBUG_FMT("RFLAGS: 0x%llx\n", rflags);
return ret;
/* unsigned long long msr;*/
/**/
/* cr4_set_bits(X86_CR4_VMXE);*/
/**/
/* __asm__ goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b,
* %l[fault])*/
/* :*/
/* : [vmxon_pointer] "m"(vmxon_pointer)*/
/* :*/
/* : fault);*/
/* return 0;*/
/**/
/*fault:*/
/* WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",*/
/* rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);*/
/* cr4_clear_bits(X86_CR4_VMXE);*/
/**/
/* return -EFAULT;*/
}
/*NOTE: arch/x86/kvm/vmx/vmx.c:2824 v6.12.10*/
static int kvm_cpu_vmxon(unsigned long long vmxon_pointer) {
unsigned long long msr;
cr4_set_bits(X86_CR4_VMXE);
// clang-format off
asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, %l[fault])
:
: [vmxon_pointer] "m"(vmxon_pointer)
:
: fault);
// clang-format on
return 0;
fault:
WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
cr4_clear_bits(X86_CR4_VMXE);
return -EFAULT;
}
/*NOTE: arch/x86/kvm/vmx/vmx.c:742 v6.12.10*/
static int kvm_cpu_vmxoff(void) {
// clang-format off
asm goto("1: vmxoff\n\t" _ASM_EXTABLE(1b, %l[fault])::
: "cc", "memory"
: fault);
// clang-format on
cr4_clear_bits(X86_CR4_VMXE);
return 0;
fault:
cr4_clear_bits(X86_CR4_VMXE);
return -EIO;
}
static int my_init(void) {
/*
* https://www.sciencedirect.com/topics/computer-science/current-privilege-level
*/
DEBUG_FMT("Querying CS (lowest 2 bits) for CPL\n");
unsigned long cs, cpl;
__asm__ volatile("mov %%cs, %0" : "=r"(cs));
cpl = cs & 0x3;
DEBUG_FMT("CPL level = %lu\n", cpl);
if (cpl > 0) {
return -1;
}
pr_info("Checking VMX support using CPUID\n"); pr_info("Checking VMX support using CPUID\n");
if (!vmx_support_cpuid()) { if (!vmx_support_cpuid()) {
pr_err("VMX isn't supported\n"); pr_err("VMX isn't supported\n");
......
#include "vmx.h"
#include "../debug/debug.h"
#include "asm/cpuid.h"
#include "asm/special_insns.h"
#include "asm/tlbflush.h"
#include <linux/string.h>
int cr4_enable_vmx(void) {
unsigned long cr4;
/*BUG: Need to somehow switch to a CPU whose VMXE bit hasn't been modified*/
/*NOTE: seems like this bit is always set on every processor, weird..*/
/*if ((__read_cr4() >> 13) & 1) {*/
/* return -EBUSY;*/
/*}*/
__asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
cr4 |= (1UL << 13);
__asm__ volatile("mov %0, %%cr4" ::"r"(cr4));
DEBUG_FMT("CR4[13].VMXE set? %s\n",
(__read_cr4() >> 13) & 1 ? "true" : "false");
return 0;
}
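/*
 * Illustrative sketch, not part of this commit: besides CR4.VMXE, VMXON also
 * requires IA32_FEATURE_CONTROL (MSR 0x3a) to enable VMX, and firmware
 * usually locks that MSR; it is the register the WARN_ONCE in kvm_cpu_vmxon()
 * dumps on failure. A hedged pre-check along those lines, reusing
 * rdmsrl_safe() as the existing code does (the helper name is hypothetical):
 */
static int __attribute__((unused)) feat_ctl_allows_vmx(void) {
    unsigned long long feat_ctl;

    if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &feat_ctl))
        return 0; /* MSR could not be read */

    /* bit 0 = lock bit, bit 2 = enable VMX outside SMX operation */
    return (feat_ctl & (1ULL << 0)) && (feat_ctl & (1ULL << 2));
}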
bool vmx_support_cpuid(void) {
unsigned int eax, ebx, ecx, edx;
cpuid(0, &eax, &ebx, &ecx, &edx);
/* EBX, EDX, ECX hold the 12-byte vendor string; it is not NUL-terminated */
unsigned int ret_str[3] = {ebx, edx, ecx};
DEBUG_FMT("Vendor ID = %.12s\n", (char *)ret_str);
unsigned int ecx_vmx = cpuid_ecx(1);
/*__asm__ volatile("mov $1, %rax");*/
/*__asm__ volatile("cpuid");*/
/*__asm__ volatile("mov %%ecx , %0\n\t" : "=r"(ecx));*/
unsigned int addr_width = cpuid_eax(0x80000008);
DEBUG_FMT("Physical address width = %d\n", addr_width & 0xff);
return (ecx_vmx >> 5) & 1;
}
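/*
 * Illustrative sketch, not part of this commit: the commented-out inline asm
 * above issues CPUID without declaring which registers it clobbers. If leaf 1
 * were queried by hand instead of via cpuid_ecx(), the operands would need to
 * be spelled out explicitly, e.g. (hypothetical helper):
 */
static inline unsigned int __attribute__((unused)) cpuid_leaf1_ecx(void) {
    unsigned int eax = 1, ebx, ecx = 0, edx;

    __asm__ volatile("cpuid"
                     : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));
    return ecx; /* bit 5 set => VMX supported */
}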
/*
* https://elixir.bootlin.com/linux/v6.12.4/source/tools/testing/selftests/kvm/include/x86_64/vmx.h#L297
*/
static unsigned char __attribute__((unused)) vmxon(unsigned long long pa) {
unsigned char ret;
// setna: "Set byte if not above" (CF=1 or ZF=1)
__asm__ volatile("vmxon %[pa]; setna %[ret]"
: [ret] "=rm"(ret)
: [pa] "m"(pa)
: "cc", "memory");
/*asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, % l[fault])*/
/* :*/
/* : [vmxon_pointer] "m"(pa)*/
/* :*/
/* : fault);*/
unsigned long long rflags;
__asm__ volatile("pushfq; popq %0" : "=r"(rflags));
DEBUG_FMT("RFLAGS: 0x%llx\n", rflags);
return ret;
}
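/*
 * Illustrative sketch, not part of this commit: the setna above relies on the
 * VMX status convention where a failing VMX instruction sets CF
 * (VMfailInvalid) or ZF (VMfailValid). The RFLAGS value printed above could
 * be decoded along these lines (hypothetical helper):
 */
static void __attribute__((unused)) vmx_decode_status(unsigned long long rflags) {
    if (rflags & (1ULL << 0)) /* CF = bit 0 */
        DEBUG_FMT("VMXON status: %s\n", "VMfailInvalid (bad region pointer?)");
    else if (rflags & (1ULL << 6)) /* ZF = bit 6 */
        DEBUG_FMT("VMXON status: %s\n", "VMfailValid (see VM-instruction error field)");
}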
int kvm_cpu_vmxon(unsigned long long vmxon_pointer) {
unsigned long long msr;
cr4_set_bits(X86_CR4_VMXE);
// clang-format off
asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, %l[fault])
:
: [vmxon_pointer] "m"(vmxon_pointer)
:
: fault);
// clang-format on
return 0;
fault:
WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
cr4_clear_bits(X86_CR4_VMXE);
return -EFAULT;
}
int kvm_cpu_vmxoff(void) {
// clang-format off
asm goto("1: vmxoff\n\t" _ASM_EXTABLE(1b, %l[fault])::
: "cc", "memory"
: fault);
// clang-format on
cr4_clear_bits(X86_CR4_VMXE);
return 0;
fault:
cr4_clear_bits(X86_CR4_VMXE);
return -EIO;
}
#pragma once
#include "linux/types.h"
int cr4_enable_vmx(void);
bool vmx_support_cpuid(void);
/*NOTE: arch/x86/kvm/vmx/vmx.c:2824 v6.12.10*/
int kvm_cpu_vmxon(unsigned long long vmxon_pointer);
/*NOTE: arch/x86/kvm/vmx/vmx.c:742 v6.12.10*/
int kvm_cpu_vmxoff(void);
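/*
 * Illustrative sketch, not part of this commit: a possible call sequence for
 * this interface from the module's init path. The physical address of a
 * 4 KiB-aligned VMXON region is assumed to come from the region/vxmon.h
 * helpers; the names below (example_enter_vmx_root, vmxon_region_pa) are
 * hypothetical. Note that VMXON/VMXOFF act on the current logical CPU, which
 * is what the BUG comment in cr4_enable_vmx() about switching CPUs alludes to.
 */
static int example_enter_vmx_root(unsigned long long vmxon_region_pa) {
    if (!vmx_support_cpuid())
        return -EOPNOTSUPP; /* CPUID.1:ECX.VMX (bit 5) is clear */

    /* kvm_cpu_vmxon() sets CR4.VMXE itself before executing VMXON */
    if (kvm_cpu_vmxon(vmxon_region_pa))
        return -EFAULT;

    /* ... VMX root operation on this CPU ... */

    return kvm_cpu_vmxoff(); /* VMXOFF, then clear CR4.VMXE */
}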