diff --git a/proto/hypervisor.c b/proto/hypervisor.c
index 502545d218cd7ae38bb479e032d8e8720a560ab5..11b49ac33f43bceca66d205157a4c6b0a938a6ec 100644
--- a/proto/hypervisor.c
+++ b/proto/hypervisor.c
@@ -33,9 +33,10 @@ static int cr4_enable_vmx(void) {
     unsigned long cr4;
 
     /*BUG: Need to somehow switch to a CPU whose VMXE bit hasn't be modified*/
-    if ((__read_cr4() >> 13) & 1) {
-        return -EBUSY;
-    }
+    /*NOTE(review): CR4.VMXE reads as 1 on every CPU here - possibly left set by a prior load that never cleared it; confirm before dropping this guard*/
+    /*if ((__read_cr4() >> 13) & 1) {*/
+    /*    return -EBUSY;*/
+    /*}*/
 
     __asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
     cr4 |= (1UL << 13);
@@ -82,7 +83,7 @@ static unsigned char vmxon(unsigned long long pa) {
 
     /*asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, % l[fault])*/
     /*         :*/
-    /*         : [vmxon_pointer] "m"(vmxon_pointer)*/
+    /*         : [vmxon_pointer] "m"(pa)*/
     /*         :*/
     /*         : fault);*/
 
@@ -92,6 +93,65 @@ static unsigned char vmxon(unsigned long long pa) {
     DEBUG_FMT("RFLAGS: 0x%llx\n", rflags);
 
     return ret;
+
+    /*    unsigned long long msr;*/
+    /**/
+    /*    cr4_set_bits(X86_CR4_VMXE);*/
+    /**/
+    /*    __asm__ goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b,
+     * %l[fault])*/
+    /*                 :*/
+    /*                 : [vmxon_pointer] "m"(vmxon_pointer)*/
+    /*                 :*/
+    /*                 : fault);*/
+    /*    return 0;*/
+    /**/
+    /*fault:*/
+    /*    WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",*/
+    /*              rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);*/
+    /*    cr4_clear_bits(X86_CR4_VMXE);*/
+    /**/
+    /*    return -EFAULT;*/
+}
+
+/*NOTE: arch/x86/kvm/vmx/vmx.c:2824 v6.12.10*/
+static int kvm_cpu_vmxon(unsigned long long vmxon_pointer) {
+    unsigned long long msr;
+
+    cr4_set_bits(X86_CR4_VMXE);
+
+    // clang-format off
+    asm goto("1: vmxon %[vmxon_pointer]\n\t" _ASM_EXTABLE(1b, %l[fault])
+             :
+             : [vmxon_pointer] "m"(vmxon_pointer)
+             :
+             : fault);
+
+    // clang-format on
+    return 0;
+
+fault:
+    WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
+              rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+    cr4_clear_bits(X86_CR4_VMXE);
+
+    return -EFAULT;
+}
+
+/*NOTE: arch/x86/kvm/vmx/vmx.c:742 v6.12.10*/
+static int kvm_cpu_vmxoff(void) {
+    // clang-format off
+    asm goto("1: vmxoff\n\t" _ASM_EXTABLE(1b, %l[fault])::
+                 : "cc", "memory"
+             : fault);
+
+    // clang-format on
+    cr4_clear_bits(X86_CR4_VMXE);
+    return 0;
+
+fault:
+    cr4_clear_bits(X86_CR4_VMXE);
+    return -EIO;
 }
 
 static int my_init(void) {
@@ -154,13 +214,14 @@ static int my_init(void) {
 
-    unsigned char vmxon_ret = 0;
+    int vmxon_ret = 0;
 
-    if ((vmxon_ret = vmxon(vmxon_region.pa) != 0)) {
+    /*if ((vmxon_ret = vmxon(vmxon_region.pa) != 0)) {*/
+    if ((vmxon_ret = kvm_cpu_vmxon(vmxon_region.pa)) != 0) {
         /*unsigned long vm_err = __rdmsr(0x4400);*/
         /*pr_err("VM_ERR val = 0x%lx\n", vm_err);*/
-        cr4_clear_bits(13);
+        cr4_clear_bits(X86_CR4_VMXE);
 
         kfree(vmxon_region.va);
-        __asm__ volatile("vmxoff");
+        /*__asm__ volatile("vmxoff");*/
         pr_err("`vmxon` failed with return code %d\n", vmxon_ret);
         return -1;
     }
@@ -172,7 +233,13 @@ static int my_init(void) {
 
 static void my_exit(void) {
     pr_info("Executing VMXOFF\n");
-    __asm__ volatile("vmxoff");
+    /*__asm__ volatile("vmxoff");*/
+    int vmxoff;
+
+    if ((vmxoff = kvm_cpu_vmxoff()) != 0) {
+        pr_err("Failed to execute VMXOFF\n");
+        return;
+    }
 
     pr_info("Freeing memory of the VMXON region\n");
     kfree(vmxon_region.va);