From 089b9111a6c3dcc6fda425880d75d6d35644c24e Mon Sep 17 00:00:00 2001
From: Florent Gluck <florent.gluck@hesge.ch>
Date: Wed, 3 May 2023 00:27:51 +0200
Subject: [PATCH] Reworked quite a bit of the codebase so that VMs are now
 referenced instead of copied. It's lighter and more efficient, but I still
 need to work on some concurrency issues. Bumped version to 1.6.2

---
 src/router/routerTemplates.go |   2 +-
 src/router/routerVMs.go       |  30 ++++-----
 src/version/version.go        |   2 +-
 src/vms/template.go           |  20 +++---
 src/vms/vm.go                 |  20 +++---
 src/vms/vms.go                | 112 ++++++++++------------------------
 6 files changed, 70 insertions(+), 116 deletions(-)

diff --git a/src/router/routerTemplates.go b/src/router/routerTemplates.go
index ac751c6..b13d606 100644
--- a/src/router/routerTemplates.go
+++ b/src/router/routerTemplates.go
@@ -101,7 +101,7 @@ func (r *RouterTemplates)CreateTemplateFromVM(c echo.Context) error {
     }
 
     // Creates a new template from the client's parameters.
-    template, err := vms.NewTemplateFromVM(p.Name, user.Email, p.Access, &vm)
+    template, err := vms.NewTemplateFromVM(p.Name, user.Email, p.Access, vm)
     if err != nil {
         return echo.NewHTTPError(http.StatusBadRequest, err.Error())
     }
diff --git a/src/router/routerVMs.go b/src/router/routerVMs.go
index 1eb1b09..cd9e4b7 100644
--- a/src/router/routerVMs.go
+++ b/src/router/routerVMs.go
@@ -34,7 +34,7 @@ func NewRouterVMs() *RouterVMs {
 // VM access cap: CAP_VM_LIST: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetListableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -45,7 +45,7 @@ func (r *RouterVMs)GetListableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_LIST: returns all running VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/attach -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetAttachableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -56,7 +56,7 @@ func (r *RouterVMs)GetAttachableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_DESTROY: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/del -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetDeletableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_DESTROY_ANY, caps.CAP_VM_DESTROY, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_DESTROY_ANY, caps.CAP_VM_DESTROY, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -67,7 +67,7 @@ func (r *RouterVMs)GetDeletableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_START: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/start -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetStartableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_START_ANY, caps.CAP_VM_START, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_START_ANY, caps.CAP_VM_START, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -78,7 +78,7 @@ func (r *RouterVMs)GetStartableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_STOP: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/stop -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetStoppableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_STOP_ANY, caps.CAP_VM_STOP, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_STOP_ANY, caps.CAP_VM_STOP, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -89,7 +89,7 @@ func (r *RouterVMs)GetStoppableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_REBOOT: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/reboot -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetRebootableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_REBOOT_ANY, caps.CAP_VM_REBOOT, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_REBOOT_ANY, caps.CAP_VM_REBOOT, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -100,7 +100,7 @@ func (r *RouterVMs)GetRebootableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_EDIT: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/edit -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetEditableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_EDIT_ANY, caps.CAP_VM_EDIT, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_EDIT_ANY, caps.CAP_VM_EDIT, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -118,7 +118,7 @@ func (r *RouterVMs)GetEditableVMAccessVMs(c echo.Context) error {
     }
 
     return c.JSONPretty(http.StatusOK, r.vms.GetNetworkSerializedVMs(
-        func(vm vms.VM) bool {
+        func(vm *vms.VM) bool {
             // First, checks if the user is the VM's owner
             if vm.IsOwner(user.Email) {
                 return !vm.IsRunning()
@@ -146,7 +146,7 @@ func (r *RouterVMs)GetEditableVMAccessVMs(c echo.Context) error {
 // VM access cap: VM_READFS: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/exportdir -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetDirExportableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_READFS_ANY, caps.CAP_VM_READFS, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_READFS_ANY, caps.CAP_VM_READFS, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -157,7 +157,7 @@ func (r *RouterVMs)GetDirExportableVMs(c echo.Context) error {
 // VM access cap: VM_WRITEFS: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/importfiles -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetFilesImportableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_WRITEFS_ANY, caps.CAP_VM_WRITEFS, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_WRITEFS_ANY, caps.CAP_VM_WRITEFS, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -299,7 +299,7 @@ func (r *RouterVMs)EditVMByID(c echo.Context) error {
         if err := r.vms.EditVM(vm.ID, p.Name, p.Cpus, p.Ram, p.Nic, p.UsbDevs); err != nil {
             return echo.NewHTTPError(http.StatusBadRequest, err.Error())
         }
-        return c.JSONPretty(http.StatusOK, jsonMsg("OK"), "    ")
+        return c.JSONPretty(http.StatusOK, vm.SerializeToNetwork(), "    ")
     })
 }
 
@@ -510,7 +510,7 @@ func (r *RouterVMs)performVMsList(c echo.Context, userCapabilityAny, vmAccessCap
         // - owned by the logged user
         // - for which the logged user has the specified capability (vmAccessCapability) in the VM's access
         return c.JSONPretty(http.StatusOK, r.vms.GetNetworkSerializedVMs(
-            func(vm vms.VM) bool {
+            func(vm *vms.VM) bool {
                 if vm.IsOwner(user.Email) {
                     return cond(vm)
                 } else {
@@ -549,10 +549,10 @@ func (r *RouterVMs)performVMAction(c echo.Context, userCapabilityAny, vmAccessCa
 
     // First, checks if the user is the VM's owner
     if vm.IsOwner(user.Email) {
-        return action(c, &vm)
+        return action(c, vm)
     } else if user.HasCapability(userCapabilityAny) {
         // Next, checks if the user has the XX_ANY capability
-        return action(c, &vm)
+        return action(c, vm)
     } else {
         // Finally, check if the VM access for the logged user matches the required capability
         userCaps, exists := vm.Access[user.Email]
@@ -565,6 +565,6 @@ func (r *RouterVMs)performVMAction(c echo.Context, userCapabilityAny, vmAccessCa
             return echo.NewHTTPError(http.StatusUnauthorized, msgInsufficientCaps)
         }
 
-        return action(c, &vm)
+        return action(c, vm)
     }
 }
diff --git a/src/version/version.go b/src/version/version.go
index fbd2612..42e2c10 100644
--- a/src/version/version.go
+++ b/src/version/version.go
@@ -7,7 +7,7 @@ import (
 const (
     major  = 1
     minor  = 6
-    bugfix = 1
+    bugfix = 2
 )
 
 type Version struct {
diff --git a/src/vms/template.go b/src/vms/template.go
index 6a445b0..774f584 100644
--- a/src/vms/template.go
+++ b/src/vms/template.go
@@ -32,16 +32,23 @@ var dummyTemplate = Template{}

 // Creates a template from a VM's disk.
 func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
-    // Marks the VM to copy from as being busy.
-    if err := vms.setDiskBusy(vm); err != nil {
-        return nil, errors.New("Failed setting disk busy flag during template creation: " + err.Error())
+    vm.mutex.Lock()
+
+    if vm.IsRunning() {
+        vm.mutex.Unlock()
+        return nil, errors.New("VM must be stopped")
     }
+
+    // Marks VM as "busy", meaning its disk file is being accessed for a possibly long time.
+    vm.DiskBusy = true
+
     // Clears the VM from being busy.
-    defer vms.clearDiskBusy(vm)
+    defer func(vm *VM) { vm.DiskBusy = false }(vm)
 
     // Creates the template.
     template, err := newTemplate(name, owner, access)
     if err != nil {
+        vm.mutex.Unlock()
         return nil, err
     }
 
@@ -50,16 +56,18 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := os.Mkdir(templateDir, 0750); err != nil {
         msg := "Failed creating template dir: " + err.Error()
         log.Error(msg)
+        vm.mutex.Unlock()
         return nil, errors.New(msg)
     }
 
+    vm.mutex.Unlock()
+
     // 1) Copies VM's overlay file into a new one (in the VM's directory!):
     //    cp disk.qcow disk.tpl.qcow
     vmDiskFile, _ := filepath.Abs(filepath.Join(vm.dir, vmDiskFile))
     tplDiskFile, _ := filepath.Abs(filepath.Join(vm.dir, "/disk.tpl.qcow"))
     if err := utils.CopyFiles(vmDiskFile, tplDiskFile); err != nil {
         template.delete()
-        GetVMsInstance().updateVM(vm)
         log.Error("Failed copying VM overlay disk: " + err.Error())
         return nil, errors.New("Failed copying VM overlay disk: " + err.Error())
     }
@@ -69,7 +77,6 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := exec.QemuImgRebase(tplDiskFile); err != nil {
         template.delete()
         os.Remove(tplDiskFile)
-        GetVMsInstance().updateVM(vm)
         return nil, err
     }
 
@@ -79,7 +86,6 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := os.Rename(tplDiskFile, destFile); err != nil {
         template.delete()
         os.Remove(tplDiskFile)
-        GetVMsInstance().updateVM(vm)
         log.Error("Failed moving template disk image to final location: " + err.Error())
         return nil, errors.New("Failed moving template disk image to final location: " + err.Error())
     }
diff --git a/src/vms/vm.go b/src/vms/vm.go
index fcdbd4b..e5f0c87 100644
--- a/src/vms/vm.go
+++ b/src/vms/vm.go
@@ -346,10 +346,6 @@ func (vm *VM)delete() error {
 // Starts a VM and returns the access password.
 // Password is randomly generated.
 func (vm *VM)start(port int, endofExecFn endOfExecCallback) (string, error) {
-    if vm.IsRunning() {
-        return "", errors.New("Failed starting VM: VM is already running")
-    }
-
     if vm.DiskBusy {
         return "", errors.New("Failed starting VM: disk is busy")
     }
@@ -358,21 +354,21 @@ func (vm *VM)start(port int, endofExecFn endOfExecCallback) (string, error) {
     // allowing upper and lower case letters, disallowing repeat characters.
     pwd, err := passwordGen.Generate(8, 4, 0, false, false)
     if err != nil {
-        log.Error("Failed starting VM, password generation error: "+err.Error())
-        return "", errors.New("Failed starting VM, password generation error: "+err.Error())
+        log.Error("Failed starting VM: password generation error: "+err.Error())
+        return "", errors.New("Failed starting VM: password generation error: "+err.Error())
     }
 
     // Writes the password in a "secret" file.
     pwdFile, err := vm.writeSecretFile(pwd)
     if err != nil {
-        log.Error("Failed starting VM, error creating secret file: "+err.Error())
-        return "", errors.New("Failed starting VM, error creating secret file: "+err.Error())
+        log.Error("Failed starting VM: error creating secret file: "+err.Error())
+        return "", errors.New("Failed starting VM: error creating secret file: "+err.Error())
     }
 
     if err = vm.runQEMU(port, pwd, pwdFile, endofExecFn); err != nil {
         vm.removeSecretFile()
         os.Remove(vm.qgaSock)  // If QEMU fails it's likely the Guest Agent file it created is still there.
-        return "", errors.New("Failed starting VM, error running QEMU: "+err.Error())
+        return "", errors.New("Failed starting VM: error running QEMU: "+err.Error())
     }
 
     return pwd, nil
@@ -396,7 +392,7 @@ func (vm *VM)removeSecretFile() {
     os.Remove(pwdFile)
 }
 
-// Kills by force a running VM.
+// Kills a running VM.
 func (vm *VM)kill() error {
     if !vm.IsRunning() {
         return errors.New("Failed stopping VM: VM is not running")
@@ -404,7 +400,7 @@ func (vm *VM)kill() error {
 
     // Sends a SIGINT signal to terminate the QEMU process.
     // Note that QEMU terminates with status code 0 in this case (i.e. no error).
-    if err := syscall.Kill(vm.Run.Pid, syscall.SIGINT); err != nil {
+    if err := syscall.Kill(vm.Run.Pid, syscall.SIGTERM); err != nil {
         log.Error("Failed stopping VM: "+err.Error())
         return errors.New("Failed stopping VM: "+err.Error())
     }
@@ -414,7 +410,7 @@ func (vm *VM)kill() error {
 
 // Gracefully shutdowns a running VM.
 // Uses QGA commands to talk to the VM, which means QEMU Guest Agent must be
-// running in the VM, otherwise it won't work.
+// running in the VM, otherwise nothing will happen.
 func (vm *VM)shutdown() error {
     prefix := "Shutdown failed: "
 
diff --git a/src/vms/vms.go b/src/vms/vms.go
index 72d2891..56ed2ad 100644
--- a/src/vms/vms.go
+++ b/src/vms/vms.go
@@ -17,10 +17,10 @@ import (
 )
 
 type (
-    VMKeeperFn func(vm VM) bool
+    VMKeeperFn func(vm *VM) bool
 
     VMs struct {
-        m map[string]VM
+        m map[string]*VM
         dir string             // Base directory where VMs are stored
         rwlock *sync.RWMutex   // RWlock to ensure the VMs' map (m) coherency
         usedPorts [65536]bool  // Ports used by VMs
@@ -41,7 +41,7 @@ func GetVMsInstance() *VMs {
 // NOTE: path is the root directory where VMs reside.
 func InitVMs() error {
     vmsDir := paths.GetInstance().VMsDir
-    vms = &VMs { m: make(map[string]VM), dir: vmsDir, rwlock: new(sync.RWMutex), usedRAM: 0 }
+    vms = &VMs { m: make(map[string]*VM), dir: vmsDir, rwlock: new(sync.RWMutex), usedRAM: 0 }
 
     errMsg := "Failed reading VMs directory: "
     dirs1, err := utils.GetSubDirs(vmsDir)
@@ -78,7 +78,7 @@ func InitVMs() error {
                 }
                 f.Close()
 
-                vms.m[vm.ID.String()] = *vm
+                vms.m[vm.ID.String()] = vm
             }
         }
     }
@@ -91,9 +91,11 @@ func (vms *VMs)GetNetworkSerializedVMs(keepFn VMKeeperFn) []VMNetworkSerialized
     vms.rwlock.RLock()
     list := []VMNetworkSerialized{}
     for _, vm := range vms.m {
+        vm.mutex.Lock()
         if keepFn(vm) {
             list = append(list, vm.SerializeToNetwork())
         }
+        vm.mutex.Unlock()
     }
     vms.rwlock.RUnlock()
 
@@ -105,16 +107,16 @@ func (vms *VMs)GetNetworkSerializedVMs(keepFn VMKeeperFn) []VMNetworkSerialized
 }
 
 // Returns a VM by its ID.
-func (vms *VMs)GetVM(vmID uuid.UUID) (VM, error) {
+func (vms *VMs)GetVM(vmID uuid.UUID) (*VM, error) {
     vms.rwlock.RLock()
     defer vms.rwlock.RUnlock()
     return vms.getVMUnsafe(vmID)
 }
 
-func (vms *VMs)getVMUnsafe(vmID uuid.UUID) (VM, error) {
+func (vms *VMs)getVMUnsafe(vmID uuid.UUID) (*VM, error) {
     vm, exists := vms.m[vmID.String()]
     if !exists {
-        return dummyVM, errors.New("VM not found")
+        return &dummyVM, errors.New("VM not found")
     }
     return vm, nil
 }
@@ -152,18 +154,18 @@ func (vms *VMs)DeleteVM(vmID uuid.UUID) error {
 // Adds a VM and writes its files.
 func (vms *VMs)AddVM(vm *VM) error {
     vm.mutex.Lock()
+    defer vm.mutex.Unlock()
+
     // First, writes VM files since it can fail.
     err := vm.writeFiles()
     if err != nil {
-        vm.mutex.Unlock()
         return err
     }
-    vm.mutex.Unlock()
 
     // Adds VM to the map of VMs.
     vms.rwlock.Lock()
     key := vm.ID.String()
-    vms.m[key] = *vm
+    vms.m[key] = vm
     vms.rwlock.Unlock()
 
     return nil
@@ -180,13 +182,16 @@ func (vms *VMs)StartVM(vmID uuid.UUID) (int, string, error) {
         return 0, "", err
     }
 
+    vm.mutex.Lock()
+    defer vm.mutex.Unlock()
+
     if vm.IsRunning() {
-        return 0, "", errors.New("VM must be stopped")
+        return 0, "", errors.New("Failed starting VM: VM already running")
     }
 
     totalRAM, availRAM, err := utils.GetRAM()
     if err != nil {
-        return -1, "", errors.New("Failed obtaining memory info: "+err.Error())
+        return -1, "", errors.New("Failed starting VM: failed obtaining memory info: "+err.Error())
     }
 
     // We estimate that KVM allows for ~30% RAM saving (due to page sharing across VMs).
@@ -198,7 +203,7 @@ func (vms *VMs)StartVM(vmID uuid.UUID) (int, string, error) {
     // otherwise, refuses to run it in order to avoid RAM saturation.
     if availRAM - vms.usedRAM <= int(math.Round(float64(totalRAM)*(1.-consts.RamUsageLimit))) {
         vms.usedRAM -= estimatedVmRAM
-        return -1, "", errors.New("Insufficient free RAM to start VM")
+        return -1, "", errors.New("Failed starting VM: insufficient free RAM")
     }
 
     // Locates a free port randomly chosen between VMSpiceMinPort and VMSpiceMaxPort (inclusive).
@@ -222,15 +227,10 @@ func (vms *VMs)StartVM(vmID uuid.UUID) (int, string, error) {
         vms.rwlock.Lock()
         vms.usedPorts[vm.Run.Port] = false
         vms.usedRAM -= estimatedVmRAM
-        vms.updateVMMap(vm)
         vms.rwlock.Unlock()
     }
 
-    vm.mutex.Lock()
     pwd, err := vm.start(port, endofExecFn)
-    vms.updateVMMap(&vm)
-    vm.mutex.Unlock()
-
     return port, pwd, err
 }
 
@@ -303,9 +303,12 @@ func (vms *VMs)IsTemplateUsed(templateID string) bool {
     defer vms.rwlock.RUnlock()
 
     for _, vm := range vms.m {
+        vm.mutex.Lock()
         if vm.TemplateID.String() == templateID {
+            vm.mutex.Unlock()
             return true
         }
+        vm.mutex.Unlock()
     }
     return false
 }
@@ -348,7 +351,7 @@ func (vms *VMs)EditVM(vmID uuid.UUID, name string, cpus, ram int, nic NicType, u
         return err
     }
 
-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
 
@@ -391,7 +394,7 @@ func (vms *VMs)SetVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string, ne
 
     vm.Access[userEmail] = newAccess
 
-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
 
@@ -435,7 +438,7 @@ func (vms *VMs)DeleteVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string)
         return errors.New("User "+userEmail+" has no VM access")
     }
 
-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
 
@@ -444,78 +447,27 @@ func (vms *VMs)DeleteVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string)
 
 // Exports a VM's directory and its subdirectories into a tar.gz archive on the host.
 func (vms *VMs)ExportVMFiles(vm *VM, vmDir, tarGzFile string) error {
+    vm.mutex.Lock()
+    defer vm.mutex.Unlock()
     vmDisk := vm.getDiskPath()
     return exec.CopyFromVM(vmDisk, vmDir, tarGzFile)
 }
 
 // Imports files from a tar.gz archive into a VM's filesystem, in a specified directory.
 func (vms *VMs)ImportFilesToVM(vm *VM, tarGzFile, vmDir string) error {
-    if vm.IsRunning() {
-        return errors.New("VM must be stopped")
-    }
-
-    // Marks the VM to copy from as being busy.
-    if err := vms.setDiskBusy(vm); err != nil {
-        return errors.New("Failed setting disk busy flag during VM files import: "+err.Error())
-    }
-    // Clears the VM from being busy.
-    defer vms.clearDiskBusy(vm)
-
-    vmDisk := vm.getDiskPath()
-    return exec.CopyToVM(vmDisk, tarGzFile, vmDir)
-}
-
-// Marks a VM as "busy", meaning its disk file is being accessed for a possibly long time.
-func (vms *VMs)setDiskBusy(vm *VM) error {
     vm.mutex.Lock()
     defer vm.mutex.Unlock()
 
     if vm.IsRunning() {
-        return errors.New("Only a non-running VM can be set to busy")
+        return errors.New("VM must be stopped")
     }
 
+    // Marks VM as "busy", meaning its disk file is being accessed for a possibly long time.
     vm.DiskBusy = true
 
-    vms.rwlock.Lock()
-    defer vms.rwlock.Unlock()
-
-    vms.updateVMMap(vm)
-
-    return nil
-}
-
-func (vms *VMs)clearDiskBusy(vm *VM) error {
-    vm.mutex.Lock()
-    defer vm.mutex.Unlock()
-
-    if vm.IsRunning() {
-        return errors.New("Only a non-running VM can have its busy flag cleared")
-    }
-
-    vm.DiskBusy = false
-
-    vms.rwlock.Lock()
-    defer vms.rwlock.Unlock()
-
-    vms.updateVMMap(vm)
-
-    return nil
-}
-
-// Updates a VM in the map of VMs and writes its updated config file.
-func (vms *VMs)updateVM(vm *VM) error {
-    err := vm.writeConfig()
-    if err != nil {
-        return err
-    }
-
-    vms.updateVMMap(vm)
-    return nil
-}
+    // Clears the VM from being busy.
+    defer func(vm *VM) { vm.DiskBusy = false }(vm)
 
-// Updates a VM in the map of VMs.
-func (vms *VMs)updateVMMap(vm *VM) {
-    key := vm.ID.String()
-    delete(vms.m, key)
-    vms.m[key] = *vm
+    vmDisk := vm.getDiskPath()
+    return exec.CopyToVM(vmDisk, tarGzFile, vmDir)
 }
-- 
GitLab