Commit 089b9111 authored by Florent Gluck

Reworked quite a bit of the codebase so that VMs are now referenced instead of copied.

It's lighter and more efficient, but I still need to work on some concurrency issues.
Bumped version to 1.6.2
parent 352a4f67
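The heart of the change is the switch from value semantics (map[string]VM: every lookup returns a copy, every mutation must be written back into the map) to reference semantics (map[string]*VM: one shared instance per VM, guarded by a per-VM mutex). A minimal sketch of the difference, using hypothetical Item/Store names rather than the project's actual types:

package main

import (
	"fmt"
	"sync"
)

// Value semantics: the map stores struct copies.
type ValItem struct{ Busy bool }

type ValueStore struct{ m map[string]ValItem }

func (s *ValueStore) SetBusy(key string) {
	it := s.m[key] // the lookup yields a copy
	it.Busy = true
	s.m[key] = it // write-back required, or the update is lost
}

// Reference semantics: the map stores pointers to shared instances.
type PtrItem struct {
	mu   sync.Mutex
	Busy bool
}

type PtrStore struct{ m map[string]*PtrItem }

func (s *PtrStore) SetBusy(key string) {
	it := s.m[key] // the lookup yields the shared instance
	it.mu.Lock()
	it.Busy = true // immediately visible to every holder of the pointer
	it.mu.Unlock()
}

func main() {
	vs := ValueStore{m: map[string]ValItem{"vm1": {}}}
	vs.SetBusy("vm1")

	ps := PtrStore{m: map[string]*PtrItem{"vm1": {}}}
	ps.SetBusy("vm1")

	fmt.Println(vs.m["vm1"].Busy, ps.m["vm1"].Busy) // true true
}

With pointers in the map, the old updateVM/updateVMMap write-back helpers become unnecessary, which is exactly what the diff below removes.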
@@ -101,7 +101,7 @@ func (r *RouterTemplates)CreateTemplateFromVM(c echo.Context) error {
     }
     // Creates a new template from the client's parameters.
-    template, err := vms.NewTemplateFromVM(p.Name, user.Email, p.Access, &vm)
+    template, err := vms.NewTemplateFromVM(p.Name, user.Email, p.Access, vm)
     if err != nil {
         return echo.NewHTTPError(http.StatusBadRequest, err.Error())
     }
...
@@ -34,7 +34,7 @@ func NewRouterVMs() *RouterVMs {
 // VM access cap: CAP_VM_LIST: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetListableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -45,7 +45,7 @@ func (r *RouterVMs)GetListableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_LIST: returns all running VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/attach -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetAttachableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_LIST_ANY, caps.CAP_VM_LIST, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -56,7 +56,7 @@ func (r *RouterVMs)GetAttachableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_DESTROY: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/del -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetDeletableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_DESTROY_ANY, caps.CAP_VM_DESTROY, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_DESTROY_ANY, caps.CAP_VM_DESTROY, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -67,7 +67,7 @@ func (r *RouterVMs)GetDeletableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_START: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/start -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetStartableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_START_ANY, caps.CAP_VM_START, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_START_ANY, caps.CAP_VM_START, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -78,7 +78,7 @@ func (r *RouterVMs)GetStartableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_STOP: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/stop -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetStoppableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_STOP_ANY, caps.CAP_VM_STOP, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_STOP_ANY, caps.CAP_VM_STOP, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -89,7 +89,7 @@ func (r *RouterVMs)GetStoppableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_REBOOT: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/reboot -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetRebootableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_REBOOT_ANY, caps.CAP_VM_REBOOT, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_REBOOT_ANY, caps.CAP_VM_REBOOT, func(vm *vms.VM) bool {
         return vm.IsRunning()
     })
 }
@@ -100,7 +100,7 @@ func (r *RouterVMs)GetRebootableVMs(c echo.Context) error {
 // VM access cap: CAP_VM_EDIT: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/edit -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetEditableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_EDIT_ANY, caps.CAP_VM_EDIT, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_EDIT_ANY, caps.CAP_VM_EDIT, func(vm *vms.VM) bool {
         return !vm.IsRunning()
     })
 }
@@ -118,7 +118,7 @@ func (r *RouterVMs)GetEditableVMAccessVMs(c echo.Context) error {
     }
     return c.JSONPretty(http.StatusOK, r.vms.GetNetworkSerializedVMs(
-        func(vm vms.VM) bool {
+        func(vm *vms.VM) bool {
             // First, checks if the user is the VM's owner
             if vm.IsOwner(user.Email) {
                 return !vm.IsRunning()
@@ -146,7 +146,7 @@ func (r *RouterVMs)GetEditableVMAccessVMs(c echo.Context) error {
 // VM access cap: VM_READFS: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/exportdir -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetDirExportableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_READFS_ANY, caps.CAP_VM_READFS, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_READFS_ANY, caps.CAP_VM_READFS, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -157,7 +157,7 @@ func (r *RouterVMs)GetDirExportableVMs(c echo.Context) error {
 // VM access cap: VM_WRITEFS: returns all VMs with this cap for the logged user.
 // curl --cacert ca.pem -X GET https://localhost:1077/vms/importfiles -H "Authorization: Bearer <AccessToken>"
 func (r *RouterVMs)GetFilesImportableVMs(c echo.Context) error {
-    return r.performVMsList(c, caps.CAP_VM_WRITEFS_ANY, caps.CAP_VM_WRITEFS, func(vm vms.VM) bool {
+    return r.performVMsList(c, caps.CAP_VM_WRITEFS_ANY, caps.CAP_VM_WRITEFS, func(vm *vms.VM) bool {
         return true
     })
 }
@@ -299,7 +299,7 @@ func (r *RouterVMs)EditVMByID(c echo.Context) error {
         if err := r.vms.EditVM(vm.ID, p.Name, p.Cpus, p.Ram, p.Nic, p.UsbDevs); err != nil {
             return echo.NewHTTPError(http.StatusBadRequest, err.Error())
         }
-        return c.JSONPretty(http.StatusOK, jsonMsg("OK"), " ")
+        return c.JSONPretty(http.StatusOK, vm.SerializeToNetwork(), " ")
     })
 }
@@ -510,7 +510,7 @@ func (r *RouterVMs)performVMsList(c echo.Context, userCapabilityAny, vmAccessCap ...
 // - owned by the logged user
 // - for which the logged user has the specified capability (vmAccessCapability) in the VM's access
     return c.JSONPretty(http.StatusOK, r.vms.GetNetworkSerializedVMs(
-        func(vm vms.VM) bool {
+        func(vm *vms.VM) bool {
             if vm.IsOwner(user.Email) {
                 return cond(vm)
             } else {
@@ -549,10 +549,10 @@ func (r *RouterVMs)performVMAction(c echo.Context, userCapabilityAny, vmAccessCa ...
     // First, checks if the user is the VM's owner
     if vm.IsOwner(user.Email) {
-        return action(c, &vm)
+        return action(c, vm)
     } else if user.HasCapability(userCapabilityAny) {
         // Next, checks if the user has the XX_ANY capability
-        return action(c, &vm)
+        return action(c, vm)
     } else {
         // Finally, check if the VM access for the logged user matches the required capability
         userCaps, exists := vm.Access[user.Email]
@@ -565,6 +565,6 @@ func (r *RouterVMs)performVMAction(c echo.Context, userCapabilityAny, vmAccessCa ...
             return echo.NewHTTPError(http.StatusUnauthorized, msgInsufficientCaps)
         }
-        return action(c, &vm)
+        return action(c, vm)
     }
 }
...
@@ -7,7 +7,7 @@ import (
 const (
     major  = 1
     minor  = 6
-    bugfix = 1
+    bugfix = 2
 )

 type Version struct {
...
@@ -32,16 +32,22 @@ var dummyTemplate = Template{}
 // Creates a template from a VM's disk.
 func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
-    // Marks the VM to copy from as being busy.
-    if err := vms.setDiskBusy(vm); err != nil {
-        return nil, errors.New("Failed setting disk busy flag during template creation: " + err.Error())
+    vm.mutex.Lock()
+
+    if vm.IsRunning() {
+        return nil, errors.New("VM must be stopped")
     }
+
+    // Marks VM as "busy", meaning its disk file is being accessed for a possibly long time.
+    vm.DiskBusy = true
     // Clears the VM from being busy.
-    defer vms.clearDiskBusy(vm)
+    defer func(vm *VM) { vm.DiskBusy = false }(vm)
     // Creates the template.
     template, err := newTemplate(name, owner, access)
     if err != nil {
+        vm.mutex.Unlock()
         return nil, err
     }
@@ -50,16 +56,18 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := os.Mkdir(templateDir, 0750); err != nil {
         msg := "Failed creating template dir: " + err.Error()
         log.Error(msg)
+        vm.mutex.Unlock()
         return nil, errors.New(msg)
     }
+    vm.mutex.Unlock()

     // 1) Copies VM's overlay file into a new one (in the VM's directory!):
     //    cp disk.qcow disk.tpl.qcow
     vmDiskFile, _ := filepath.Abs(filepath.Join(vm.dir, vmDiskFile))
     tplDiskFile, _ := filepath.Abs(filepath.Join(vm.dir, "/disk.tpl.qcow"))
     if err := utils.CopyFiles(vmDiskFile, tplDiskFile); err != nil {
         template.delete()
-        GetVMsInstance().updateVM(vm)
         log.Error("Failed copying VM overlay disk: " + err.Error())
         return nil, errors.New("Failed copying VM overlay disk: " + err.Error())
     }
@@ -69,7 +77,6 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := exec.QemuImgRebase(tplDiskFile); err != nil {
         template.delete()
         os.Remove(tplDiskFile)
-        GetVMsInstance().updateVM(vm)
         return nil, err
     }
@@ -79,7 +86,6 @@ func NewTemplateFromVM(name, owner, access string, vm *VM) (*Template, error) {
     if err := os.Rename(tplDiskFile, destFile); err != nil {
         template.delete()
         os.Remove(tplDiskFile)
-        GetVMsInstance().updateVM(vm)
         log.Error("Failed moving template disk image to final location: " + err.Error())
         return nil, errors.New("Failed moving template disk image to final location: " + err.Error())
     }
...
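NewTemplateFromVM now sets the VM's DiskBusy flag inline, under vm.mutex, instead of going through the removed setDiskBusy/clearDiskBusy helpers, and VM.start() refuses to boot while the flag is set. A minimal sketch of that interlock under simplified, hypothetical types (the real code spreads this across several files); note that, as in the commit, the deferred clear runs without re-taking the lock, likely among the concurrency issues still to be worked on:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// Hypothetical reduction of the DiskBusy interlock: a long disk operation
// (template creation, file import) flags the VM, and start() refuses to
// run while the flag is set.
type VM struct {
	mu       sync.Mutex
	Running  bool
	DiskBusy bool
}

func (vm *VM) withDiskBusy(op func() error) error {
	vm.mu.Lock()
	if vm.Running {
		vm.mu.Unlock()
		return errors.New("VM must be stopped")
	}
	vm.DiskBusy = true
	vm.mu.Unlock()
	// Clears the flag even if op fails; like the commit, this runs
	// without re-taking the lock.
	defer func() { vm.DiskBusy = false }()
	return op() // the long disk work happens outside the lock
}

func (vm *VM) start() error {
	vm.mu.Lock()
	defer vm.mu.Unlock()
	if vm.DiskBusy {
		return errors.New("Failed starting VM: disk is busy")
	}
	vm.Running = true
	return nil
}

func main() {
	vm := &VM{}
	_ = vm.withDiskBusy(func() error {
		fmt.Println(vm.start()) // refused: disk is busy
		return nil
	})
	fmt.Println(vm.start()) // succeeds once the flag is cleared
}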
@@ -346,10 +346,6 @@ func (vm *VM)delete() error {
 // Starts a VM and returns the access password.
 // Password is randomly generated.
 func (vm *VM)start(port int, endofExecFn endOfExecCallback) (string, error) {
-    if vm.IsRunning() {
-        return "", errors.New("Failed starting VM: VM is already running")
-    }
     if vm.DiskBusy {
         return "", errors.New("Failed starting VM: disk is busy")
     }
@@ -358,21 +354,21 @@ func (vm *VM)start(port int, endofExecFn endOfExecCallback) (string, error) {
     // allowing upper and lower case letters, disallowing repeat characters.
     pwd, err := passwordGen.Generate(8, 4, 0, false, false)
     if err != nil {
-        log.Error("Failed starting VM, password generation error: "+err.Error())
-        return "", errors.New("Failed starting VM, password generation error: "+err.Error())
+        log.Error("Failed starting VM: password generation error: "+err.Error())
+        return "", errors.New("Failed starting VM: password generation error: "+err.Error())
     }

     // Writes the password in a "secret" file.
     pwdFile, err := vm.writeSecretFile(pwd)
     if err != nil {
-        log.Error("Failed starting VM, error creating secret file: "+err.Error())
-        return "", errors.New("Failed starting VM, error creating secret file: "+err.Error())
+        log.Error("Failed starting VM: error creating secret file: "+err.Error())
+        return "", errors.New("Failed starting VM: error creating secret file: "+err.Error())
     }

     if err = vm.runQEMU(port, pwd, pwdFile, endofExecFn); err != nil {
         vm.removeSecretFile()
         os.Remove(vm.qgaSock) // If QEMU fails it's likely the Guest Agent file it created is still there.
-        return "", errors.New("Failed starting VM, error running QEMU: "+err.Error())
+        return "", errors.New("Failed starting VM: error running QEMU: "+err.Error())
     }

     return pwd, nil
@@ -396,7 +392,7 @@ func (vm *VM)removeSecretFile() {
     os.Remove(pwdFile)
 }

-// Kills by force a running VM.
+// Kills a running VM.
 func (vm *VM)kill() error {
     if !vm.IsRunning() {
         return errors.New("Failed stopping VM: VM is not running")
@@ -404,7 +400,7 @@ func (vm *VM)kill() error {
     // Sends a SIGINT signal to terminate the QEMU process.
     // Note that QEMU terminates with status code 0 in this case (i.e. no error).
-    if err := syscall.Kill(vm.Run.Pid, syscall.SIGINT); err != nil {
+    if err := syscall.Kill(vm.Run.Pid, syscall.SIGTERM); err != nil {
         log.Error("Failed stopping VM: "+err.Error())
         return errors.New("Failed stopping VM: "+err.Error())
     }
@@ -414,7 +410,7 @@
 // Gracefully shutdowns a running VM.
 // Uses QGA commands to talk to the VM, which means QEMU Guest Agent must be
-// running in the VM, otherwise it won't work.
+// running in the VM, otherwise nothing will happen.
 func (vm *VM)shutdown() error {
     prefix := "Shutdown failed: "
...
@@ -17,10 +17,10 @@ import (
 )

 type (
-    VMKeeperFn func(vm VM) bool
+    VMKeeperFn func(vm *VM) bool

     VMs struct {
-        m         map[string]VM
+        m         map[string]*VM
         dir       string        // Base directory where VMs are stored
         rwlock    *sync.RWMutex // RWlock to ensure the VMs' map (m) coherency
         usedPorts [65536]bool   // Ports used by VMs
@@ -41,7 +41,7 @@ func GetVMsInstance() *VMs {
 // NOTE: path is the root directory where VMs reside.
 func InitVMs() error {
     vmsDir := paths.GetInstance().VMsDir
-    vms = &VMs { m: make(map[string]VM), dir: vmsDir, rwlock: new(sync.RWMutex), usedRAM: 0 }
+    vms = &VMs { m: make(map[string]*VM), dir: vmsDir, rwlock: new(sync.RWMutex), usedRAM: 0 }

     errMsg := "Failed reading VMs directory: "
     dirs1, err := utils.GetSubDirs(vmsDir)
@@ -78,7 +78,7 @@ func InitVMs() error {
             }
             f.Close()
-            vms.m[vm.ID.String()] = *vm
+            vms.m[vm.ID.String()] = vm
         }
     }
 }
@@ -91,9 +91,11 @@ func (vms *VMs)GetNetworkSerializedVMs(keepFn VMKeeperFn) []VMNetworkSerialized
     vms.rwlock.RLock()
     list := []VMNetworkSerialized{}
     for _, vm := range vms.m {
+        vm.mutex.Lock()
         if keepFn(vm) {
             list = append(list, vm.SerializeToNetwork())
         }
+        vm.mutex.Unlock()
     }
     vms.rwlock.RUnlock()
@@ -105,16 +107,16 @@ func (vms *VMs)GetNetworkSerializedVMs(keepFn VMKeeperFn) []VMNetworkSerialized
 }

 // Returns a VM by its ID.
-func (vms *VMs)GetVM(vmID uuid.UUID) (VM, error) {
+func (vms *VMs)GetVM(vmID uuid.UUID) (*VM, error) {
     vms.rwlock.RLock()
     defer vms.rwlock.RUnlock()
     return vms.getVMUnsafe(vmID)
 }

-func (vms *VMs)getVMUnsafe(vmID uuid.UUID) (VM, error) {
+func (vms *VMs)getVMUnsafe(vmID uuid.UUID) (*VM, error) {
     vm, exists := vms.m[vmID.String()]
     if !exists {
-        return dummyVM, errors.New("VM not found")
+        return &dummyVM, errors.New("VM not found")
     }
     return vm, nil
 }
@@ -152,18 +154,18 @@ func (vms *VMs)DeleteVM(vmID uuid.UUID) error {
 // Adds a VM and writes its files.
 func (vms *VMs)AddVM(vm *VM) error {
     vm.mutex.Lock()
-    defer vm.mutex.Unlock()

     // First, writes VM files since it can fail.
     err := vm.writeFiles()
     if err != nil {
+        vm.mutex.Unlock()
         return err
     }
+    vm.mutex.Unlock()

     // Adds VM to the map of VMs.
     vms.rwlock.Lock()
     key := vm.ID.String()
-    vms.m[key] = *vm
+    vms.m[key] = vm
     vms.rwlock.Unlock()

     return nil
@@ -180,13 +182,16 @@ func (vms *VMs)StartVM(vmID uuid.UUID) (int, string, error) {
         return 0, "", err
     }

+    vm.mutex.Lock()
+    defer vm.mutex.Unlock()
+
     if vm.IsRunning() {
-        return 0, "", errors.New("VM must be stopped")
+        return 0, "", errors.New("Failed starting VM: VM already running")
     }

     totalRAM, availRAM, err := utils.GetRAM()
     if err != nil {
-        return -1, "", errors.New("Failed obtaining memory info: "+err.Error())
+        return -1, "", errors.New("Failed starting VM: failed obtaining memory info: "+err.Error())
     }

     // We estimate that KVM allows for ~30% RAM saving (due to page sharing across VMs).
@@ -198,7 +203,7 @@
     // otherwise, refuses to run it in order to avoid RAM saturation.
     if availRAM - vms.usedRAM <= int(math.Round(float64(totalRAM)*(1.-consts.RamUsageLimit))) {
         vms.usedRAM -= estimatedVmRAM
-        return -1, "", errors.New("Insufficient free RAM to start VM")
+        return -1, "", errors.New("Failed starting VM: insufficient free RAM")
     }

     // Locates a free port randomly chosen between VMSpiceMinPort and VMSpiceMaxPort (inclusive).
@@ -222,15 +227,10 @@
         vms.rwlock.Lock()
         vms.usedPorts[vm.Run.Port] = false
         vms.usedRAM -= estimatedVmRAM
-        vms.updateVMMap(vm)
         vms.rwlock.Unlock()
     }

-    vm.mutex.Lock()
     pwd, err := vm.start(port, endofExecFn)
-    vms.updateVMMap(&vm)
-    vm.mutex.Unlock()

     return port, pwd, err
 }
@@ -303,9 +303,12 @@ func (vms *VMs)IsTemplateUsed(templateID string) bool {
     defer vms.rwlock.RUnlock()

     for _, vm := range vms.m {
+        vm.mutex.Lock()
         if vm.TemplateID.String() == templateID {
+            vm.mutex.Unlock()
             return true
         }
+        vm.mutex.Unlock()
     }
     return false
 }
@@ -348,7 +351,7 @@ func (vms *VMs)EditVM(vmID uuid.UUID, name string, cpus, ram int, nic NicType, u ...
         return err
     }

-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
@@ -391,7 +394,7 @@ func (vms *VMs)SetVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string, ne ...
     vm.Access[userEmail] = newAccess

-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
@@ -435,7 +438,7 @@ func (vms *VMs)DeleteVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string)
         return errors.New("User "+userEmail+" has no VM access")
     }

-    if err = vms.updateVM(&vm); err != nil {
+    if err = vm.writeConfig(); err != nil {
         return err
     }
@@ -444,78 +447,27 @@
 // Exports a VM's directory and its subdirectories into a tar.gz archive on the host.
 func (vms *VMs)ExportVMFiles(vm *VM, vmDir, tarGzFile string) error {
+    vm.mutex.Lock()
+    defer vm.mutex.Unlock()
     vmDisk := vm.getDiskPath()
     return exec.CopyFromVM(vmDisk, vmDir, tarGzFile)
 }

 // Imports files from a tar.gz archive into a VM's filesystem, in a specified directory.
 func (vms *VMs)ImportFilesToVM(vm *VM, tarGzFile, vmDir string) error {
-    if vm.IsRunning() {
-        return errors.New("VM must be stopped")
-    }
-    // Marks the VM to copy from as being busy.
-    if err := vms.setDiskBusy(vm); err != nil {
-        return errors.New("Failed setting disk busy flag during VM files import: "+err.Error())
-    }
-    // Clears the VM from being busy.
-    defer vms.clearDiskBusy(vm)
-    vmDisk := vm.getDiskPath()
-    return exec.CopyToVM(vmDisk, tarGzFile, vmDir)
-}
-
-// Marks a VM as "busy", meaning its disk file is being accessed for a possibly long time.
-func (vms *VMs)setDiskBusy(vm *VM) error {
     vm.mutex.Lock()
     defer vm.mutex.Unlock()
     if vm.IsRunning() {
-        return errors.New("Only a non-running VM can be set to busy")
+        return errors.New("VM must be stopped")
     }
+    // Marks VM as "busy", meaning its disk file is being accessed for a possibly long time.
     vm.DiskBusy = true
-    vms.rwlock.Lock()
-    defer vms.rwlock.Unlock()
-    vms.updateVMMap(vm)
-    return nil
-}
-
-func (vms *VMs)clearDiskBusy(vm *VM) error {
-    vm.mutex.Lock()
-    defer vm.mutex.Unlock()
-    if vm.IsRunning() {
-        return errors.New("Only a non-running VM can have its busy flag cleared")
-    }
-    vm.DiskBusy = false
-    vms.rwlock.Lock()
-    defer vms.rwlock.Unlock()
-    vms.updateVMMap(vm)
-    return nil
-}
-
-// Updates a VM in the map of VMs and writes its updated config file.
-func (vms *VMs)updateVM(vm *VM) error {
-    err := vm.writeConfig()
-    if err != nil {
-        return err
-    }
-    vms.updateVMMap(vm)
-    return nil
-}
-
-// Updates a VM in the map of VMs.
-func (vms *VMs)updateVMMap(vm *VM) {
-    key := vm.ID.String()
-    delete(vms.m, key)
-    vms.m[key] = *vm
-}
+    // Clears the VM from being busy.
+    defer func(vm *VM) { vm.DiskBusy = false }(vm)
+    vmDisk := vm.getDiskPath()
+    return exec.CopyToVM(vmDisk, tarGzFile, vmDir)
+}
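Iterating the shared map now takes two lock levels, as GetNetworkSerializedVMs and IsTemplateUsed show above: the map-level RWMutex protects the map structure, and each VM's own mutex protects its fields while the filter runs. A minimal, self-contained sketch of the pattern (hypothetical Snapshot name, simplified types):

package main

import (
	"fmt"
	"sync"
)

type VM struct {
	mu   sync.Mutex
	Name string
}

type VMs struct {
	rwlock sync.RWMutex
	m      map[string]*VM
}

// Snapshot returns the names of the VMs accepted by keep. The map lock is
// held in read mode for the whole walk; each VM's lock is held only while
// that VM is examined.
func (vms *VMs) Snapshot(keep func(*VM) bool) []string {
	vms.rwlock.RLock()
	defer vms.rwlock.RUnlock()
	var out []string
	for _, vm := range vms.m {
		vm.mu.Lock()
		if keep(vm) {
			out = append(out, vm.Name)
		}
		vm.mu.Unlock()
	}
	return out
}

func main() {
	vms := &VMs{m: map[string]*VM{
		"a": {Name: "debian"},
		"b": {Name: "alpine"},
	}}
	fmt.Println(vms.Snapshot(func(*VM) bool { return true }))
}

Acquiring the locks in the same order everywhere (map lock first, then VM lock) is what keeps this pattern deadlock-free.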