// vms.go — management of the collection of virtual machines (package vms).
package vms
import (
"os"
"sort"
"sync"
"errors"
"path/filepath"
"nexus-server/exec"
"nexus-server/caps"
"nexus-server/paths"
"nexus-server/utils"
"nexus-server/logger"
"nexus-server/consts"
"github.com/google/uuid"
)
type (
	// VMKeeperFn is a predicate used to filter VMs: it returns true for
	// VMs that must be kept in the result of a query.
	VMKeeperFn func(vm VM) bool

	// VMs is the collection of all known VMs, keyed by their string UUID.
	VMs struct {
		m         map[string]VM  // VMs indexed by vm.ID.String()
		dir       string         // Base directory where VMs are stored
		rwlock    *sync.RWMutex  // RWlock to ensure the coherency of the map (m) of VMs
		usedPorts [65536]bool    // Indicates which ports are used by the VMs
	}
)
// Package-level logger shared by all functions in this file.
var log = logger.GetInstance()

// Package-level singleton holding every known VM; set by InitVMs.
var vms *VMs

// Returns a VMs "singleton".
// IMPORTANT: the InitVMs function must have been previously called!
func GetVMsInstance() *VMs {
	return vms
}
// InitVMs creates all VMs from their config files on disk and initializes
// the package-level singleton returned by GetVMsInstance.
// VMs live three directory levels below the base VMs directory
// (as returned by paths.GetInstance().VMsDir).
// A VM whose config or disk file cannot be read is skipped with a warning;
// only a failure to list directories aborts the whole initialization.
func InitVMs() error {
	vmsDir := paths.GetInstance().VMsDir
	vms = &VMs{m: make(map[string]VM), dir: vmsDir, rwlock: new(sync.RWMutex)}
	errMsg := "Failed reading VMs directory: "
	dirs1, err := utils.GetSubDirs(vmsDir)
	if err != nil {
		return errors.New(errMsg + err.Error())
	}
	for _, d1 := range dirs1 {
		dirs2, err := utils.GetSubDirs(d1)
		if err != nil {
			return errors.New(errMsg + err.Error())
		}
		for _, d2 := range dirs2 {
			dirs3, err := utils.GetSubDirs(d2)
			if err != nil {
				return errors.New(errMsg + err.Error())
			}
			for _, vmDir := range dirs3 {
				filename := filepath.Join(vmDir, vmConfFile)
				vm, err := newVMFromFile(filename)
				if err != nil {
					log.Warn("Skipping VM: failed reading \"" + filename + "\" " + err.Error())
					continue
				}
				// Checks the VM's disk file exists and is readable.
				filename = filepath.Join(vmDir, vmDiskFile)
				f, err := os.OpenFile(filename, os.O_RDONLY, 0)
				if err != nil {
					// BUG FIX: message used to say "config file" although
					// this check is on the VM's disk file.
					log.Warn("Skipping VM: failed reading disk file: " + err.Error())
					continue
				}
				f.Close()
				vms.m[vm.ID.String()] = *vm
			}
		}
	}
	return nil
}
// GetVMs returns every VM for which keepFn returns true.
// The result is sorted by VM name in ascending order.
func (vms *VMs) GetVMs(keepFn VMKeeperFn) []VM {
	vms.rwlock.RLock()
	kept := []VM{}
	for _, candidate := range vms.m {
		if keepFn(candidate) {
			kept = append(kept, candidate)
		}
	}
	vms.rwlock.RUnlock()

	// Deterministic ordering: map iteration order is random.
	sort.Slice(kept, func(a, b int) bool {
		return kept[a].Name < kept[b].Name
	})
	return kept
}
// GetVM returns the VM with the given ID, or an error if it does not exist.
func (vms *VMs) GetVM(vmID uuid.UUID) (VM, error) {
	vms.rwlock.RLock()
	vm, err := vms.getVMUnsafe(vmID)
	vms.rwlock.RUnlock()
	return vm, err
}
// getVMUnsafe looks a VM up by ID without any locking.
// Callers must already hold vms.rwlock (read or write).
func (vms *VMs) getVMUnsafe(vmID uuid.UUID) (VM, error) {
	if vm, ok := vms.m[vmID.String()]; ok {
		return vm, nil
	}
	return dummyVM, errors.New("VM not found")
}
// DeleteVM deletes the VM with the given ID: its files and directories are
// removed from disk, then it is dropped from the map of VMs.
func (vms *VMs) DeleteVM(vmID uuid.UUID) error {
	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()

	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}

	// Deletes the VM's files (and directories) under its own lock.
	vm.mutex.Lock()
	err = vm.delete()
	vm.mutex.Unlock()
	if err != nil {
		return err
	}

	// Only forget the VM once its on-disk state is gone.
	delete(vms.m, vm.ID.String())
	return nil
}
// AddVM writes the VM's files to disk and, on success, registers the VM in
// the map of VMs. Nothing is registered if writing the files fails.
func (vms *VMs) AddVM(vm *VM) error {
	// Writing the files is the only step that can fail, so do it first.
	vm.mutex.Lock()
	err := vm.writeFiles()
	vm.mutex.Unlock()
	if err != nil {
		return err
	}

	vms.rwlock.Lock()
	vms.m[vm.ID.String()] = *vm
	vms.rwlock.Unlock()
	return nil
}
// StartVM starts the VM with the given ID.
// Returns the port on which the VM is running and the access password.
func (vms *VMs) StartVM(vmID uuid.UUID) (int, string, error) {
	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()
	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return 0, "", err
	}
	_, availRAM, err := utils.GetRAM()
	if err != nil {
		return -1, "", errors.New("Failed obtaining memory info: " + err.Error())
	}
	// Check there will be at least 16GB of RAM left after the VM has started,
	// otherwise, we prevent the execution of the VM to avoid RAM saturation.
	// Thanks to Linux's KSVM technology, pages sharing the same content are actually shared
	// which dramatically reduces the amount of RAM being used (roughly ~50% in average).
	log.Info("vm.RAM: ", vm.Ram)
	log.Info("Available RAM: ", availRAM)
	if availRAM-vm.Ram < 16*1024 {
		return -1, "", errors.New("Insufficient free RAM to start VM")
	}
	// Locates a free port randomly chosen between VMSpiceMinPort and VMSpiceMaxPort (inclusive).
	// NOTE(review): this loop never terminates if every port in the range is
	// taken; acceptable only if the range is much larger than the VM count.
	var port int
	for {
		port = utils.Rand(consts.VMSpiceMinPort, consts.VMSpiceMaxPort)
		if !vms.usedPorts[port] && utils.IsPortAvailable(port) {
			vms.usedPorts[port] = true
			break
		}
	}
	// This callback is called once the VM started with vm.start terminates:
	// it cleans up the VM's runtime state and frees its port.
	endofExecFn := func(vm *VM) {
		vm.mutex.Lock()
		vm.removeSecretFile()
		vm.resetStates()
		vm.mutex.Unlock()
		vms.rwlock.Lock()
		vms.usedPorts[vm.Run.Port] = false
		vms.updateVMMap(vm)
		vms.rwlock.Unlock()
	}
	vm.mutex.Lock()
	pwd, err := vm.start(port, endofExecFn)
	if err != nil {
		// BUG FIX: release the reserved port when the VM fails to start,
		// otherwise the slot in usedPorts leaks forever (endofExecFn only
		// runs for VMs that actually started).
		vms.usedPorts[port] = false
	}
	vms.updateVMMap(&vm)
	vm.mutex.Unlock()
	return port, pwd, err
}
// StopVM forcibly stops the VM with the given ID.
func (vms *VMs) StopVM(vmID uuid.UUID) error {
	vms.rwlock.RLock()
	defer vms.rwlock.RUnlock()

	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}

	vm.mutex.Lock()
	defer vm.mutex.Unlock()
	return vm.stop()
}
// ShutdownVM gracefully stops the VM with the given ID.
func (vms *VMs) ShutdownVM(vmID uuid.UUID) error {
	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()

	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}

	vm.mutex.Lock()
	defer vm.mutex.Unlock()
	return vm.shutdown()
}
// IsTemplateUsed reports whether at least one VM was created from the
// template with the given ID.
func (vms *VMs) IsTemplateUsed(templateID string) bool {
	vms.rwlock.RLock()
	defer vms.rwlock.RUnlock()

	used := false
	for _, vm := range vms.m {
		if vm.TemplateID.String() == templateID {
			used = true
			break
		}
	}
	return used
}
// EditVM edits a VM's specs: name, cpus, ram, nic.
// Only non-zero / non-empty arguments are applied; the updated VM is
// validated before being persisted.
func (vms *VMs) EditVM(vmID uuid.UUID, name string, cpus, ram int, nic NicType) error {
	// BUG FIX: the lookup was previously done without holding rwlock, racing
	// with concurrent map writers. Locking here also gives the same lock
	// ordering (rwlock, then vm.mutex) as SetVMAccess/DeleteVMAccess.
	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()
	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}
	// Only updates fields that have changed.
	if name != "" {
		vm.Name = name
	}
	if cpus > 0 {
		vm.Cpus = cpus
	}
	if ram > 0 {
		vm.Ram = ram
	}
	if nic != "" {
		vm.Nic = nic
	}
	if err = vm.validate(); err != nil {
		return err
	}
	vm.mutex.Lock()
	defer vm.mutex.Unlock()
	if err = vms.updateVM(&vm); err != nil {
		return errors.New("Failed updating VM")
	}
	return nil
}
// SetVMAccess sets a VM's access capabilities for a given user (email).
// loggedUserEmail is the email of the currently logged user; userEmail is
// the email of the user whose access is modified. The logged user must hold
// the CAP_VM_SET_ACCESS capability on this VM.
func (vms *VMs) SetVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string, newAccess caps.Capabilities) error {
	if !caps.AreVMAccessCapsValid(newAccess) {
		return errors.New("Invalid capability")
	}

	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()

	// Retrieves the VM for which the access caps must be changed.
	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}

	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	// The logged user must have VM_SET_ACCESS in her/his VM access caps.
	if _, ok := vm.Access[loggedUserEmail][caps.CAP_VM_SET_ACCESS]; !ok {
		return errors.New("Insufficient capability")
	}

	vm.Access[userEmail] = newAccess
	if err = vms.updateVM(&vm); err != nil {
		return errors.New("Failed updating VM")
	}
	return nil
}
// DeleteVMAccess removes a VM's access for a given user (email).
// loggedUserEmail is the email of the currently logged user; userEmail is
// the email of the user whose access is removed. The logged user must hold
// the CAP_VM_SET_ACCESS capability on this VM.
func (vms *VMs) DeleteVMAccess(vmID uuid.UUID, loggedUserEmail, userEmail string) error {
	vms.rwlock.Lock()
	defer vms.rwlock.Unlock()

	// Retrieves the VM for which the access caps must be changed.
	vm, err := vms.getVMUnsafe(vmID)
	if err != nil {
		return err
	}

	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	// The logged user must have VM_SET_ACCESS in her/his VM access caps.
	if _, ok := vm.Access[loggedUserEmail][caps.CAP_VM_SET_ACCESS]; !ok {
		return errors.New("Insufficient capability")
	}

	// Drops the user from the Access map.
	delete(vm.Access, userEmail)
	if err = vms.updateVM(&vm); err != nil {
		return errors.New("Failed updating VM")
	}
	return nil
}
// ExportVMFiles exports a VM's directory and its subdirectories into a tar
// archive on the host.
func (vms *VMs) ExportVMFiles(vm *VM, vmDir, tarFile string) error {
	return exec.CopyFromVM(vm.getDiskPath(), vmDir, tarFile)
}
// ImportFilesToVM imports files from hostDir into the vmDir directory of a
// VM's filesystem. The VM's disk is flagged busy for the whole operation.
func (vms *VMs) ImportFilesToVM(vm *VM, hostDir, vmDir string) error {
	// Marks the VM to copy into as being busy.
	if err := vms.setDiskBusy(vm); err != nil {
		return errors.New("Failed setting disk busy flag during VM files import: " + err.Error())
	}
	// Clears the busy flag once the copy is done.
	defer vms.clearDiskBusy(vm)

	return exec.CopyToVM(vm.getDiskPath(), hostDir, vmDir)
}
// setDiskBusy marks a VM as "busy", meaning its disk file is being accessed
// for a possibly long time. Fails if the VM is currently running.
func (vms *VMs) setDiskBusy(vm *VM) error {
	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	if vm.IsRunning() {
		return errors.New("Only a non-running VM can be set to busy")
	}
	vm.DiskBusy = true

	vms.rwlock.Lock()
	vms.updateVMMap(vm)
	vms.rwlock.Unlock()
	return nil
}
// clearDiskBusy clears a VM's "busy" flag. Fails if the VM is currently
// running.
func (vms *VMs) clearDiskBusy(vm *VM) error {
	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	if vm.IsRunning() {
		return errors.New("Only a non-running VM can have its busy flag cleared")
	}
	vm.DiskBusy = false

	vms.rwlock.Lock()
	vms.updateVMMap(vm)
	vms.rwlock.Unlock()
	return nil
}
// updateVM persists a VM's updated config file and, on success, refreshes
// its entry in the map of VMs. Callers must hold vms.rwlock.
func (vms *VMs) updateVM(vm *VM) error {
	if err := vm.writeConfig(); err != nil {
		return err
	}
	vms.updateVMMap(vm)
	return nil
}
// updateVMMap updates (or inserts) a VM in the map of VMs.
// Callers must hold vms.rwlock.
func (vms *VMs) updateVMMap(vm *VM) {
	// A map assignment overwrites any existing entry, so the previous
	// delete-then-insert sequence was redundant.
	vms.m[vm.ID.String()] = *vm
}