[dts] [PATCH v2 11/16] framework/virt_resource: support multiple VMs module
Marvin Liu
yong.liu at intel.com
Wed Jan 10 01:11:09 CET 2018
1. Add serial/migrate/display port allocation support.
2. Add parallel lock for virtualization resource allocation functions.
3. Quick scan free port in port allocation function.
Signed-off-by: Marvin Liu <yong.liu at intel.com>
diff --git a/framework/virt_resource.py b/framework/virt_resource.py
index b830f4e..1b37d4c 100644
--- a/framework/virt_resource.py
+++ b/framework/virt_resource.py
@@ -31,10 +31,14 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
-from utils import get_obj_funcs
+from utils import get_obj_funcs, parallel_lock, RED
-INIT_FREE_PORT = 6060
+INIT_FREE_PORT = 6000
+INIT_SERIAL_PORT = 7000
+INIT_MIGRATE_PORT = 8000
+INIT_DISPLAY_PORT = 0
+QuickScan = True
class VirtResource(object):
@@ -149,6 +153,7 @@ class VirtResource(object):
for cpu in cpus:
self.__core_used(cpu)
+ @parallel_lock()
def alloc_cpu(self, vm='', number=-1, socket=-1, corelist=None):
"""
There're two options for request cpu resouce for vm.
@@ -207,12 +212,14 @@ class VirtResource(object):
return False
return True
+ @parallel_lock()
def free_cpu(self, vm):
if self.__vm_has_resource(vm, 'cores'):
for core in self.allocated_info[vm]['cores']:
self.__core_unused(core)
self.allocated_info[vm].pop('cores')
+ @parallel_lock()
def alloc_pf(self, vm='', number=-1, socket=-1, pflist=[]):
"""
There're two options for request pf devices for vm.
@@ -246,12 +253,14 @@ class VirtResource(object):
self.allocated_info[vm]['ports'] = ports
return ports
+ @parallel_lock()
def free_pf(self, vm):
if self.__vm_has_resource(vm, 'ports'):
for pci in self.allocated_info[vm]['ports']:
self.__port_unused(pci)
self.allocated_info[vm].pop('ports')
+ @parallel_lock()
def alloc_vf_from_pf(self, vm='', pf_pci='', number=-1, vflist=[]):
"""
There're two options for request vf devices of pf device.
@@ -286,12 +295,14 @@ class VirtResource(object):
self.allocated_info[vm]['vfs'] = vfs
return vfs
+ @parallel_lock()
def free_vf(self, vm):
if self.__vm_has_resource(vm, 'vfs'):
for pci in self.allocated_info[vm]['vfs']:
self.__vf_unused(pci)
self.allocated_info[vm].pop('vfs')
+ @parallel_lock()
def add_vf_on_pf(self, pf_pci='', vflist=[]):
"""
Add vf devices generated by specified pf devices.
@@ -307,6 +318,7 @@ class VirtResource(object):
self.used_vfs += used_vfs
self.vfs += vfs
+ @parallel_lock()
def del_vf_on_pf(self, pf_pci='', vflist=[]):
"""
Remove vf devices generated by specified pf devices.
@@ -327,66 +339,95 @@ class VirtResource(object):
del self.used_vfs[index]
del self.vfs[index]
- def alloc_port(self, vm=''):
+ @parallel_lock()
+ def _check_port_allocated(self, port):
+ """
+ Check whether port has been pre-allocated
+ """
+ for vm_info in self.allocated_info.values():
+ if vm_info.has_key('hostport') and port == vm_info['hostport']:
+ return True
+ if vm_info.has_key('serialport') and port == vm_info['serialport']:
+ return True
+ if vm_info.has_key('migrateport') and port == vm_info['migrateport']:
+ return True
+ if vm_info.has_key('displayport') and port == (vm_info['displayport'] + 5900):
+ return True
+ return False
+
+ @parallel_lock()
+ def alloc_port(self, vm='', port_type='connect'):
"""
Allocate unused host port for vm
"""
+ global INIT_FREE_PORT
+ global INIT_SERIAL_PORT
+ global INIT_MIGRATE_PORT
+ global INIT_DISPLAY_PORT
+
if vm == '':
print "Alloc host port request vitual machine name!!!"
return None
- port_start = INIT_FREE_PORT + randint(1, 100)
- port_step = randint(1, 10)
- port = None
- count = 20
+ if port_type == 'connect':
+ port = INIT_FREE_PORT
+ elif port_type == 'serial':
+ port = INIT_SERIAL_PORT
+ elif port_type == 'migrate':
+ port = INIT_MIGRATE_PORT
+ elif port_type == 'display':
+ port = INIT_DISPLAY_PORT + 5900
+
while True:
- if self.dut.check_port_occupied(port_start) is False:
- port = port_start
+ if self.dut.check_port_occupied(port) is False and self._check_port_allocated(port) is False:
break
- count -= 1
- if count < 0:
- print 'No available port on the host!!!'
- break
- port_start += port_step
+ else:
+ port += 1
+ continue
if vm not in self.allocated_info:
self.allocated_info[vm] = {}
- self.allocated_info[vm]['hostport'] = port
+ if port_type == 'connect':
+ self.allocated_info[vm]['hostport'] = port
+ elif port_type == 'serial':
+ self.allocated_info[vm]['serialport'] = port
+ elif port_type == 'migrate':
+ self.allocated_info[vm]['migrateport'] = port
+ elif port_type == 'display':
+ port -= 5900
+ self.allocated_info[vm]['displayport'] = port
+
+ # do not scan port from the beginning
+ if QuickScan:
+ if port_type == 'connect':
+ INIT_FREE_PORT = port
+ elif port_type == 'serial':
+ INIT_SERIAL_PORT = port
+ elif port_type == 'migrate':
+ INIT_MIGRATE_PORT = port
+ elif port_type == 'display':
+ INIT_DISPLAY_PORT = port
+
return port
+ @parallel_lock()
def free_port(self, vm):
if self.__vm_has_resource(vm, 'hostport'):
self.allocated_info[vm].pop('hostport')
-
- def alloc_vnc_num(self, vm=''):
- """
- Allocate unused host VNC display number for VM.
- """
- if vm == '':
- print "Alloc vnc display number request vitual machine name!!!"
- return None
-
- max_vnc_display_num = self.dut.get_maximal_vnc_num()
- free_vnc_display_num = max_vnc_display_num + 1
-
- if vm not in self.allocated_info:
- self.allocated_info[vm] = {}
-
- self.allocated_info[vm]['vnc_display_num'] = free_vnc_display_num
-
- return free_vnc_display_num
-
- def free_vnc_num(self, vm):
- if self.__vm_has_resource(vm, 'vnc_display_num'):
- self.allocated_info[vm].pop('vnc_display_num')
-
+ if self.__vm_has_resource(vm, 'serialport'):
+ self.allocated_info[vm].pop('serialport')
+ if self.__vm_has_resource(vm, 'migrateport'):
+ self.allocated_info[vm].pop('migrateport')
+ if self.__vm_has_resource(vm, 'displayport'):
+ self.allocated_info[vm].pop('displayport')
+
+ @parallel_lock()
def free_all_resource(self, vm):
"""
Free all resource VM has been allocated.
"""
self.free_port(vm)
- self.free_vnc_num(vm)
self.free_vf(vm)
self.free_pf(vm)
self.free_cpu(vm)
--
1.9.3
More information about the dts
mailing list