diff --git a/IM/ConfManager.py b/IM/ConfManager.py
index 0d21a2cd7..f0253e34e 100644
--- a/IM/ConfManager.py
+++ b/IM/ConfManager.py
@@ -147,6 +147,9 @@ def run(self):
while not self._stop:
if self.init_time + self.max_ctxt_time < time.time():
ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Max contextualization time passed. Exit thread.")
+ # Kill the ansible processes
+ for vm in self.inf.get_vm_list():
+ vm.kill_check_ctxt_process()
return
vms_configuring = self.check_running_pids(vms_configuring)
@@ -427,6 +430,40 @@ def generate_basic_playbook(self, tmp_dir):
recipe_files.append("basic_task_all.yml")
return recipe_files
+ def generate_mount_disks_tasks(self, system):
+ """
+ Generate a set of tasks to format and mount the specified disks
+ """
+ res = ""
+ cont = 1
+
+ while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".device"):
+ disk_device = system.getValue("disk." + str(cont) + ".device")
+ disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path")
+ disk_fstype = system.getValue("disk." + str(cont) + ".fstype")
+
+ # Only add the tasks if the user has specified a mount_path and a filesystem
+ if disk_mount_path and disk_fstype:
+ # This recipe works with EC2 and OpenNebula. It must be tested/completed with other providers
+ with_first_found = ' with_first_found: \n'
+ with_first_found += ' - "/dev/sd' + disk_device[-1] + '"\n'
+ with_first_found += ' - "/dev/hd' + disk_device[-1] + '"\n'
+ with_first_found += ' - "/dev/xvd' + disk_device[-1] + '"\n'
+
+ res += ' # Tasks to format and mount disk%d from device %s in %s\n' % (cont, disk_device, disk_mount_path)
+ res += ' - shell: (echo n; echo p; echo 1; echo ; echo; echo w) | fdisk {{item}} creates={{item}}1\n'
+ res += with_first_found
+ res += ' - filesystem: fstype=' + disk_fstype + ' dev={{item}}1\n'
+ res += with_first_found
+ res += ' - file: path=' + disk_mount_path + ' state=directory recurse=yes\n'
+ res += ' - mount: name=' + disk_mount_path + ' src={{item}}1 state=mounted fstype=' + disk_fstype +'\n'
+ res += with_first_found
+ res += '\n'
+
+ cont +=1
+
+ return res
+
def generate_main_playbook(self, vm, group, tmp_dir):
"""
Generate the main playbook to be launched in all the VMs.
@@ -447,6 +484,9 @@ def generate_main_playbook(self, vm, group, tmp_dir):
conf_content += " tasks: \n"
conf_content += " - debug: msg='Install user requested apps'\n"
+ # Generate a set of tasks to format and mount the specified disks
+ conf_content += self.generate_mount_disks_tasks(vm.info.systems[0])
+
for app_name, recipe in recipes:
self.inf.add_cont_msg("App: " + app_name + " set to be installed.")
@@ -512,43 +552,47 @@ def configure_master(self):
"""
success = True
if not self.inf.ansible_configured:
- try:
- ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": Start the contextualization process.")
-
- ssh = self.inf.vm_master.get_ssh(retry=True)
- # Activate tty mode to avoid some problems with sudo in REL
- ssh.tty = True
-
- # configuration dir os th emaster node to copy all the contextualization files
- tmp_dir = tempfile.mkdtemp()
- # Now call the ansible installation process on the master node
- configured_ok = self.configure_ansible(ssh, tmp_dir)
-
- if not configured_ok:
- ConfManager.logger.error("Inf ID: " + str(self.inf.id) + ": Error in the ansible installation process")
+ success = False
+ cont = 0
+ while not success and cont < Config.PLAYBOOK_RETRIES:
+ cont += 1
+ try:
+ ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": Start the contextualization process.")
+
+ ssh = self.inf.vm_master.get_ssh(retry=True)
+ # Activate tty mode to avoid some problems with sudo in REL
+ ssh.tty = True
+
+ # configuration dir of the master node to copy all the contextualization files
+ tmp_dir = tempfile.mkdtemp()
+ # Now call the ansible installation process on the master node
+ configured_ok = self.configure_ansible(ssh, tmp_dir)
+
+ if not configured_ok:
+ ConfManager.logger.error("Inf ID: " + str(self.inf.id) + ": Error in the ansible installation process")
+ if not self.inf.ansible_configured: self.inf.ansible_configured = False
+ else:
+ ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": Ansible installation finished successfully")
+
+ remote_dir = Config.REMOTE_CONF_DIR
+ ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Copy the contextualization agent files")
+ ssh.sftp_mkdir(remote_dir)
+ files = []
+ files.append((Config.IM_PATH + "/SSH.py",remote_dir + "/SSH.py"))
+ files.append((Config.IM_PATH + "/ansible/ansible_callbacks.py", remote_dir + "/ansible_callbacks.py"))
+ files.append((Config.IM_PATH + "/ansible/ansible_launcher.py", remote_dir + "/ansible_launcher.py"))
+ files.append((Config.CONTEXTUALIZATION_DIR + "/ctxt_agent.py", remote_dir + "/ctxt_agent.py"))
+ ssh.sftp_put_files(files)
+
+ success = configured_ok
+
+ except Exception, ex:
+ ConfManager.logger.exception("Inf ID: " + str(self.inf.id) + ": Error in the ansible installation process")
+ self.inf.add_cont_msg("Error in the ansible installation process: " + str(ex))
if not self.inf.ansible_configured: self.inf.ansible_configured = False
- else:
- ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": Ansible installation finished successfully")
-
- remote_dir = Config.REMOTE_CONF_DIR
- ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Copy the contextualization agent files")
- ssh.sftp_mkdir(remote_dir)
- files = []
- files.append((Config.IM_PATH + "/SSH.py",remote_dir + "/SSH.py"))
- files.append((Config.IM_PATH + "/ansible/ansible_callbacks.py", remote_dir + "/ansible_callbacks.py"))
- files.append((Config.IM_PATH + "/ansible/ansible_launcher.py", remote_dir + "/ansible_launcher.py"))
- files.append((Config.CONTEXTUALIZATION_DIR + "/ctxt_agent.py", remote_dir + "/ctxt_agent.py"))
- ssh.sftp_put_files(files)
-
- success = configured_ok
-
- except Exception, ex:
- ConfManager.logger.exception("Inf ID: " + str(self.inf.id) + ": Error in the ansible installation process")
- self.inf.add_cont_msg("Error in the ansible installation process: " + str(ex))
- if not self.inf.ansible_configured: self.inf.ansible_configured = False
- success = False
- finally:
- shutil.rmtree(tmp_dir, ignore_errors=True)
+ success = False
+ finally:
+ shutil.rmtree(tmp_dir, ignore_errors=True)
if success:
self.inf.ansible_configured = True
@@ -705,11 +749,19 @@ def relaunch_vm(self, vm, failed_cloud = False):
"""
Remove and launch again the specified VM
"""
- InfrastructureManager.InfrastructureManager.RemoveResource(self.inf.id, vm.id, self.auth)
+ try:
+ removed = InfrastructureManager.InfrastructureManager.RemoveResource(self.inf.id, vm.im_id, self.auth)
+ except:
+ ConfManager.logger.exception("Inf ID: " + str(self.inf.id) + ": Error removing a failed VM.")
+ removed = 0
+
+ if removed != 1:
+ ConfManager.logger.error("Inf ID: " + str(self.inf.id) + ": Error removing a failed VM. Not launching a new one.")
+ return
new_radl = ""
for net in vm.info.networks:
- new_radl = "network " + net.id + "\n"
+ new_radl += "network " + net.id + "\n"
new_radl += "system " + vm.getRequestedSystem().name + "\n"
new_radl += "deploy " + vm.getRequestedSystem().name + " 1"
@@ -1002,10 +1054,40 @@ def configure_ansible(self, ssh, tmp_dir):
for galaxy_name in modules:
if galaxy_name:
recipe_out = open(tmp_dir + "/" + ConfManager.MASTER_YAML, 'a')
- self.inf.add_cont_msg("Galaxy role " + galaxy_name + " detected setting to install.")
- ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Install " + galaxy_name + " with ansible-galaxy.")
- recipe_out.write(" - name: Install the " + galaxy_name + " role with ansible-galaxy\n")
- recipe_out.write(" command: ansible-galaxy --force install " + galaxy_name + "\n")
+
+ if galaxy_name.startswith("http"):
+ # in case of http url, the file must be compressed
+ # it must contain only one directory with the same name of the compressed file
+ # (without extension) with the ansible role content
+ filename = os.path.basename(galaxy_name)
+ self.inf.add_cont_msg("Remote file " + galaxy_name + " detected, setting to install.")
+ ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Install " + galaxy_name + " with ansible-galaxy.")
+ recipe_out.write(" - file: path=/etc/ansible/roles state=directory recurse=yes\n")
+ recipe_out.write(" - get_url: url=" + galaxy_name + " dest=/tmp/" + filename + "\n")
+ recipe_out.write(" - unarchive: src=/tmp/" + filename + " dest=/etc/ansible/roles copy=no\n")
+ elif galaxy_name.startswith("git"):
+ # in case of a git repo, the user must specify the role name using a | after the url
+ parts = galaxy_name.split("|")
+ if len(parts) > 1:
+ url = parts[0]
+ rolename = parts[1]
+ self.inf.add_cont_msg("Git Repo " + url + " detected, setting to install.")
+ ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Clone " + url + " with git.")
+ recipe_out.write(" - file: path=/etc/ansible/roles state=directory\n")
+ recipe_out.write(" - yum: name=git\n")
+ recipe_out.write(' when: ansible_os_family == "RedHat"\n')
+ recipe_out.write(" - apt: name=git\n")
+ recipe_out.write(' when: ansible_os_family == "Debian"\n')
+ recipe_out.write(" - git: repo=" + url + " dest=/etc/ansible/roles/" + rolename + " accept_hostkey=yes\n")
+ else:
+ self.inf.add_cont_msg("Not specified the rolename. Ignoring git repo.")
+ ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": Not specified the rolename. Ignoring git repo.")
+ else:
+ self.inf.add_cont_msg("Galaxy role " + galaxy_name + " detected setting to install.")
+ ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Install " + galaxy_name + " with ansible-galaxy.")
+ recipe_out.write(" - name: Install the " + galaxy_name + " role with ansible-galaxy\n")
+ recipe_out.write(" command: ansible-galaxy --force install " + galaxy_name + "\n")
+
+
recipe_out.close()
self.inf.add_cont_msg("Performing preliminary steps to configure Ansible.")
diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py
index f92f57a93..10703fff3 100644
--- a/IM/InfrastructureInfo.py
+++ b/IM/InfrastructureInfo.py
@@ -371,7 +371,15 @@ def is_ctxt_process_running(self):
self.conf_threads = []
return not all_finished
- def Contextualize(self, auth):
+ def Contextualize(self, auth, vm_list = None):
+ """
+ Launch the contextualization process of this Inf
+
+ Args:
+
+ - auth(Authentication): parsed authentication tokens.
+ - vm_list(list of int): List of VM ids to reconfigure. If None all VMs will be reconfigured.
+ """
self.cont_out = ""
self.configured = None
# get the default ctxts in case of the RADL has not specified them
@@ -394,17 +402,20 @@ def Contextualize(self, auth):
vm.configured = None
tasks = {}
+ # Add basic tasks for all VMs
tasks[0] = ['basic']
tasks[1] = ['main_' + vm.info.systems[0].name]
-
- # Then add the configure sections
- for ctxt_num in contextualizes.keys():
- for ctxt_elem in contextualizes[ctxt_num]:
- if ctxt_elem.system == vm.info.systems[0].name:
- if ctxt_num not in tasks:
- tasks[ctxt_num] = []
- tasks[ctxt_num].append(ctxt_elem.configure + "_" + ctxt_elem.system)
-
+
+ # And the specific tasks only for the specified ones
+ if not vm_list or vm.im_id in vm_list:
+ # Then add the configure sections
+ for ctxt_num in contextualizes.keys():
+ for ctxt_elem in contextualizes[ctxt_num]:
+ if ctxt_elem.system == vm.info.systems[0].name:
+ if ctxt_num not in tasks:
+ tasks[ctxt_num] = []
+ tasks[ctxt_num].append(ctxt_elem.configure + "_" + ctxt_elem.system)
+
for step in tasks.keys():
priority = 0
ctxt_task.append((step,priority,vm,tasks[step]))
diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py
index e21a81a20..cdfe79174 100755
--- a/IM/InfrastructureManager.py
+++ b/IM/InfrastructureManager.py
@@ -32,6 +32,7 @@
from IM.radl import radl_parse
from IM.radl.radl import Feature
from IM.recipe import Recipe
+from IM.db import DataBase
from config import Config
@@ -246,7 +247,7 @@ def get_vm_from_inf(inf_id, vm_id, auth):
return sel_inf.get_vm(vm_id)
@staticmethod
- def Reconfigure(inf_id, radl_data, auth):
+ def Reconfigure(inf_id, radl_data, auth, vm_list = None):
"""
Add and update RADL definitions and reconfigure the infrastructure.
@@ -255,6 +256,7 @@ def Reconfigure(inf_id, radl_data, auth):
- inf_id(int): infrastructure id.
- radl_data(str): RADL description, it can be empty.
- auth(Authentication): parsed authentication tokens.
+ - vm_list(list of int): List of VM ids to reconfigure. If None all VMs will be reconfigured.
Return: "" if success.
"""
@@ -291,7 +293,7 @@ def Reconfigure(inf_id, radl_data, auth):
# Stick all virtual machines to be reconfigured
InfrastructureManager.logger.info("Contextualize the inf.")
- sel_inf.Contextualize(auth)
+ sel_inf.Contextualize(auth, vm_list)
return ""
@@ -349,6 +351,8 @@ def AddResource(inf_id, radl_data, auth, context = True, failed_clouds = []):
- inf_id(int): infrastructure id.
- radl(str): RADL description.
- auth(Authentication): parsed authentication tokens.
+ - context(bool): Flag to specify if the contextualization step will be performed
+ - failed_clouds(list of CloudInfo): A list of failed Cloud providers to avoid launching the VMs in them.
Return(list of int): ids of the new virtual machine created.
"""
@@ -534,7 +538,7 @@ def AddResource(inf_id, radl_data, auth, context = True, failed_clouds = []):
return [vm.im_id for vm in new_vms]
@staticmethod
- def RemoveResource(inf_id, vm_list, auth):
+ def RemoveResource(inf_id, vm_list, auth, context = True):
"""
Remove a list of resources from the infrastructure.
@@ -543,6 +547,7 @@ def RemoveResource(inf_id, vm_list, auth):
- inf_id(int): infrastructure id.
- vm_list(str, int or list of str): list of virtual machine ids.
- auth(Authentication): parsed authentication tokens.
+ - context(bool): Flag to specify if the contextualization step will be performed
Return(int): number of undeployed virtual machines.
"""
@@ -576,7 +581,7 @@ def RemoveResource(inf_id, vm_list, auth):
InfrastructureManager.save_data()
InfrastructureManager.logger.info(str(cont) + " VMs successfully removed")
- if cont > 0:
+ if context and cont > 0:
# Now test again if the infrastructure is contextualizing
sel_inf.Contextualize(auth)
@@ -1096,17 +1101,60 @@ def ImportInfrastructure(str_inf, auth_data):
InfrastructureManager.save_data()
return new_inf.id
+ @staticmethod
+ def get_data_from_db(db_url):
+ db = DataBase(db_url)
+ if db.connect():
+ if not db.table_exists("im_data"):
+ db.execute("CREATE TABLE im_data(id int PRIMARY KEY, date TIMESTAMP, inf_id int, data LONGBLOB)")
+ db.close()
+ return None
+ else:
+ res = db.select("select * from im_data order by id desc")
+
+ if len(res) > 0:
+ #id = res[0][0]
+ #date = res[0][1]
+ inf_id = res[0][2]
+ str_inf_list = res[0][3]
+
+ return inf_id, str_inf_list
+ else:
+ return None
+ else:
+ InfrastructureManager.logger.error("ERROR connecting with the database!.")
+ return None
+
+ @staticmethod
+ def save_data_to_db(db_url, inf_id, str_inf_list):
+ db = DataBase(db_url)
+ if db.connect():
+ # At this moment only use id = 0
+ res = db.execute("replace into im_data set inf_id = %s, data = %s, date = now(), id = 0", (inf_id, str_inf_list))
+ db.close()
+ return res
+ else:
+ InfrastructureManager.logger.error("ERROR connecting with the database!.")
+ return None
+
@staticmethod
def load_data():
with InfrastructureManager._lock:
try:
- data_file = open(Config.DATA_FILE, 'rb')
- InfrastructureManager.global_inf_id = pickle.load(data_file)
- InfrastructureManager.infrastructure_list = pickle.load(data_file)
- data_file.close()
+ if Config.DATA_DB:
+ data = InfrastructureManager.get_data_from_db(Config.DATA_DB)
+ if data:
+ inf_id, str_inf_list = data
+ InfrastructureManager.global_inf_id = inf_id
+ InfrastructureManager.infrastructure_list = pickle.loads(str_inf_list)
+ else:
+ data_file = open(Config.DATA_FILE, 'rb')
+ InfrastructureManager.global_inf_id = pickle.load(data_file)
+ InfrastructureManager.infrastructure_list = pickle.load(data_file)
+ data_file.close()
except Exception, ex:
- InfrastructureManager.logger.exception("ERROR loading data from file: " + Config.DATA_FILE + ". Correct or delete it!!")
- sys.stderr.write("ERROR loading data from file: " + Config.DATA_FILE + ": " + str(ex) + ".\nCorrect or delete it!! ")
+ InfrastructureManager.logger.exception("ERROR loading data. Correct or delete it!!")
+ sys.stderr.write("ERROR loading data: " + str(ex) + ".\nCorrect or delete it!! ")
sys.exit(-1)
@staticmethod
@@ -1115,13 +1163,21 @@ def save_data():
# to avoid writing data to the file if the IM is exiting
if not InfrastructureManager._exiting:
try:
- data_file = open(Config.DATA_FILE, 'wb')
- pickle.dump(InfrastructureManager.global_inf_id, data_file)
- pickle.dump(InfrastructureManager.infrastructure_list, data_file)
- data_file.close()
+ if Config.DATA_DB:
+ str_inf_list = pickle.dumps(InfrastructureManager.infrastructure_list)
+ res = InfrastructureManager.save_data_to_db(Config.DATA_DB,
+ InfrastructureManager.global_inf_id, str_inf_list)
+ if not res:
+ InfrastructureManager.logger.error("ERROR saving data.\nChanges not stored!!")
+ sys.stderr.write("ERROR saving data.\nChanges not stored!!")
+ else:
+ data_file = open(Config.DATA_FILE, 'wb')
+ pickle.dump(InfrastructureManager.global_inf_id, data_file)
+ pickle.dump(InfrastructureManager.infrastructure_list, data_file)
+ data_file.close()
except Exception, ex:
- InfrastructureManager.logger.exception("ERROR saving data to the file: " + Config.DATA_FILE + ". Changes not stored!!")
- sys.stderr.write("ERROR saving data to the file: " + Config.DATA_FILE + ": " + str(ex) + ".\nChanges not stored!!")
+ InfrastructureManager.logger.exception("ERROR saving data. Changes not stored!!")
+ sys.stderr.write("ERROR saving data: " + str(ex) + ".\nChanges not stored!!")
@staticmethod
def stop():
diff --git a/IM/REST.py b/IM/REST.py
index ac74af97e..86b5f1f8e 100644
--- a/IM/REST.py
+++ b/IM/REST.py
@@ -249,8 +249,18 @@ def RESTAddResource(id=None):
bottle.abort(401, "No authentication data provided")
try:
+ context = True
+ if "context" in bottle.request.params.keys():
+ str_ctxt = bottle.request.params.get("context").lower()
+ if str_ctxt in ['yes', 'true', '1']:
+ context = True
+ elif str_ctxt in ['no', 'false', '0']:
+ context = False
+ else:
+ bottle.abort(400, "Incorrect value in context parameter")
+
radl_data = bottle.request.body.read()
- vm_ids = InfrastructureManager.AddResource(int(id), radl_data, auth)
+ vm_ids = InfrastructureManager.AddResource(int(id), radl_data, auth, context)
server_ip = bottle.request.environ['SERVER_NAME']
server_port = bottle.request.environ['SERVER_PORT']
@@ -282,7 +292,17 @@ def RESTRemoveResource(infid=None, vmid=None):
bottle.abort(401, "No authentication data provided")
try:
- InfrastructureManager.RemoveResource(int(infid), vmid, auth)
+ context = True
+ if "context" in bottle.request.params.keys():
+ str_ctxt = bottle.request.params.get("context").lower()
+ if str_ctxt in ['yes', 'true', '1']:
+ context = True
+ elif str_ctxt in ['no', 'false', '0']:
+ context = False
+ else:
+ bottle.abort(400, "Incorrect value in context parameter")
+
+ InfrastructureManager.RemoveResource(int(infid), vmid, auth, context)
return ""
except DeletedInfrastructureException, ex:
bottle.abort(404, "Error Removing resources: " + str(ex))
@@ -338,8 +358,19 @@ def RESTReconfigureInfrastructure(id=None):
bottle.abort(401, "No authentication data provided")
try:
- radl_data = bottle.request.forms.get('radl')
- return InfrastructureManager.Reconfigure(int(id), radl_data, auth)
+ vm_list = None
+ if "vm_list" in bottle.request.params.keys():
+ str_vm_list = bottle.request.params.get("vm_list")
+ try:
+ vm_list = [int(vm_id) for vm_id in str_vm_list.split(",")]
+ except:
+ bottle.abort(400, "Incorrect vm_list format.")
+
+ if 'radl' in bottle.request.forms.keys():
+ radl_data = bottle.request.forms.get('radl')
+ else:
+ radl_data = ""
+ return InfrastructureManager.Reconfigure(int(id), radl_data, auth, vm_list)
except DeletedInfrastructureException, ex:
bottle.abort(404, "Error reconfiguring infrastructure: " + str(ex))
return False
diff --git a/IM/ServiceRequests.py b/IM/ServiceRequests.py
index 70895aa83..3ff1ea0b2 100644
--- a/IM/ServiceRequests.py
+++ b/IM/ServiceRequests.py
@@ -120,8 +120,8 @@ class Request_AddResource(IMBaseRequest):
"""
def _call_function(self):
self._error_mesage = "Error Adding resources."
- (inf_id, radl_data, auth_data) = self.arguments
- return InfrastructureManager.InfrastructureManager.AddResource(inf_id, radl_data, Authentication(auth_data))
+ (inf_id, radl_data, auth_data, context) = self.arguments
+ return InfrastructureManager.InfrastructureManager.AddResource(inf_id, radl_data, Authentication(auth_data), context)
class Request_RemoveResource(IMBaseRequest):
"""
@@ -129,8 +129,8 @@ class Request_RemoveResource(IMBaseRequest):
"""
def _call_function(self):
self._error_mesage = "Error Removing resources."
- (inf_id, vm_list, auth_data) = self.arguments
- return InfrastructureManager.InfrastructureManager.RemoveResource(inf_id, vm_list, Authentication(auth_data))
+ (inf_id, vm_list, auth_data, context) = self.arguments
+ return InfrastructureManager.InfrastructureManager.RemoveResource(inf_id, vm_list, Authentication(auth_data), context)
class Request_GetInfrastructureInfo(IMBaseRequest):
"""
@@ -220,8 +220,8 @@ class Request_Reconfigure(IMBaseRequest):
"""
def _call_function(self):
self._error_mesage = "Error Reconfiguring Inf."
- (inf_id, radl_data, auth_data) = self.arguments
- return InfrastructureManager.InfrastructureManager.Reconfigure(inf_id, radl_data, Authentication(auth_data))
+ (inf_id, radl_data, auth_data, vm_list) = self.arguments
+ return InfrastructureManager.InfrastructureManager.Reconfigure(inf_id, radl_data, Authentication(auth_data), vm_list)
class Request_ImportInfrastructure(IMBaseRequest):
"""
diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py
index b70cbb45f..a7c8aa1ba 100644
--- a/IM/VirtualMachine.py
+++ b/IM/VirtualMachine.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from netaddr import IPNetwork, IPAddress
import time
import threading
from IM.radl.radl import network, RADL
@@ -461,7 +462,7 @@ def setIps(self,public_ips,private_ips):
# Get the private network mask
for mask in network.private_net_masks:
- if network.addressInNetwork(private_ip,mask):
+ if IPAddress(private_ip) in IPNetwork(mask):
private_net_mask = mask
break
@@ -472,8 +473,8 @@ def setIps(self,public_ips,private_ips):
# Search in previous user private ips
private_net = None
- for net_mask, net in private_net_map.iteritems():
- if network.addressInNetwork(private_ip, net_mask):
+ for net_mask, net in private_net_map.iteritems():
+ if IPAddress(private_ip) in IPNetwork(net_mask):
private_net = net
# Search in the RADL nets
@@ -524,6 +525,22 @@ def launch_check_ctxt_process(self):
t = threading.Thread(target=eval("self.check_ctxt_process"))
t.daemon = True
t.start()
+
+ def kill_check_ctxt_process(self):
+ """
+ Kill the check_ctxt_process thread
+ """
+ if self.ctxt_pid:
+ if self.ctxt_pid != self.WAIT_TO_PID:
+ ssh = self.inf.vm_master.get_ssh(retry = True)
+ try:
+ ssh.execute("kill -9 " + str(self.ctxt_pid))
+ except:
+ VirtualMachine.logger.exception("Error killing ctxt process with pid: " + str(self.ctxt_pid))
+ pass
+
+ self.ctxt_pid = None
+ self.configured = False
def check_ctxt_process(self):
"""
@@ -542,7 +559,7 @@ def check_ctxt_process(self):
wait = 0
while self.ctxt_pid:
if self.ctxt_pid != self.WAIT_TO_PID:
- ssh = self.inf.vm_master.get_ssh()
+ ssh = self.inf.vm_master.get_ssh(retry = True)
if self.state in VirtualMachine.NOT_RUNNING_STATES:
try:
@@ -551,8 +568,8 @@ def check_ctxt_process(self):
VirtualMachine.logger.exception("Error killing ctxt process with pid: " + str(self.ctxt_pid))
pass
- self.ctxt_pid = None
self.configured = False
+ self.ctxt_pid = None
else:
try:
(_, _, exit_status) = ssh.execute("ps " + str(self.ctxt_pid))
@@ -563,11 +580,11 @@ def check_ctxt_process(self):
if self.ssh_connect_errors > Config.MAX_SSH_ERRORS:
VirtualMachine.logger.error("Too much errors getting status of ctxt process with pid: " + str(self.ctxt_pid) + ". Forget it.")
self.ssh_connect_errors = 0
- self.ctxt_pid = None
self.configured = False
+ self.ctxt_pid = None
+ return None
if exit_status != 0:
- self.ctxt_pid = None
# The process has finished, get the outputs
ctxt_log = self.get_ctxt_log(remote_dir, True)
self.get_ctxt_output(remote_dir, True)
@@ -575,6 +592,7 @@ def check_ctxt_process(self):
self.cont_out = initial_count_out + ctxt_log
else:
self.cont_out = initial_count_out + "Error getting contextualization process log."
+ self.ctxt_pid = None
else:
# Get the log of the process to update the cont_out dynamically
if Config.UPDATE_CTXT_LOG_INTERVAL > 0 and wait > Config.UPDATE_CTXT_LOG_INTERVAL:
@@ -615,10 +633,14 @@ def get_ctxt_log(self, remote_dir, delete = False):
# Remove problematic chars
conf_out = filter(lambda x: x in string.printable, conf_out).encode("ascii", "replace")
- if delete:
- ssh.sftp_remove(remote_dir + '/ctxt_agent.log')
- except Exception:
- VirtualMachine.logger.exception("Error getting contextualization process log")
+ try:
+ if delete:
+ ssh.sftp_remove(remote_dir + '/ctxt_agent.log')
+ except:
+ VirtualMachine.logger.exception("Error deleting remote contextualization process log: " + remote_dir + '/ctxt_agent.log')
+ pass
+ except:
+ VirtualMachine.logger.exception("Error getting contextualization process log: " + remote_dir + '/ctxt_agent.log')
self.configured = False
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
@@ -634,12 +656,16 @@ def get_ctxt_output(self, remote_dir, delete = False):
# Get the JSON output of the ctxt_agent
ssh.sftp_get(remote_dir + '/ctxt_agent.out', tmp_dir + '/ctxt_agent.out')
with open(tmp_dir + '/ctxt_agent.out') as f: ctxt_agent_out = json.load(f)
- if delete:
- ssh.sftp_remove(remote_dir + '/ctxt_agent.out')
+ try:
+ if delete:
+ ssh.sftp_remove(remote_dir + '/ctxt_agent.out')
+ except:
+ VirtualMachine.logger.exception("Error deleting remote contextualization process output: " + remote_dir + '/ctxt_agent.out')
+ pass
# And process it
self.process_ctxt_agent_out(ctxt_agent_out)
except Exception, ex:
- VirtualMachine.logger.exception("Error getting contextualization agent output")
+ VirtualMachine.logger.exception("Error getting contextualization agent output: " + remote_dir + '/ctxt_agent.out')
self.configured = False
self.cont_out += "Error getting contextualization agent output: " + str(ex)
finally:
diff --git a/IM/__init__.py b/IM/__init__.py
index 6faaf348c..2c9dad366 100644
--- a/IM/__init__.py
+++ b/IM/__init__.py
@@ -16,6 +16,6 @@
__all__ = ['auth','bottle','CloudManager','config','ConfManager','db','ganglia','HTTPHeaderTransport','ImageManager','InfrastructureInfo','InfrastructureManager','parsetab','radl','recipe','request','REST', 'ServiceRequests','SSH','timedcall','uriparse','VMRC','xmlobject']
-__version__ = '1.3.0'
+__version__ = '1.3.1'
__author__ = 'Miguel Caballer'
diff --git a/IM/config.py b/IM/config.py
index 812106ea6..e0b2f3b04 100644
--- a/IM/config.py
+++ b/IM/config.py
@@ -59,6 +59,7 @@ class Config:
MAX_CONTEXTUALIZATION_TIME = 7200
MAX_SIMULTANEOUS_LAUNCHES = 1
DATA_FILE = '/etc/im/inf.dat'
+ DATA_DB = None
MAX_INF_STORED = 20
XMLRCP_SSL = False
XMLRCP_SSL_KEYFILE = "/etc/im/pki/server-key.pem"
diff --git a/IM/db.py b/IM/db.py
index 87bdda2f2..1960bb637 100644
--- a/IM/db.py
+++ b/IM/db.py
@@ -14,9 +14,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-"""Class to manage de DB operations"""
+"""Class to manage DB operations"""
import time
+from IM.uriparse import uriparse
+
try:
import sqlite3 as sqlite
SQLITE3_AVAILABLE = True
@@ -32,27 +34,79 @@
except:
SQLITE_AVAILABLE = False
-# Class to manage de DB operations
+try:
+ import MySQLdb as mdb
+ MYSQL_AVAILABLE = True
+except:
+ MYSQL_AVAILABLE = False
+
+
+# Class to manage DB operations
class DataBase:
- """Class to manage de DB operations"""
+ """Class to manage DB operations"""
- db_available = SQLITE_AVAILABLE
+ db_available = SQLITE_AVAILABLE or MYSQL_AVAILABLE
RETRY_SLEEP = 2
MAX_RETRIES = 15
- DB_TYPE = "SQLite"
+ DB_TYPES = ["SQLite", "MySQL"]
- def __init__(self, db_filename):
- self.db_filename = db_filename
+ def __init__(self, db_url):
+ self.db_url = db_url
self.connection = None
+ self.db_type = None
def connect(self):
- """ Function to connecto to the DB
+ """ Function to connect to the DB
Returns: True if the connection is established correctly
of False in case of errors.
"""
+ uri = uriparse(self.db_url)
+ protocol = uri[0]
+ if protocol == "mysql":
+ return self._connect_mysql(uri[1], uri[2][1:])
+ elif protocol == "file":
+ return self._connect_sqlite(uri[2])
+ elif not protocol:
+ return self._connect_sqlite(uri[2])
+
+ return False
+
+ def _get_user_pass_host_port(self, url):
+ username = password = server = port = None
+ if "@" in url:
+ parts = url.split("@")
+ user_pass = parts[0]
+ server_port = parts[1]
+ user_pass = user_pass.split(':')
+ username = user_pass[0]
+ if len(user_pass) > 1:
+ password = user_pass[1]
+ else:
+ server_port = url
+
+ server_port = server_port.split(':')
+ server = server_port[0]
+ if len(server_port) > 1:
+ port = int(server_port[1])
+
+ return username, password, server, port
+
+ def _connect_mysql(self, url, db):
+ if MYSQL_AVAILABLE:
+ username, password, server, port = self._get_user_pass_host_port(url)
+ if not port:
+ port = 3306
+ self.connection = mdb.connect(server, username, password, db, port);
+ self.db_type = "MySQL"
+ return True
+ else:
+ return False
+
+ def _connect_sqlite(self, db_filename):
if SQLITE_AVAILABLE:
- self.connection = sqlite.connect(self.db_filename)
+ self.connection = sqlite.connect(db_filename)
+ self.db_type = "SQLite"
return True
else:
return False
@@ -149,7 +203,13 @@ def table_exists(self, table_name):
Returns: True if the table exists or False otherwise
"""
- res = self.select('select name from sqlite_master where type="table" and name="' + table_name + '"')
+ if self.db_type == "SQLite":
+ res = self.select('select name from sqlite_master where type="table" and name="' + table_name + '"')
+ elif self.db_type == "MySQL":
+ res = self.select('SELECT * FROM information_schema.tables WHERE table_name ="' + table_name + '"')
+ else:
+ return False
+
if (len(res) == 0):
return False
else:
diff --git a/IM/radl/radl.py b/IM/radl/radl.py
index 92775b902..03323bb5b 100644
--- a/IM/radl/radl.py
+++ b/IM/radl/radl.py
@@ -14,8 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from netaddr import IPNetwork, IPAddress
import copy
-import socket,struct
from distutils.version import LooseVersion
from IM.config import Config
@@ -678,24 +678,12 @@ def isPrivateIP(ip):
"""
Check if an IP address is private
"""
+
for mask in network.private_net_masks:
- if network.addressInNetwork(ip,mask):
+ if IPAddress(ip) in IPNetwork(mask):
return True
return False
- @staticmethod
- def addressInNetwork(ip,net):
- """Is an address in a network (format: 10.0.0.0/24)"""
- ipaddr = struct.unpack('>L',socket.inet_aton(ip))[0]
- netaddr,bits = net.split('/')
- netmask = struct.unpack('>L',socket.inet_aton(netaddr))[0]
- ipaddr_masked = ipaddr & (4294967295<<(32-int(bits))) # Logical AND of IP address and mask will equal the network address if it matches
- if netmask == netmask & (4294967295<<(32-int(bits))): # Validate network address is valid for mask
- return ipaddr_masked == netmask
- else:
- # print "***WARNING*** Network",netaddr,"not valid with mask /"+bits
- return False
-
def getId(self):
return self.id
@@ -707,7 +695,8 @@ def check(self, radl):
SIMPLE_FEATURES = {
"outbound": (str, ["YES", "NO"]),
- "outports": (str, check_outports_format)
+ "outports": (str, check_outports_format),
+ "provider_id": (str, None)
}
self.check_simple(SIMPLE_FEATURES, radl)
@@ -1053,6 +1042,7 @@ def check_app(f, x):
NUM_FEATURES = {
"net_interface": {
"connection": (str, check_net_interface_connection),
+ "ip": (str, None),
"dns_name": (str, None) },
"disk": {
"image.url": (str, system._check_disk_image_url),
@@ -1060,6 +1050,7 @@ def check_app(f, x):
"type": (str, ["SWAP", "ISO", "FILESYSTEM"]),
"device": (str, None),
"mount_path": (str, None),
+ "fstype": (str, None),
"size": (float, positive, mem_units),
"free_size": (float, positive, mem_units),
"os.name": (str, ["LINUX", "WINDOWS", "MAC OS X"]),
diff --git a/README b/README
index 9cbca5ce2..af797df30 100644
--- a/README
+++ b/README
@@ -39,6 +39,8 @@ However, if you install IM from sources you should install:
* The YAML library for Python, typically available as the 'python-yaml' or 'PyYAML' package.
* The SOAPpy library for Python, typically available as the 'python-soappy' or 'SOAPpy' package.
+
+ * The Netaddr library for Python, typically available as the 'python-netaddr' package.
* Ansible (http://www.ansibleworks.com/) to configure nodes in the infrastructures.
In particular, Ansible 1.4.2+ must be installed.
@@ -94,7 +96,7 @@ the packages python-paramiko and python-crypto before installing the IM with pip
You only have to install the IM package through the pip tool.
- pip install IM
+ $ pip install IM
Pip will install all the pre-requisites needed. So Ansible 1.4.2 or later will
be installed in the system. Yo will also need to install the sshpass command
@@ -173,3 +175,13 @@ or
REST_SSL = True
And then set the variables: XMLRCP_SSL_* or REST_SSL_* to your certificates paths.
+
+2. DOCKER IMAGE
+===============
+
+A Docker image named `grycap/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.
+
+How to launch the IM service using docker:
+
+ $ sudo docker run -d -p 8899:8899 --name im grycap/im
\ No newline at end of file
diff --git a/README.md b/README.md
index 26034eec4..e7f0719ff 100644
--- a/README.md
+++ b/README.md
@@ -41,6 +41,8 @@ However, if you install IM from sources you should install:
+ The YAML library for Python, typically available as the 'python-yaml' or 'PyYAML' package.
+ The SOAPpy library for Python, typically available as the 'python-soappy' or 'SOAPpy' package.
+
+ + The Netaddr library for Python, typically available as the 'python-netaddr' package.
+ Ansible (http://www.ansibleworks.com/) to configure nodes in the infrastructures.
In particular, Ansible 1.4.2+ must be installed.
@@ -186,3 +188,15 @@ or
REST_SSL = True
And then set the variables: XMLRCP_SSL_* or REST_SSL_* to your certificates paths.
+
+2. DOCKER IMAGE
+===============
+
+A Docker image named `grycap/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.
+
+How to launch the IM service using docker:
+
+```sh
+sudo docker run -d -p 8899:8899 --name im grycap/im
+```
\ No newline at end of file
diff --git a/changelog b/changelog
index f5c04ec80..bf4bf6505 100644
--- a/changelog
+++ b/changelog
@@ -122,3 +122,13 @@ IM 1.3.0
* Add StarVM and StopVM functions to the API
* Modify contextualziation process to ignore not running VMs enabling to configure the rest of VMs of an Inf.
* Enable SSH with retry in all the ctxt steps
+
+IM 1.3.1
+ * Bugfix in ConfManager when relaunching a VM after a failure.
+ * Enable to specify git repo and http file in applications, in addition of galaxy roles, in the RADL.
+ * Add fstype property to disks and enable to automatically format and boot the disk.
+ * Add DATA_DB config variable enabling the usage of MySQL as Backend to store IM data
+ * Bugfix in OCCI storing the proxy filename
+ * Add context optional parameter to AddResource and RemoveResource functions.
+ * Add vm_list optional parameter to Reconfigure.
+ * Bugfixes in OpenStack and LibCloud connectors.
diff --git a/connectors/FogBow.py b/connectors/FogBow.py
index c2f4933d0..d7e0d1653 100644
--- a/connectors/FogBow.py
+++ b/connectors/FogBow.py
@@ -243,6 +243,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
else:
os_tpl = url[1]
+ # set the credentials to the FogBow default username: fogbow
+ system.delValue('disk.0.os.credentials.username')
+ system.setValue('disk.0.os.credentials.username','fogbow')
+
public_key = system.getValue('disk.0.os.credentials.public_key')
if not public_key:
diff --git a/connectors/LibCloud.py b/connectors/LibCloud.py
index 8fe18db10..18b6f2dfa 100644
--- a/connectors/LibCloud.py
+++ b/connectors/LibCloud.py
@@ -20,7 +20,7 @@
from CloudConnector import CloudConnector
from libcloud.compute.base import NodeImage, NodeAuthSSHKey
-from libcloud.compute.types import NodeState, Provider
+from libcloud.compute.types import NodeState
from libcloud.compute.providers import get_driver
from IM.radl.radl import Feature
@@ -223,7 +223,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
if node:
vm = VirtualMachine(inf, node.id, self.cloud, radl, requested_radl, self)
# Add the keypair name to remove it later
- vm.keypair = keypair
+ vm.keypair = keypair_name
self.logger.debug("Node successfully created.")
res.append((True, vm))
else:
@@ -260,7 +260,9 @@ def finalize(self, vm, auth_data):
public_key = vm.getRequestedSystem().getValue('disk.0.os.credentials.public_key')
if vm.keypair and public_key is None or len(public_key) == 0 or (len(public_key) >= 1 and public_key.find('-----BEGIN CERTIFICATE-----') != -1):
# only delete in case of the user do not specify the keypair name
- node.driver.delete_key_pair(vm.keypair)
+ keypair = node.driver.get_key_pair(vm.keypair)
+ if keypair:
+ node.driver.delete_key_pair(keypair)
self.delete_elastic_ips(node, vm)
@@ -289,6 +291,8 @@ def updateVMInfo(self, vm, auth_data):
res_state = VirtualMachine.OFF
elif node.state == NodeState.STOPPED:
res_state = VirtualMachine.STOPPED
+ elif node.state == NodeState.ERROR:
+ res_state = VirtualMachine.FAILED
else:
res_state = VirtualMachine.UNKNOWN
diff --git a/connectors/OCCI.py b/connectors/OCCI.py
index ea4246610..e6081c5f0 100644
--- a/connectors/OCCI.py
+++ b/connectors/OCCI.py
@@ -47,25 +47,17 @@ class OCCICloudConnector(CloudConnector):
}
"""Dictionary with a map with the OCCI VM states to the IM states."""
- def __init__(self, cloud_info):
- self.proxy_filename = None
- CloudConnector.__init__(self, cloud_info)
-
def get_https_connection(self, auth, server, port):
"""
Get a HTTPS connection with the specified server.
It uses a proxy file if it has been specified in the auth credentials
"""
if auth and 'proxy' in auth:
- if self.proxy_filename and os.path.isfile(self.proxy_filename):
- proxy_filename = self.proxy_filename
- else:
- proxy = auth['proxy']
-
- (fproxy, proxy_filename) = tempfile.mkstemp()
- os.write(fproxy, proxy)
- os.close(fproxy)
- self.proxy_filename = proxy_filename
+ proxy = auth['proxy']
+
+ (fproxy, proxy_filename) = tempfile.mkstemp()
+ os.write(fproxy, proxy)
+ os.close(fproxy)
return httplib.HTTPSConnection(server, port, cert_file = proxy_filename)
else:
diff --git a/connectors/OpenNebula.py b/connectors/OpenNebula.py
index f88ed0a50..e19c93f5a 100644
--- a/connectors/OpenNebula.py
+++ b/connectors/OpenNebula.py
@@ -176,7 +176,7 @@ def getSessionID(self, auth_data, hash_password = None):
def setIPsFromTemplate(self, vm, template):
"""
- Set the IPs if the VM from the info obtained in the ONE template object
+ Set the IPs of the VM from the info obtained in the ONE template object
Arguments:
- vm(:py:class:`IM.VirtualMachine`): VM information.
diff --git a/connectors/OpenStack.py b/connectors/OpenStack.py
index 4d78359c2..40bd78d93 100644
--- a/connectors/OpenStack.py
+++ b/connectors/OpenStack.py
@@ -14,93 +14,388 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+import time
from connectors.LibCloud import LibCloudCloudConnector
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
+from libcloud.compute.base import NodeImage, NodeAuthSSHKey
from IM.uriparse import uriparse
+from IM.VirtualMachine import VirtualMachine
from IM.radl.radl import Feature
class OpenStackCloudConnector(LibCloudCloudConnector):
- """
- Cloud Launcher to OpenStack using LibCloud (Needs version 0.16.0 or higher version)
- """
-
- type = "OpenStack"
- """str with the name of the provider."""
-
- def get_driver(self, auth_data):
- """
- Get the driver from the auth data
-
- Arguments:
- - auth(Authentication): parsed authentication tokens.
-
- Returns: a :py:class:`libcloud.compute.base.NodeDriver` or None in case of error
- """
- if self.driver:
- return self.driver
- else:
- auths = auth_data.getAuthInfo(self.type, self.cloud.server)
- if not auths:
- self.logger.error("No correct auth data has been specified to OpenStack.")
- else:
- auth = auths[0]
-
- if 'username' in auth and 'password' in auth and 'tenant' in auth:
- parameters = {"auth_version":'2.0_password',
- "auth_url":"http://" + self.cloud.server + ":" + str(self.cloud.port),
- "auth_token":None,
- "service_type":None,
- "service_name":None,
- "service_region":'regionOne',
- "base_url":None}
-
- for param in parameters:
- if param in auth:
- parameters[param] = auth[param]
- else:
- self.logger.error("No correct auth data has been specified to OpenStack: username, password and tenant")
- return None
-
- cls = get_driver(Provider.OPENSTACK)
- driver = cls(auth['username'], auth['password'],
- ex_tenant_name=auth['tenant'],
- ex_force_auth_url=parameters["auth_url"],
- ex_force_auth_version=parameters["auth_version"],
- ex_force_service_region=parameters["service_region"],
- ex_force_base_url=parameters["base_url"],
- ex_force_service_name=parameters["service_name"],
- ex_force_service_type=parameters["service_type"],
- ex_force_auth_token=parameters["auth_token"])
-
- self.driver = driver
- return driver
-
- def concreteSystem(self, radl_system, auth_data):
- if radl_system.getValue("disk.0.image.url"):
- url = uriparse(radl_system.getValue("disk.0.image.url"))
- protocol = url[0]
- src_host = url[1].split(':')[0]
- # TODO: check the port
- if protocol == "ost" and self.cloud.server == src_host:
- driver = self.get_driver(auth_data)
-
- res_system = radl_system.clone()
- instance_type = self.get_instance_type(driver.list_sizes(), res_system)
-
- res_system.addFeature(Feature("memory.size", "=", instance_type.ram, 'M'), conflict="other", missing="other")
- res_system.addFeature(Feature("disk.0.free_size", "=", instance_type.disk , 'G'), conflict="other", missing="other")
- res_system.addFeature(Feature("price", "=", instance_type.price), conflict="me", missing="other")
-
- res_system.addFeature(Feature("instance_type", "=", instance_type.name), conflict="other", missing="other")
-
- res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other")
- res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other")
- res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other")
-
- return [res_system]
- else:
- return []
- else:
- return [radl_system.clone()]
+ """
+ Cloud Launcher to OpenStack using LibCloud (Needs version 0.16.0 or higher version)
+ """
+
+ type = "OpenStack"
+ """str with the name of the provider."""
+
+ def get_driver(self, auth_data):
+ """
+ Get the driver from the auth data
+
+ Arguments:
+ - auth(Authentication): parsed authentication tokens.
+
+ Returns: a :py:class:`libcloud.compute.base.NodeDriver` or None in case of error
+ """
+ if self.driver:
+ return self.driver
+ else:
+ auths = auth_data.getAuthInfo(self.type, self.cloud.server)
+ if not auths:
+ raise Exception("No correct auth data has been specified to OpenStack.")
+ else:
+ auth = auths[0]
+
+ if 'username' in auth and 'password' in auth and 'tenant' in auth:
+ parameters = {"auth_version":'2.0_password',
+ "auth_url":"http://" + self.cloud.server + ":" + str(self.cloud.port),
+ "auth_token":None,
+ "service_type":None,
+ "service_name":None,
+ "service_region":'RegionOne',
+ "base_url":None}
+
+ for param in parameters:
+ if param in auth:
+ parameters[param] = auth[param]
+ else:
+ self.logger.error("No correct auth data has been specified to OpenStack: username, password and tenant")
+ raise Exception("No correct auth data has been specified to OpenStack: username, password and tenant")
+
+ cls = get_driver(Provider.OPENSTACK)
+ driver = cls(auth['username'], auth['password'],
+ ex_tenant_name=auth['tenant'],
+ ex_force_auth_url=parameters["auth_url"],
+ ex_force_auth_version=parameters["auth_version"],
+ ex_force_service_region=parameters["service_region"],
+ ex_force_base_url=parameters["base_url"],
+ ex_force_service_name=parameters["service_name"],
+ ex_force_service_type=parameters["service_type"],
+ ex_force_auth_token=parameters["auth_token"])
+
+ self.driver = driver
+ return driver
+
+ def concreteSystem(self, radl_system, auth_data):
+ if radl_system.getValue("disk.0.image.url"):
+ url = uriparse(radl_system.getValue("disk.0.image.url"))
+ protocol = url[0]
+ src_host = url[1].split(':')[0]
+ # TODO: check the port
+ if protocol == "ost" and self.cloud.server == src_host:
+ driver = self.get_driver(auth_data)
+
+ res_system = radl_system.clone()
+ instance_type = self.get_instance_type(driver.list_sizes(), res_system)
+
+ res_system.addFeature(Feature("memory.size", "=", instance_type.ram, 'M'), conflict="other", missing="other")
+ res_system.addFeature(Feature("disk.0.free_size", "=", instance_type.disk , 'G'), conflict="other", missing="other")
+ res_system.addFeature(Feature("price", "=", instance_type.price), conflict="me", missing="other")
+
+ res_system.addFeature(Feature("instance_type", "=", instance_type.name), conflict="other", missing="other")
+
+ res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other")
+ res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other")
+ res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other")
+
+ return [res_system]
+ else:
+ return []
+ else:
+ return [radl_system.clone()]
+
+ def get_networks(self, driver, radl):
+ """
+ Get the list of networks to connect the VM
+ """
+ nets = []
+ ost_nets = driver.ex_list_networks()
+ used_nets = []
+ # I use this "patch" as used in the LibCloud OpenStack driver
+ public_networks_labels = ['public', 'internet', 'publica']
+
+ for radl_net in radl.networks:
+ # check if this net is connected with the current VM
+ if radl.systems[0].getNumNetworkWithConnection(radl_net.id) is not None:
+ # First check if the user has specified a provider ID
+ net_provider_id = radl_net.getValue('provider_id')
+ if net_provider_id:
+ for net in ost_nets:
+ if net.name == net_provider_id:
+ nets.append(net)
+ used_nets.append(net.name)
+ break
+ else:
+ # if not select the first not used net
+ for net in ost_nets:
+ # I use this "patch" as used in the LibCloud OpenStack driver
+ if net.name not in public_networks_labels:
+ if net.name not in used_nets:
+ nets.append(net)
+ used_nets.append(net.name)
+ break
+
+ return nets
+
+
+ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
+ driver = self.get_driver(auth_data)
+
+ system = radl.systems[0]
+ image_id = self.get_image_id(system.getValue("disk.0.image.url"))
+ image = NodeImage(id=image_id, name=None, driver=driver)
+
+ instance_type = self.get_instance_type(driver.list_sizes(), system)
+
+ name = system.getValue("disk.0.image.name")
+ if not name:
+ name = "userimage"
+
+ nets = self.get_networks(driver, radl)
+
+ sgs = self.create_security_group(driver, inf, radl)
+
+ args = {'size': instance_type,
+ 'image': image,
+ 'networks': nets,
+ 'ex_security_groups': sgs,
+ 'name': "%s-%s" % (name, int(time.time()*100))}
+
+ keypair = None
+ public_key = system.getValue("disk.0.os.credentials.public_key")
+ if public_key:
+ keypair = driver.get_key_pair(public_key)
+ if keypair:
+ system.setUserKeyCredentials(system.getCredentials().username, None, keypair.private_key)
+ else:
+ if "ssh_key" in driver.features.get("create_node", []):
+ args["auth"] = NodeAuthSSHKey(public_key)
+ else:
+ args["ex_keyname"] = keypair.name
+ elif not system.getValue("disk.0.os.credentials.password"):
+ keypair_name = "im-%d" % int(time.time()*100.0)
+ keypair = driver.create_key_pair(keypair_name)
+ system.setUserKeyCredentials(system.getCredentials().username, None, keypair.private_key)
+
+ if keypair.public_key and "ssh_key" in driver.features.get("create_node", []):
+ args["auth"] = NodeAuthSSHKey(keypair.public_key)
+ else:
+ args["ex_keyname"] = keypair_name
+
+ res = []
+ i = 0
+ all_failed = True
+ while i < num_vm:
+ self.logger.debug("Creating node")
+
+ node = driver.create_node(**args)
+
+ if node:
+ vm = VirtualMachine(inf, node.id, self.cloud, radl, requested_radl, self)
+ # Add the keypair name to remove it later
+ vm.keypair = keypair_name
+ self.logger.debug("Node successfully created.")
+ all_failed = False
+ res.append((True, vm))
+ else:
+ res.append((False, "Error creating the node"))
+ i += 1
+
+ # if all the VMs have failed, remove the sg and keypair
+ if all_failed:
+ if public_key is None or len(public_key) == 0 or (len(public_key) >= 1 and public_key.find('-----BEGIN CERTIFICATE-----') != -1):
+ # only delete in case of the user do not specify the keypair name
+ driver.delete_key_pair(keypair)
+ if sgs:
+ driver.ex_delete_security_group(sgs[0])
+
+ return res
+
+ def get_ip_pool(self, driver, fixed_ip):
+ """
+ Return the most suitable IP pool
+ """
+ pools = driver.ex_list_floating_ip_pools()
+
+ if fixed_ip:
+ for pool in pools:
+ ips = pool.list_floating_ips()
+
+ for ip in ips:
+ if ip.ip_address == fixed_ip:
+ return pool
+
+ #otherwise return the first pool
+ return pools[0]
+
+ def add_elastic_ip(self, vm, node, fixed_ip = None):
+ """
+ Add an elastic IP to an instance
+
+ Arguments:
+ - vm(:py:class:`IM.VirtualMachine`): VM information.
+ - node(:py:class:`libcloud.compute.base.Node`): node object to attach the volumes.
+ - fixed_ip(str, optional): specifies a fixed IP to add to the instance.
+ Returns: a :py:class:`OpenStack_1_1_FloatingIpAddress` added or None if some problem occur.
+ """
+ if vm.state == VirtualMachine.RUNNING:
+ try:
+ self.logger.debug("Add an Elastic/Floating IP")
+
+ if node.driver.ex_list_floating_ip_pools():
+ pool = self.get_ip_pool(node.driver, fixed_ip)
+ if fixed_ip:
+ floating_ip = node.driver.ex_get_floating_ip(fixed_ip)
+ else:
+ floating_ip = pool.create_floating_ip()
+ node.driver.ex_attach_floating_ip_to_node(node, floating_ip)
+ return floating_ip
+ else:
+ self.logger.error("Error adding a Floating IP: No pools available.")
+ return None
+
+ except Exception:
+ self.logger.exception("Error adding an Elastic/Floating IP to VM ID: " + str(vm.id))
+ return None
+ else:
+ self.logger.debug("The VM is not running, not adding an Elastic/Floating IP.")
+ return None
+
+ @staticmethod
+ def _get_security_group(driver, sg_name):
+ try:
+ sg = None
+ for elem in driver.ex_list_security_groups():
+ if elem.name == sg_name:
+ sg = elem
+ break
+ return sg
+ except Exception:
+ return None
+
+ def create_security_group(self, driver, inf, radl):
+ res = None
+ try:
+ sg_name = "im-" + str(inf.uuid)
+ sg = self._get_security_group(driver, sg_name)
+
+ if not sg:
+ self.logger.debug("Creating security group: " + sg_name)
+ try:
+ sg = driver.ex_create_security_group(sg_name, "Security group created by the IM")
+ except Exception, crex:
+ # First check if the SG does exist
+ sg = self._get_security_group(driver, sg_name)
+ if not sg:
+ # if not raise the exception
+ raise crex
+ else:
+ self.logger.debug("Security group: " + sg_name + " already created.")
+
+ res = [sg]
+
+ public_net = None
+ for net in radl.networks:
+ if net.isPublic():
+ public_net = net
+
+ if public_net:
+ outports = public_net.getOutPorts()
+ if outports:
+ for remote_port,remote_protocol,local_port,local_protocol in outports:
+ if local_port != 22 and local_port != 5099:
+ protocol = remote_protocol
+ if remote_protocol != local_protocol:
+ self.logger.warn("Different protocols used in outports ignoring local port protocol!")
+
+ driver.ex_create_security_group_rule(sg,protocol,remote_port, remote_port, '0.0.0.0/0')
+
+ try:
+ driver.ex_create_security_group_rule(sg,'tcp',22, 22, '0.0.0.0/0')
+ driver.ex_create_security_group_rule(sg,'tcp',5099, 5099, '0.0.0.0/0')
+
+ # open all the ports for the VMs in the security group
+ driver.ex_create_security_group_rule(sg,'tcp',1, 65535, source_security_group=sg)
+ driver.ex_create_security_group_rule(sg,'udp',1, 65535, source_security_group=sg)
+ except Exception, addex:
+ self.logger.warn("Exception adding SG rules. Probably the rules exists:" + str(addex))
+ pass
+
+ except Exception:
+ self.logger.exception("Error Creating the Security group")
+
+ return res
+
+
+ def finalize(self, vm, auth_data):
+ node = self.get_node_with_id(vm.id, auth_data)
+
+ if node:
+ sgs = node.driver.ex_get_node_security_groups(node)
+
+ success = node.destroy()
+
+ public_key = vm.getRequestedSystem().getValue('disk.0.os.credentials.public_key')
+ if vm.keypair and public_key is None or len(public_key) == 0 or (len(public_key) >= 1 and public_key.find('-----BEGIN CERTIFICATE-----') != -1):
+ # only delete in case of the user do not specify the keypair name
+ keypair = node.driver.get_key_pair(vm.keypair)
+ if keypair:
+ node.driver.delete_key_pair(keypair)
+
+ self.delete_elastic_ips(node, vm)
+
+ # Delete the EBS volumes
+ self.delete_volumes(vm)
+
+ # Delete the SG if this is the last VM
+ self.delete_security_group(node, sgs, vm.inf, vm.id)
+
+ if not success:
+ return (False, "Error destroying node: " + vm.id)
+
+ self.logger.debug("VM " + str(vm.id) + " successfully destroyed")
+ else:
+ self.logger.warn("VM " + str(vm.id) + " not found.")
+
+ return (True, "")
+
+ def delete_security_group(self, node, sgs, inf, vm_id, timeout = 60):
+ """
+ Delete the SG of this infrastructure if this is the last VM
+ """
+ if sgs:
+ # There will be only one
+ sg = sgs[0]
+
+ some_vm = False
+ for vm in inf.get_vm_list():
+ if vm.id != vm_id:
+ some_vm = True
+
+ if not some_vm:
+ # wait it to terminate and then remove the SG
+ cont = 0
+ deleted = False
+ while not deleted and cont < timeout:
+ time.sleep(5)
+ cont += 5
+ try:
+ node.driver.ex_delete_security_group(sg)
+ deleted = True
+ except Exception, ex:
+ # Check if it has been deleted yet
+ sg = self._get_security_group(node.driver, sg.name)
+ if not sg:
+ self.logger.debug("Error deleting the SG. But it does not exist. Ignore. " + str(ex))
+ deleted = True
+ else:
+ self.logger.exception("Error deleting the SG.")
+ else:
+ # If there are more than 1, we skip this step
+ self.logger.debug("There are active instances. Not removing the SG")
+ else:
+ self.logger.warn("No Security Group with name: " + sg.name)
\ No newline at end of file
diff --git a/doc/source/REST.rst b/doc/source/REST.rst
index ed4e85fa2..59a3fc159 100644
--- a/doc/source/REST.rst
+++ b/doc/source/REST.rst
@@ -90,6 +90,7 @@ GET ``http://imserver.com/infrastructures//``
POST ``http://imserver.com/infrastructures/``
:body: ``RADL document``
+ :input fields: ``context`` (optional)
:Content-type: text/uri-list
:ok response: 200 OK
:fail response: 401, 404, 400
@@ -97,7 +98,9 @@ POST ``http://imserver.com/infrastructures/``
Add the resources specified in the body contents to the infrastructure with ID
``infId``. The RADL restrictions are the same as in
:ref:`RPC-XML AddResource `. If success, it is returned
- a list of URIs of the new virtual machines.
+ a list of URIs of the new virtual machines. The ``context`` parameter is optional and
+ is a flag to specify if the contextualization step will be launched just after the VM
+ addition. Acceptable values: yes, no, true, false, 1 or 0. If not specified the flag is set to True.
PUT ``http://imserver.com/infrastructures//stop``
:Content-type: text/uri-list
@@ -116,7 +119,7 @@ PUT ``http://imserver.com/infrastructures//start``
the infrastructure with ID ``infID``:
PUT ``http://imserver.com/infrastructures//reconfigure``
- :input fields: ``radl`` (compulsory)
+ :input fields: ``radl`` (compulsory), ``vm_list`` (optional)
:Content-type: text/uri-list
:ok response: 200 OK
:fail response: 401, 404, 400
@@ -126,6 +129,9 @@ PUT ``http://imserver.com/infrastructures//reconfigure``
of the infrastructure as indicated in ``radl``. The RADL restrictions
are the same as in :ref:`RPC-XML Reconfigure `. If no
RADL are specified, the contextualization process is stated again.
+ The last ``vm_list`` parameter is optional
+ and is a comma-separated list of IDs of the VMs to reconfigure. If not
+ specified all the VMs will be reconfigured.
DELETE ``http://imserver.com/infrastructures/``
:ok response: 200 OK
@@ -161,11 +167,14 @@ PUT ``http://imserver.com/infrastructures//vms/``
in the body contents.
DELETE ``http://imserver.com/infrastructures//vms/``
+ :input fields: ``context`` (optional)
:ok response: 200 OK
:fail response: 401, 404, 400
Undeploy the virtual machine with ID ``vmId`` associated to the
- infrastructure with ID ``infId``.
+ infrastructure with ID ``infId``. The ``context`` parameter is optional and
+ is a flag to specify if the contextualization step will be launched just after the VM
+ removal. Acceptable values: yes, no, true, false, 1 or 0. If not specified the flag is set to True.
PUT ``http://imserver.com/infrastructures//vms//start``
:Content-type: text/plain
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
index 4b09af2b9..77d864c08 100644
--- a/doc/source/manual.rst
+++ b/doc/source/manual.rst
@@ -13,6 +13,8 @@ IM needs at least Python 2.6 to run, as well as the next libraries:
* `PyYAML `_, a YAML parser.
* `SOAPpy `_, a full-featured SOAP library
(we know it is not actively supported by upstream anymore).
+* `Netaddr `_, A Python library for representing
+ and manipulating network addresses.
Also, IM uses `Ansible `_ (1.4.2 or later) to configure the
infrastructure nodes.
@@ -20,12 +22,15 @@ infrastructure nodes.
These components are usually available from the distribution repositories. To
install them in Debian and Ubuntu based distributions, do::
- $ apt-get install python-ply python-paramiko python-yaml python-soappy ansible
+ $ apt-get install python-ply python-paramiko python-yaml python-soappy python-netaddr ansible
In Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux,
Fedora, etc.), do::
- $ yum install python-ply python-paramiko PyYAML SOAPpy ansible
+ $ yum install python-ply python-paramiko python-netaddr PyYAML SOAPpy ansible
+
+**WARNING: In some GNU/Linux distributions (RHEL 6 or equivalents) you must NOT install
+the packages 'python-paramiko' and 'python-crypto' with yum. You MUST use pip to install them**
Finally, check the next values in the Ansible configuration file
:file:`ansible.cfg`, (usually found in :file:`/etc/ansible`)::
@@ -101,7 +106,7 @@ content and move the extracted directory to the installation path (for instance
:file:`/usr/local` or :file:`/opt`)::
$ tar xvzf IM-0.1.tar.gz
- $ sudo chown -R root:root IM-0.1.tar.gz
+ $ sudo chown -R root:root IM-0.1.tar.gz
$ sudo mv IM-0.1 /usr/local
Finally you must copy (or link) $IM_PATH/scripts/im file to /etc/init.d directory::
@@ -153,6 +158,12 @@ Basic Options
Full path to the data file.
The default value is :file:`/etc/im/inf.dat`.
+.. confval:: DATA_DB
+
+ Save IM data into a MySQL DB instead of a file.
+ Using this format: 'mysql://username:password@server/db_name'
+ The default value is None.
+
.. confval:: USER_DB
Full path to the IM user DB json file.
@@ -428,3 +439,13 @@ The configuration values under the ``OpenNebula`` section:
Text to add to the ONE Template different to NAME, CPU, VCPU, MEMORY, OS, DISK and CONTEXT
The default value is ``GRAPHICS = [type="vnc",listen="0.0.0.0"]``.
+
+Docker Image
+============
+
+A Docker image named `grycap/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.
+
+How to launch the IM service using docker::
+
+ $ sudo docker run -d -p 8899:8899 --name im grycap/im
diff --git a/doc/source/radl.rst b/doc/source/radl.rst
index bc01386f6..59c7e2eb6 100644
--- a/doc/source/radl.rst
+++ b/doc/source/radl.rst
@@ -217,6 +217,8 @@ machine. The supported features are:
* ``gce:///``, for Google Cloud;
* ``azr://``, for Microsoft Azure; and
* ``/``, for FedCloud OCCI connector.
+ * ``docker://``, for Docker images.
+ * ``fbw://``, for FogBow images.
Either ``disk.0.image.url`` or ``disk.0.image.name`` must be set.
@@ -229,10 +231,22 @@ machine. The supported features are:
``disk..device = ``
Set the device name, if it is disk with no source set.
-
- .. todo::
-
- ``disk..device = `` does not have a clear description.
+ It specifies the device where the disk will be located in the system
+ (hdb, hdc, etc.). Depending on the Cloud provider the meaning of this
+ field may change. In Docker and Kubernetes connectors the device
+ refers to a path to create a bind in the container.
+
+``disk..mount_path = ``
+ Set the mount point, if it is disk with no source set.
+ It specifies a path to mount the device. In Docker and Kubernetes
+ connectors this path refers to the directory in the container to
+ bind the host directory specified in ``device``.
+
+``disk..fstype = ``
+ Set the filesystem type, if it is a disk with no source set.
+ It specifies the type of the filesystem of this disk. If specified
+ the contextualization agent will try to format and mount this disk
+ in the path specified in ``mount_path`` field.
``disk..size = B|K|M|G``
Set the size of the disk, if it is a disk with no source set.
@@ -273,6 +287,21 @@ machine. The supported features are:
the application must have already installed; and if ``no``, the application
can be installed during the contextualization of the virtual machine if it
is not installed.
+
+ There is a **special** type of application that starts with ``ansible.modules.``.
+ These applications install `ansible roles `_
+ that can be used in the ``configure`` sections of the RADL.
+ There are three types of ansible modules:
+
+ * `Ansible Galaxy `_ roles: ``ansible.modules.micafer.hadoop``: The user
+ specifies the name of the galaxy role after the string ``ansible.modules.``
+ * HTTP URL: ``ansible.modules.http://server.com/hadoop.tgz``: The user specifies an HTTP URL after
+ the string ``ansible.modules.``. The file must be compressed. It must contain only one directory
+ with the same name as the compressed file (without extension) with the ansible role content.
+ * Git Repo: ``ansible.modules.git://github.com/micafer/ansible-role-hadoop|hadoop``: The user specifies a Git repo
+ (using the git scheme in the URL) after the string ``ansible.modules.``. Furthermore the
+ user must specify the role name using a | after the URL, as shown in the example.
+
Parametric Values
-----------------
diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst
index 06a8c4cb0..045fda88f 100644
--- a/doc/source/xmlrpc.rst
+++ b/doc/source/xmlrpc.rst
@@ -155,11 +155,15 @@ This is the list of method names:
:parameter 0: ``infId``: integer
:parameter 1: ``radl``: string
:parameter 2: ``auth``: array of structs
+ :parameter 3: ``context``: (optional, default value True) boolean
:ok response: [true, ``infId``: integer]
:fail response: [false, ``error``: string]
Add the resources specified in ``radl`` to the infrastructure with ID
- ``infId``. The ``deploy`` instructions in the ``radl`` must refer to
+ ``infId``. The last ``context`` parameter is optional and is a flag to
+ specify if the contextualization step will be launched just after the VM
+ addition. The default value is True.
+ The ``deploy`` instructions in the ``radl`` must refer to
*systems* already defined. If all the *systems* defined in ``radl`` are
new, they will be added. Otherwise the new *systems* defined will be
ignored. All the *systems* specified in the ``deploy`` must be specified
@@ -175,13 +179,16 @@ This is the list of method names:
:parameter 0: ``infId``: integer
:parameter 1: ``vmIds``: string
:parameter 2: ``auth``: array of structs
+ :parameter 3: ``context``: (optional, default value True) boolean
:ok response: [true, integer]
:fail response: [false, ``error``: string]
Updeploy the virtual machines with IDs in ``vmIds`` associated to the
infrastructure with ID ``infId``. The different virtual machine IDs in
``vmIds`` are separated by commas. On success it returns the number of
- VMs that have been undeployed.
+ VMs that have been undeployed. The last ``context`` parameter is optional
+ and is a flag to specify if the contextualization step will be launched
+ just after the VM removal. The default value is True.
.. _StopInfrastructure-xmlrpc:
@@ -240,12 +247,15 @@ This is the list of method names:
:parameter 0: ``infId``: integer
:parameter 1: ``radl``: string
:parameter 2: ``auth``: array of structs
+ :parameter 3: ``vm_list``: (optional, default value None) array of integers
:ok response: [true, string of length zero]
:fail response: [false, ``error``: string]
Update the infrastructure with ID ``infId`` using the *configuration
sections* in the RADL ``radl``. Some virtual machines associated to the
- infrastructure may be reconfigured.
+ infrastructure may be reconfigured. The last ``vm_list`` parameter is optional
+ and is a list of integers specifying the IDs of the VMs to reconfigure. The default
+ value is None, which means that all the VMs will be reconfigured.
.. _ExportInfrastructure-xmlrpc:
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1f8a3c752..a1f060f99 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -8,4 +8,6 @@ RUN apt-get update && apt-get install -y gcc python-dev python-pip python-soappy
RUN pip install IM
RUN pip uninstall -y SOAPpy
COPY ansible.cfg /etc/ansible/ansible.cfg
+# Solve a bug in gce.py driver in libcloud 0.17
+COPY gce.py /usr/local/lib/python2.7/dist-packages/libcloud/compute/drivers/gce.py
CMD im_service.py
diff --git a/etc/im.cfg b/etc/im.cfg
index 70caa53aa..65eaf969b 100644
--- a/etc/im.cfg
+++ b/etc/im.cfg
@@ -21,8 +21,12 @@ XMLRCP_PORT = 8899
# Address where the XML-RPC server will be listening-in.
# 0.0.0.0 will listen in all the IPs of the machine
XMLRCP_ADDRESS = 0.0.0.0
+
# IM data file
DATA_FILE = /etc/im/inf.dat
+# Save IM data into a MySQL DB
+#DATA_DB = mysql://username:password@server/db_name
+
# IM user DB. To restrict the users that can access the IM service.
# Comment it or set a blank value to disable user check.
USER_DB =
diff --git a/im_service.py b/im_service.py
index 3190788a7..21d965130 100755
--- a/im_service.py
+++ b/im_service.py
@@ -46,12 +46,12 @@ def WaitRequest(request):
API functions.
They create the specified request and wait for it.
"""
-def AddResource(inf_id, radl_data, auth_data):
- request = IMBaseRequest.create_request(IMBaseRequest.ADD_RESOURCE, (inf_id, radl_data, auth_data))
+def AddResource(inf_id, radl_data, auth_data, context = True):
+ request = IMBaseRequest.create_request(IMBaseRequest.ADD_RESOURCE, (inf_id, radl_data, auth_data, context))
return WaitRequest(request)
-def RemoveResource(inf_id, vm_list, auth_data):
- request = IMBaseRequest.create_request(IMBaseRequest.REMOVE_RESOURCE, (inf_id, vm_list, auth_data))
+def RemoveResource(inf_id, vm_list, auth_data, context = True):
+ request = IMBaseRequest.create_request(IMBaseRequest.REMOVE_RESOURCE, (inf_id, vm_list, auth_data, context))
return WaitRequest(request)
def GetVMInfo(inf_id, vm_id, auth_data):
@@ -91,8 +91,8 @@ def GetInfrastructureList(auth_data):
request = IMBaseRequest.create_request(IMBaseRequest.GET_INFRASTRUCTURE_LIST,(auth_data))
return WaitRequest(request)
-def Reconfigure(inf_id, radl_data, auth_data):
- request = IMBaseRequest.create_request(IMBaseRequest.RECONFIGURE,(inf_id, radl_data, auth_data))
+def Reconfigure(inf_id, radl_data, auth_data, vm_list = None):
+ request = IMBaseRequest.create_request(IMBaseRequest.RECONFIGURE,(inf_id, radl_data, auth_data, vm_list))
return WaitRequest(request)
def ImportInfrastructure(str_inf, auth_data):
@@ -127,7 +127,7 @@ def launch_daemon():
"""
Launch the IM daemon
"""
- if os.path.isfile(Config.DATA_FILE):
+ if os.path.isfile(Config.DATA_FILE) or Config.DATA_DB:
InfrastructureManager.load_data()
if Config.XMLRCP_SSL:
diff --git a/setup.py b/setup.py
index 95ccecff3..b52c1b7ba 100644
--- a/setup.py
+++ b/setup.py
@@ -35,12 +35,12 @@
author='GRyCAP - Universitat Politecnica de Valencia',
author_email='micafer1@upv.es',
url='http://www.grycap.upv.es/im',
- packages=['IM', 'IM.radl', 'IM.ansible','connectors'],
+ packages=['IM', 'IM.radl', 'IM.ansible','connectors'],
scripts=["im_service.py"],
- data_files=datafiles,
+ data_files=datafiles,
license="GPL version 3, http://www.gnu.org/licenses/gpl-3.0.txt",
long_description="IM is a tool that ease the access and the usability of IaaS clouds by automating the VMI selection, deployment, configuration, software installation, monitoring and update of Virtual Appliances. It supports APIs from a large number of virtual platforms, making user applications cloud-agnostic. In addition it integrates a contextualization system to enable the installation and configuration of all the user required applications providing the user with a fully functional infrastructure.",
description="IM is a tool to manage virtual infrastructures on Cloud deployments",
platforms=["any"],
- install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply", "bottle"]
+ install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply", "bottle", "netaddr"]
)
diff --git a/test/TestIM.py b/test/TestIM.py
index e2dc94fc6..fd5c7cddb 100755
--- a/test/TestIM.py
+++ b/test/TestIM.py
@@ -195,7 +195,18 @@ def test_19_addresource(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 900)
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
- def test_20_removeresource(self):
+ def test_20_addresource_noconfig(self):
+ """
+ Test AddResource function with the context option set to False
+ """
+ (success, res) = self.server.AddResource(self.inf_id, RADL_ADD, self.auth_data, False)
+ self.assertTrue(success, msg="ERROR calling AddResource: " + str(res))
+
+ (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetInfrastructureInfo:" + str(vm_ids))
+ self.assertEqual(len(vm_ids), 4, msg="ERROR getting infrastructure info: Incorrect number of VMs(" + str(len(vm_ids)) + "). It must be 4")
+
+ def test_21_removeresource(self):
"""
Test RemoveResource function
"""
@@ -207,7 +218,7 @@ def test_20_removeresource(self):
(success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
self.assertTrue(success, msg="ERROR calling GetInfrastructureInfo:" + str(vm_ids))
- self.assertEqual(len(vm_ids), 2, msg="ERROR getting infrastructure info: Incorrect number of VMs(" + str(len(vm_ids)) + "). It must be 2")
+ self.assertEqual(len(vm_ids), 3, msg="ERROR getting infrastructure info: Incorrect number of VMs(" + str(len(vm_ids)) + "). It must be 3")
(success, vm_state) = self.server.GetVMProperty(self.inf_id, vm_ids[0], "state", self.auth_data)
self.assertTrue(success, msg="ERROR getting VM state:" + str(res))
@@ -216,7 +227,60 @@ def test_20_removeresource(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
- def test_21_stop(self):
+ def test_22_removeresource_noconfig(self):
+ """
+ Test RemoveResource function with the context option set to False
+ """
+ (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetInfrastructureInfo: " + str(vm_ids))
+
+ (success, res) = self.server.RemoveResource(self.inf_id, vm_ids[2], self.auth_data, False)
+ self.assertTrue(success, msg="ERROR calling RemoveResource: " + str(res))
+
+ (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetInfrastructureInfo:" + str(vm_ids))
+ self.assertEqual(len(vm_ids), 2, msg="ERROR getting infrastructure info: Incorrect number of VMs(" + str(len(vm_ids)) + "). It must be 2")
+
+ (success, vm_state) = self.server.GetVMProperty(self.inf_id, vm_ids[0], "state", self.auth_data)
+ self.assertTrue(success, msg="ERROR getting VM state:" + str(res))
+ self.assertEqual(vm_state, VirtualMachine.CONFIGURED, msg="ERROR unexpected state. Expected 'running' and obtained " + vm_state)
+
+ def test_23_reconfigure(self):
+ """
+ Test Reconfigure function
+ """
+ (success, res) = self.server.Reconfigure(self.inf_id, "", self.auth_data)
+ self.assertTrue(success, msg="ERROR calling Reconfigure: " + str(res))
+
+ all_stopped = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_24_reconfigure_vmlist(self):
+ """
+ Test Reconfigure function specifying a list of VMs
+ """
+ (success, res) = self.server.Reconfigure(self.inf_id, "", self.auth_data, [0])
+ self.assertTrue(success, msg="ERROR calling Reconfigure: " + str(res))
+
+ all_stopped = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_25_reconfigure_radl(self):
+ """
+ Test Reconfigure function specifying a new RADL
+ """
+ radl = """configure test (\n@begin\n---\n - tasks:\n - debug: msg="RECONFIGURERADL"\n@end\n)"""
+ (success, res) = self.server.Reconfigure(self.inf_id, radl, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling Reconfigure: " + str(res))
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ (success, cont_out) = self.server.GetInfrastructureContMsg(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out))
+ self.assertIn("RECONFIGURERADL", cont_out, msg="Incorrect contextualization message: " + cont_out)
+
+ def test_30_stop(self):
"""
Test StopInfrastructure function
"""
@@ -226,7 +290,7 @@ def test_21_stop(self):
all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING])
self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be stopped (timeout).")
- def test_22_start(self):
+ def test_31_start(self):
"""
Test StartInfrastructure function
"""
@@ -238,7 +302,7 @@ def test_22_start(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING])
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).")
- def test_23_stop_vm(self):
+ def test_32_stop_vm(self):
"""
Test StopVM function
"""
@@ -248,7 +312,7 @@ def test_23_stop_vm(self):
all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0])
self.assertTrue(all_stopped, msg="ERROR waiting the vm to be stopped (timeout).")
- def test_24_start_vm(self):
+ def test_33_start_vm(self):
"""
Test StartVM function
"""
@@ -260,6 +324,18 @@ def test_24_start_vm(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING], [0])
self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).")
+ def test_40_export_import(self):
+ """
+ Test ExportInfrastructure and ImportInfrastructure functions
+ """
+ (success, res) = self.server.ExportInfrastructure(self.inf_id, False, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling ExportInfrastructure: " + str(res))
+
+ (success, res) = self.server.ImportInfrastructure(res, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling ImportInfrastructure: " + str(res))
+
+ self.assertEqual(res, self.inf_id+1, msg="ERROR importing the inf.")
+
def test_50_destroy(self):
"""
Test DestroyInfrastructure function
diff --git a/test/TestREST.py b/test/TestREST.py
index 5b3cbed17..de628198d 100755
--- a/test/TestREST.py
+++ b/test/TestREST.py
@@ -71,6 +71,8 @@ def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None):
self.assertEqual(resp.status, 200, msg="ERROR getting infrastructure info:" + output)
vm_ids = output.split("\n")
+ else:
+ pass
err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED]
err_states.extend(incorrect_states)
@@ -84,7 +86,7 @@ def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None):
self.server.request('GET', vm_uri[2] + "/state", headers = {'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
vm_state = str(resp.read())
- self.assertEqual(resp.status, 200, msg="ERROR getting VM info:" + output)
+ self.assertEqual(resp.status, 200, msg="ERROR getting VM info:" + vm_state)
self.assertFalse(vm_state in err_states, msg="ERROR waiting for a state. '%s' state was expected and '%s' was obtained in the VM %s" % (state, vm_state, vm_uri))
@@ -206,6 +208,32 @@ def test_40_addresource(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+ def test_45_addresource_noconfig(self):
+ self.server.request('POST', "/infrastructures/" + self.inf_id + "?context=0", body = RADL_ADD, headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR adding resources:" + output)
+
+ def test_47_removeresource_noconfig(self):
+ self.server.request('GET', "/infrastructures/" + self.inf_id + "?context=0", headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR getting the infrastructure info:" + output)
+ vm_ids = output.split("\n")
+
+ vm_uri = uriparse(vm_ids[1])
+ self.server.request('DELETE', vm_uri[2], headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR removing resources:" + output)
+
+ self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR getting the infrastructure info:" + output)
+ vm_ids = output.split("\n")
+ self.assertEqual(len(vm_ids), 2, msg="ERROR getting infrastructure info: Incorrect number of VMs(" + str(len(vm_ids)) + "). It must be 1")
+
def test_50_removeresource(self):
self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
@@ -229,6 +257,24 @@ def test_50_removeresource(self):
all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 300)
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+ def test_55_reconfigure(self):
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/reconfigure", headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR reconfiguring:" + output)
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 300)
+ self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_57_reconfigure_list(self):
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/reconfigure?vm_list=0", headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR reconfiguring:" + output)
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 300)
+ self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
def test_60_stop(self):
self.server.request('PUT', "/infrastructures/" + self.inf_id + "/stop", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
@@ -239,6 +285,8 @@ def test_60_stop(self):
self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be stopped (timeout).")
def test_70_start(self):
+ # To assure the VM is stopped
+ time.sleep(10)
self.server.request('PUT', "/infrastructures/" + self.inf_id + "/start", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
output = str(resp.read())
@@ -248,24 +296,26 @@ def test_70_start(self):
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).")
def test_80_stop_vm(self):
- self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/stop", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/vms/0/stop", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
output = str(resp.read())
self.assertEqual(resp.status, 200, msg="ERROR stopping the vm:" + output)
- all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0])
+ all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], ["/infrastructures/" + self.inf_id + "/vms/0"])
self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be stopped (timeout).")
def test_90_start_vm(self):
- self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/start", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
+ # To assure the VM is stopped
+ time.sleep(10)
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/vms/0/start", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
resp = self.server.getresponse()
output = str(resp.read())
self.assertEqual(resp.status, 200, msg="ERROR starting the vm:" + output)
- all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING], [0])
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING], ["/infrastructures/" + self.inf_id + "/vms/0"])
self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).")
- def test_100_destroy(self):
+ def test_95_destroy(self):
self.server.request('DELETE', "/infrastructures/" + self.inf_id, headers = {'Authorization' : self.auth_data})
resp = self.server.getresponse()
output = str(resp.read())
diff --git a/test/test.radl b/test/test.radl
index 5bfa6ea77..f63d10d0d 100644
--- a/test/test.radl
+++ b/test/test.radl
@@ -14,7 +14,9 @@ disk.0.os.name = 'linux' and
disk.0.applications contains (name = 'ansible.modules.micafer.hadoop') and
disk.0.applications contains (name='gmetad') and
disk.1.size=1GB and
-disk.1.device='hdb'
+disk.1.device='hdb' and
+disk.1.fstype='ext4' and
+disk.1.mount_path='/mnt/disk'
)
system wn (
@@ -32,7 +34,9 @@ disk.0.os.credentials.password = 'yoyoyo' and
disk.0.os.credentials.new.password = 'Tututu+01' and
disk.0.applications contains (name='ganglia') and
disk.1.size=1GB and
-disk.1.device='hdb'
+disk.1.device='hdb' and
+disk.1.fstype='ext4' and
+disk.1.mount_path='/mnt/disk'
)
configure hadoop (
@@ -43,24 +47,12 @@ configure hadoop (
@end
)
-configure hd (
+configure test (
@begin
---
- - vars:
- mount_point: /mnt/disk
- devices:
- - "/dev/sdb"
- - "/dev/xvdb"
- tasks:
- # truqillo para crear una particion de forma no interactiva con fdisk
- - shell: (echo n; echo p; echo 1; echo ; echo; echo w) | fdisk {{item}} creates={{item}}1
- with_first_found: devices
+ - tasks:
+ - shell: test -d "/mnt/disk/lost+found"
- - filesystem: fstype=ext3 dev={{item}}1
- with_first_found: devices
-
- - mount: name={{ mount_point }} src={{item}}1 state=mounted fstype=ext3
- with_first_found: devices
@end
)
@@ -69,6 +61,6 @@ deploy wn 1 one
contextualize (
system front configure hadoop step 1
- system front configure hd step 1
- system wn configure hd step 1
+ system front configure test step 1
+ system wn configure test step 1
)
diff --git a/test/test_connectors.py b/test/test_connectors.py
index 31e0308f9..4f8fb96db 100755
--- a/test/test_connectors.py
+++ b/test/test_connectors.py
@@ -42,7 +42,7 @@ class TestConnectors(unittest.TestCase):
""" List of VMs launched in the test """
#connectors_to_test = "all"
- connectors_to_test = ["docker"]
+ connectors_to_test = ["kub"]
""" Specify the connectors to test: "all": All the connectors specified in the auth file or a list with the IDs"""
@classmethod
@@ -115,10 +115,12 @@ def test_20_launch(self):
net_interface.0.dns_name = 'test' and
disk.0.os.flavour='ubuntu' and
disk.0.os.version>='12.04' and
+ disk.0.os.credentials.new.password = 'Passtest+01' and
#disk.0.os.flavour='centos' and
#disk.0.os.version>='6' and
disk.1.size=1GB and
- disk.1.device='hdb'
+ disk.1.device='hdb' and
+ disk.1.mount_path='/mnt/path'
)"""
radl = radl_parse.parse_radl(radl_data)
radl.check()
@@ -148,7 +150,7 @@ def test_30_updateVMInfo(self):
def wait_vm_state(self, cl, vm, state, timeout):
# wait the VM to be stopped
wait = 0
- err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNKNOWN]
+ err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED]
while vm.state != state and vm.state not in err_states and wait < timeout:
try:
(success, new_vm) = cl.updateVMInfo(vm, auth)
@@ -159,6 +161,8 @@ def wait_vm_state(self, cl, vm, state, timeout):
vm = new_vm
wait += 5
time.sleep(5)
+ else:
+ return False
return vm.state == state
@@ -189,6 +193,24 @@ def test_50_start(self):
wait_ok = self.wait_vm_state(cl,vm,VirtualMachine.RUNNING,90)
self.assertTrue(wait_ok, msg="ERROR: waiting start op VM for cloud: " + vm.cloud.id)
+ def test_55_alter(self):
+ radl_data = """
+ system test (
+ cpu.count>=2 and
+ memory.size>=1024m
+ )"""
+ radl = radl_parse.parse_radl(radl_data)
+ for vm in self.vm_list:
+ cl = vm.cloud.getCloudConnector()
+ (success, msg) = cl.alterVM(vm, radl, auth)
+ self.assertTrue(success, msg="ERROR: updating VM for cloud: " + vm.cloud.id + ": " + str(msg))
+ # get the updated vm
+ (success, new_vm) = cl.updateVMInfo(vm, auth)
+ new_cpu = new_vm.info.systems[0].getValue('cpu.count')
+ new_memory = new_vm.info.systems[0].getFeature('memory.size').getValue('M')
+ self.assertEqual(new_cpu, 2, msg="ERROR: updating VM for cloud: " + vm.cloud.id + ". CPU num must be 2.")
+ self.assertEqual(new_memory, 1024, msg="ERROR: updating VM for cloud: " + vm.cloud.id + ". Memory must be 1024.")
+
def test_60_finalize(self):
for vm in self.vm_list:
cl = vm.cloud.getCloudConnector()
diff --git a/test/test_simple.radl b/test/test_simple.radl
index 3d44ccbce..2a5aef9e7 100644
--- a/test/test_simple.radl
+++ b/test/test_simple.radl
@@ -5,7 +5,7 @@ cpu.arch='x86_64' and
cpu.count>=1 and
memory.size>=512m and
net_interface.0.connection = 'publica' and
-disk.0.image.url = 'one://ramses.i3m.upv.es/37' and
+disk.0.image.url = 'one://ramses.i3m.upv.es/95' and
disk.0.os.credentials.username = 'ubuntu' and
disk.0.os.credentials.password = 'yoyoyo' and
disk.0.os.name = 'linux'