diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 5eace3a85..d3d82d309 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -78,15 +78,15 @@ def check_running_pids(self, vms_configuring, failed_step): if step not in res: res[step] = [] res[step].append(vm) - self.log_debug("Ansible process to configure " + str(vm.im_id) + - " with PID " + vm.ctxt_pid + " is still running.") + self.log_info("Ansible process to configure " + str(vm.im_id) + + " with PID " + vm.ctxt_pid + " is still running.") else: - self.log_debug("Configuration process in VM: " + str(vm.im_id) + " finished.") + self.log_info("Configuration process in VM: " + str(vm.im_id) + " finished.") if vm.configured: - self.log_debug("Configuration process of VM %s success." % vm.im_id) + self.log_info("Configuration process of VM %s success." % vm.im_id) elif vm.configured is False: failed_step.append(step) - self.log_debug("Configuration process of VM %s failed." % vm.im_id) + self.log_info("Configuration process of VM %s failed." % vm.im_id) else: self.log_warn("Configuration process of VM %s in unfinished state." % vm.im_id) # Force to save the data to store the log data () @@ -97,14 +97,14 @@ def check_running_pids(self, vms_configuring, failed_step): if step not in res: res[step] = [] res[step].append(vm) - self.log_debug("Configuration process of master node: " + - str(vm.get_ctxt_process_names()) + " is still running.") + self.log_info("Configuration process of master node: " + + str(vm.get_ctxt_process_names()) + " is still running.") else: if vm.configured: - self.log_debug("Configuration process of master node successfully finished.") + self.log_info("Configuration process of master node successfully finished.") elif vm.configured is False: failed_step.append(step) - self.log_debug("Configuration process of master node failed.") + self.log_info("Configuration process of master node failed.") else: self.log_warn("Configuration process of master node in unfinished state.") # Force to save the data to store the log data @@ -116,9 +116,9 @@ def stop(self): self._stop_thread = True # put a task to assure to wake up the thread self.inf.add_ctxt_tasks([(-10, 0, None, None)]) - self.log_debug("Stop Configuration thread.") + self.log_info("Stop Configuration thread.") if self.ansible_process and self.ansible_process.is_alive(): - self.log_debug("Stopping pending Ansible process.") + self.log_info("Stopping pending Ansible process.") self.ansible_process.terminate() def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT): @@ -170,7 +170,7 @@ def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT): self.log_error("Error waiting all the VMs to have a correct IP") self.inf.set_configured(False) else: - self.log_debug("All the VMs have a correct IP") + self.log_info("All the VMs have a correct IP") self.inf.set_configured(True) return success @@ -180,7 +180,7 @@ def kill_ctxt_processes(self): Kill all the ctxt processes """ for vm in self.inf.get_vm_list(): - self.log_debug("Killing ctxt processes in VM: %s" % vm.id) + self.log_info("Killing ctxt processes in VM: %s" % vm.id) try: vm.kill_check_ctxt_process() except: @@ -188,7 +188,7 @@ def kill_ctxt_processes(self): vm.configured = None def run(self): - self.log_debug("Starting the ConfManager Thread") + self.log_info("Starting the ConfManager Thread") failed_step = [] last_step = None @@ -196,14 +196,14 @@ def run(self): while not self._stop_thread: if self.init_time + self.max_ctxt_time < time.time(): - self.log_debug("Max contextualization time passed. 
Exit thread.") + self.log_info("Max contextualization time passed. Exit thread.") self.inf.add_cont_msg("ERROR: Max contextualization time passed.") # Remove tasks from queue self.inf.reset_ctxt_tasks() # Kill the ansible processes self.kill_ctxt_processes() if self.ansible_process and self.ansible_process.is_alive(): - self.log_debug("Stopping pending Ansible process.") + self.log_info("Stopping pending Ansible process.") self.ansible_process.terminate() return @@ -219,14 +219,14 @@ def run(self): # stop the thread if the stop method has been called if self._stop_thread: - self.log_debug("Exit Configuration thread.") + self.log_info("Exit Configuration thread.") return # if this task is from a next step if last_step is not None and last_step < step: if failed_step and sorted(failed_step)[-1] < step: - self.log_debug("Configuration of process of step %s failed, " - "ignoring tasks of step %s." % (sorted(failed_step)[-1], step)) + self.log_info("Configuration of process of step %s failed, " + "ignoring tasks of step %s." % (sorted(failed_step)[-1], step)) else: # Add the task again to the queue only if the last step was # OK @@ -234,12 +234,12 @@ def run(self): # If there are any process running of last step, wait if last_step in vms_configuring and len(vms_configuring[last_step]) > 0: - self.log_debug("Waiting processes of step " + str(last_step) + " to finish.") + self.log_info("Waiting processes of step " + str(last_step) + " to finish.") time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: # if not, update the step, to go ahead with the new # step - self.log_debug("Step " + str(last_step) + " finished. Go to step: " + str(step)) + self.log_info("Step " + str(last_step) + " finished. Go to step: " + str(step)) last_step = step else: if isinstance(vm, VirtualMachine): @@ -247,12 +247,12 @@ def run(self): self.log_warn("VM ID " + str(vm.im_id) + " has been destroyed. Not launching new tasks for it.") elif vm.is_configured() is False: - self.log_debug("Configuration process of step %s failed, " - "ignoring tasks of step %s." % (last_step, step)) + self.log_info("Configuration process of step %s failed, " + "ignoring tasks of step %s." % (last_step, step)) # Check that the VM has no other ansible process # running elif vm.ctxt_pid: - self.log_debug("VM ID " + str(vm.im_id) + " has running processes, wait.") + self.log_info("VM ID " + str(vm.im_id) + " has running processes, wait.") # If there are, add the tasks again to the queue # Set the priority to a higher number to decrease the # priority enabling to select other items of the queue @@ -262,7 +262,7 @@ def run(self): time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: if not tasks: - self.log_debug("No tasks to execute. Ignore this step.") + self.log_info("No tasks to execute. 
Ignore this step.") else: # If not, launch it # Mark this VM as configuring @@ -318,11 +318,11 @@ def launch_ctxt_agent(self, vm, tasks): str(self.inf.id) + "/" + ip + "_" + str(vm.im_id) tmp_dir = tempfile.mkdtemp() - self.log_debug("Create the configuration file for the contextualization agent") + self.log_info("Create the configuration file for the contextualization agent") conf_file = tmp_dir + "/config.cfg" self.create_vm_conf_file(conf_file, vm, tasks, remote_dir) - self.log_debug("Copy the contextualization agent config file") + self.log_info("Copy the contextualization agent config file") # Copy the contextualization agent config file ssh = vm.get_ssh_ansible_master() @@ -332,10 +332,10 @@ def launch_ctxt_agent(self, vm, tasks): if vm.configured is None: if len(self.inf.get_vm_list()) > Config.VM_NUM_USE_CTXT_DIST: - self.log_debug("Using ctxt_agent_dist") + self.log_info("Using ctxt_agent_dist") ctxt_agent_command = "/ctxt_agent_dist.py " else: - self.log_debug("Using ctxt_agent") + self.log_info("Using ctxt_agent") ctxt_agent_command = "/ctxt_agent.py " vault_export = "" vault_password = vm.info.systems[0].getValue("vault.password") @@ -348,7 +348,7 @@ def launch_ctxt_agent(self, vm, tasks): " > " + remote_dir + "/stdout" + " 2> " + remote_dir + "/stderr < /dev/null & echo -n $!") - self.log_debug("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) + self.log_info("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) vm.ctxt_pid = pid vm.launch_check_ctxt_process() @@ -374,7 +374,7 @@ def generate_inventory(self, tmp_dir): """ Generate the ansible inventory file """ - self.log_debug("Create the ansible configuration file") + self.log_info("Create the ansible configuration file") res_filename = "hosts" ansible_file = tmp_dir + "/" + res_filename out = open(ansible_file, 'w') @@ -738,7 +738,7 @@ def configure_master(self): success = False cont = 0 while not self._stop_thread and not success and cont < Config.PLAYBOOK_RETRIES: - self.log_debug("Sleeping %s secs." % (cont ** 2 * 5)) + self.log_info("Sleeping %s secs." 
% (cont ** 2 * 5))
             time.sleep(cont ** 2 * 5)
             cont += 1
             try:
@@ -768,7 +768,7 @@ def configure_master(self):
 
         if configured_ok:
             remote_dir = Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/"
-            self.log_debug("Copy the contextualization agent files")
+            self.log_info("Copy the contextualization agent files")
             files = []
             files.append((Config.IM_PATH + "/SSH.py", remote_dir + "/IM/SSH.py"))
             files.append((Config.IM_PATH + "/SSHRetry.py", remote_dir + "/IM/SSHRetry.py"))
@@ -837,7 +837,7 @@ def wait_master(self):
             - Wait it to boot and has the SSH port open
         """
         if self.inf.radl.ansible_hosts:
-            self.log_debug("Usign ansible host: " + self.inf.radl.ansible_hosts[0].getHost())
+            self.log_info("Using ansible host: " + self.inf.radl.ansible_hosts[0].getHost())
             self.inf.set_configured(True)
             return True
 
@@ -919,7 +919,7 @@ def generate_playbooks_and_hosts(self):
         # Get the groups for the different VM types
         vm_group = self.inf.get_vm_list_by_system_name()
 
-        self.log_debug("Generating YAML, hosts and inventory files.")
+        self.log_info("Generating YAML, hosts and inventory files.")
         # Create the other configure sections (it may be included in other
         # configure)
         filenames = []
@@ -971,7 +971,7 @@ def generate_playbooks_and_hosts(self):
                     recipe_files.append((tmp_dir + "/" + f, remote_dir + "/" + f))
 
             self.inf.add_cont_msg("Copying YAML, hosts and inventory files.")
-            self.log_debug("Copying YAML files.")
+            self.log_info("Copying YAML files.")
             if self.inf.radl.ansible_hosts:
                 for ansible_host in self.inf.radl.ansible_hosts:
                     (user, passwd, private_key) = ansible_host.getCredentialValues()
@@ -1056,7 +1056,7 @@ def wait_vm_running(self, vm, timeout, relaunch=False):
                     self.log_warn("VM deleted by the user, Exit")
                     return False
 
-                self.log_debug("VM " + str(vm.id) + " is not running yet.")
+                self.log_info("VM " + str(vm.id) + " is not running yet.")
                 time.sleep(delay)
                 wait += delay
@@ -1109,13 +1109,13 @@ def wait_vm_ssh_acccess(self, vm, timeout):
             else:
                 vm.update_status(self.auth)
                 if vm.state == VirtualMachine.FAILED:
-                    self.log_debug('VM: ' + str(vm.id) + " is in state Failed. Does not wait for SSH.")
+                    self.log_warn('VM: ' + str(vm.id) + " is in state Failed. Does not wait for SSH.")
                     return False, "VM Failure."
                 ip = vm.getPublicIP()
                 if ip is not None:
                     ssh = vm.get_ssh()
-                    self.log_debug('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id))
+                    self.log_info('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id))
 
                     try:
                         connected = ssh.test_connectivity(5)
@@ -1128,14 +1128,14 @@ def wait_vm_ssh_acccess(self, vm, timeout):
                         return False, "Error connecting with ip: " + ip + " incorrect credentials."
 
                 if connected:
-                    self.log_debug('Works!')
+                    self.log_info('Works!')
                     return True, ""
                 else:
-                    self.log_debug('do not connect, wait ...')
+                    self.log_info('do not connect, wait ...')
                     wait += delay
                     time.sleep(delay)
             else:
-                self.log_debug('VM ' + str(vm.id) + ' with no IP')
+                self.log_warn('VM ' + str(vm.id) + ' with no IP')
                 # Update the VM info and wait to have a valid public IP
                 wait += delay
                 time.sleep(delay)
@@ -1232,7 +1232,7 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh):
                 os.symlink(os.path.abspath(
                     Config.RECIPES_DIR + "/utils"), tmp_dir + "/utils")
 
-        self.log_debug('Launching Ansible process.')
+        self.log_info('Launching Ansible process.')
         result = Queue()
         extra_vars = {'IM_HOST': 'all'}
         # store the process to terminate it later is Ansible does not finish correctly
@@ -1253,18 +1253,15 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh):
                 self.ansible_process = None
                 return (False, "Timeout. 
Ansible process terminated.") else: - self.log_debug('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT)) + self.log_info('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT)) time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL) wait += Config.CHECK_CTXT_PROCESS_INTERVAL - self.log_debug('Ansible process finished.') + self.log_info('Ansible process finished.') try: - timeout = Config.ANSIBLE_INSTALL_TIMEOUT - wait - if timeout < Config.CHECK_CTXT_PROCESS_INTERVAL: - timeout = Config.CHECK_CTXT_PROCESS_INTERVAL - self.log_debug('Get the result with a timeout of %d seconds.' % timeout) - _, (return_code, _), output = result.get(timeout=timeout) + self.log_info('Get the results of the Ansible process.') + _, (return_code, _), output = result.get(timeout=10) msg = output.getvalue() except: self.log_exception('Error getting ansible results.') @@ -1369,18 +1366,18 @@ def configure_ansible(self, ssh, tmp_dir): self.inf.add_cont_msg("Performing preliminary steps to configure Ansible.") - self.log_debug("Remove requiretty in sshd config") + self.log_info("Remove requiretty in sshd config") try: cmd = "sudo -S sed -i 's/.*requiretty$/#Defaults requiretty/' /etc/sudoers" if ssh.password: cmd = "echo '" + ssh.password + "' | " + cmd (stdout, stderr, _) = ssh.execute(cmd, 120) - self.log_debug(stdout + "\n" + stderr) + self.log_info(stdout + "\n" + stderr) except: self.log_exception("Error removing requiretty. Ignoring.") self.inf.add_cont_msg("Configure Ansible in the master VM.") - self.log_debug("Call Ansible to (re)configure in the master node") + self.log_info("Call Ansible to (re)configure in the master node") (success, msg) = self.call_ansible( tmp_dir, "inventory.cfg", ConfManager.MASTER_YAML, ssh) @@ -1388,7 +1385,7 @@ def configure_ansible(self, ssh, tmp_dir): self.log_error("Error configuring master node: " + msg + "\n\n") self.inf.add_cont_msg("Error configuring the master VM: " + msg + " " + tmp_dir) else: - self.log_debug("Ansible successfully configured in the master VM:\n" + msg + "\n\n") + self.log_info("Ansible successfully configured in the master VM:\n" + msg + "\n\n") self.inf.add_cont_msg("Ansible successfully configured in the master VM.") except Exception as ex: self.log_exception("Error configuring master node.") diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py index 84695b6d4..c8bc0891f 100644 --- a/IM/InfrastructureInfo.py +++ b/IM/InfrastructureInfo.py @@ -480,8 +480,7 @@ def Contextualize(self, auth, vm_list=None): break if not ctxt: - InfrastructureInfo.logger.debug( - "Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.") + InfrastructureInfo.logger.info("Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.") self.cont_out = "Contextualization disabled by the RADL." self.configured = True for vm in self.get_vm_list(): diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index 08d4344c5..81e04787b 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -159,7 +159,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c """Launch a group of deploys together.""" if not deploy_group: - InfrastructureManager.logger.warning("No VMs to deploy!") + InfrastructureManager.logger.warning("Inf ID: %s: No VMs to deploy!" 
% sel_inf.id) return if not deploys_group_cloud_list: cancel_deployment.append(Exception("No cloud provider available")) @@ -175,6 +175,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c concrete_system = concrete_systems[cloud_id][deploy.id][0] if not concrete_system: InfrastructureManager.logger.error( + "Inf ID: " + sel_inf.id + ": " + "Error, no concrete system to deploy: " + deploy.id + " in cloud: " + cloud_id + ". Check if a correct image is being used") exceptions.append("Error, no concrete system to deploy: " + deploy.id + @@ -183,20 +184,21 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c (username, _, _, _) = concrete_system.getCredentialValues() if not username: - raise IncorrectVMCrecentialsException( - "No username for deploy: " + deploy.id) + raise IncorrectVMCrecentialsException("No username for deploy: " + deploy.id) launch_radl = radl.clone() launch_radl.systems = [concrete_system.clone()] requested_radl = radl.clone() requested_radl.systems = [radl.get_system_by_name(concrete_system.name)] try: - InfrastructureManager.logger.debug( + InfrastructureManager.logger.info( + "Inf ID: " + sel_inf.id + ": " + "Launching %d VMs of type %s" % (remain_vm, concrete_system.name)) launched_vms = cloud.cloud.getCloudConnector(sel_inf).launch( sel_inf, launch_radl, requested_radl, remain_vm, auth) except Exception as e: - InfrastructureManager.logger.exception("Error launching some of the VMs: %s" % e) + InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + ": " + + "Error launching some of the VMs: %s" % e) exceptions.append("Error launching the VMs of type %s to cloud ID %s" " of type %s. Cloud Provider Error: %s" % (concrete_system.name, cloud.cloud.id, @@ -204,12 +206,14 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c launched_vms = [] for success, launched_vm in launched_vms: if success: - InfrastructureManager.logger.debug("VM successfully launched: " + str(launched_vm.id)) + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": " + + "VM successfully launched: " + str(launched_vm.id)) deployed_vm.setdefault(deploy, []).append(launched_vm) deploy.cloud_id = cloud_id remain_vm -= 1 else: InfrastructureManager.logger.warn( + "Inf ID: " + sel_inf.id + ": " + "Error launching some of the VMs: " + str(launched_vm)) exceptions.append("Error launching the VMs of type %s to cloud ID %s of type %s. %s" % ( concrete_system.name, cloud.cloud.id, cloud.cloud.type, str(launched_vm))) @@ -238,14 +242,14 @@ def get_infrastructure(inf_id, auth): """Return infrastructure info with some id if valid authorization provided.""" if inf_id not in IM.InfrastructureList.InfrastructureList.get_inf_ids(): - InfrastructureManager.logger.error("Error, incorrect infrastructure ID: %s" % inf_id) + InfrastructureManager.logger.error("Error, incorrect Inf ID: %s" % inf_id) raise IncorrectInfrastructureException() sel_inf = IM.InfrastructureList.InfrastructureList.get_infrastructure(inf_id) if not sel_inf.is_authorized(auth): - InfrastructureManager.logger.error("Access Error to infrastructure ID: %s" % inf_id) + InfrastructureManager.logger.error("Access Error to Inf ID: %s" % inf_id) raise UnauthorizedUserException() if sel_inf.deleted: - InfrastructureManager.logger.error("Infrastructure ID: %s is deleted." % inf_id) + InfrastructureManager.logger.error("Inf ID: %s is deleted." 
% inf_id) raise DeletedInfrastructureException() return sel_inf @@ -272,13 +276,12 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Reconfiguring the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Reconfiguring the Inf ID: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data else: radl = radl_parse.parse_radl(radl_data) - InfrastructureManager.logger.debug(radl) + InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -287,6 +290,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): for s in radl.configures: sel_inf.radl.add(s.clone(), "replace") InfrastructureManager.logger.info( + "Inf ID: " + sel_inf.id + ": " + "(Re)definition of %s %s" % (type(s), s.getId())) # and update contextualize @@ -307,7 +311,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): password=password, public_key=public_key, private_key=private_key, new=True) # Stick all virtual machines to be reconfigured - InfrastructureManager.logger.info("Contextualize the inf.") + InfrastructureManager.logger.info("Contextualize the Inf ID: " + sel_inf.id) # reset ansible_configured to force the re-installation of galaxy roles sel_inf.ansible_configured = None sel_inf.Contextualize(auth, vm_list) @@ -379,15 +383,14 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): failed_clouds = [] auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Adding resources to inf: " + str(inf_id)) + InfrastructureManager.logger.info("Adding resources to Inf ID: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data else: radl = radl_parse.parse_radl(radl_data) - InfrastructureManager.logger.debug(radl) + InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl)) radl.check() sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -398,8 +401,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # If any deploy is defined, only update definitions. if not radl.deploys: sel_inf.update_radl(radl, []) - InfrastructureManager.logger.warn( - "Infrastructure without any deploy. Exiting.") + InfrastructureManager.logger.warn("Inf ID: " + sel_inf.id + ": without any deploy. Exiting.") return [] for system in radl.systems: @@ -417,10 +419,11 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): requirements_radl, conflict="other", missing="other") except Exception: InfrastructureManager.logger.exception( + "Inf ID: " + sel_inf.id + ": " + "Error in the requirements of the app: " + app_to_install.getValue("name") + ". 
Ignore them.") - InfrastructureManager.logger.debug(requirements) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(requirements)) break # Get VMRC credentials @@ -489,8 +492,8 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Group virtual machines to deploy by network dependencies deploy_groups = InfrastructureManager._compute_deploy_groups(radl) - InfrastructureManager.logger.debug("Groups of VMs with dependencies") - InfrastructureManager.logger.debug(deploy_groups) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Groups of VMs with dependencies") + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + "\n" + str(deploy_groups)) # Sort by score the cloud providers # NOTE: consider fake deploys (vm_number == 0) @@ -507,8 +510,8 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): "are asked to be deployed in different cloud providers: %s" % deploy_group) elif len(suggested_cloud_ids) == 1: if suggested_cloud_ids[0] not in cloud_list: - InfrastructureManager.logger.debug("Cloud Provider list:") - InfrastructureManager.logger.debug(cloud_list) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Cloud Provider list:") + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + " - " + str(cloud_list)) raise Exception("No auth data for cloud with ID: %s" % suggested_cloud_ids[0]) else: cloud_list0 = [ @@ -594,8 +597,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Add the new virtual machines to the infrastructure sel_inf.update_radl(radl, [(d, deployed_vm[d], concrete_systems[d.cloud_id][d.id][0]) for d in deployed_vm]) - InfrastructureManager.logger.info( - "VMs %s successfully added to Inf id %s" % (new_vms, sel_inf.id)) + InfrastructureManager.logger.info("VMs %s successfully added to Inf ID: %s" % (new_vms, sel_inf.id)) # Let's contextualize! 
         if context and new_vms:
@@ -621,8 +623,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True):
         """
         auth = InfrastructureManager.check_auth_data(auth)
 
-        InfrastructureManager.logger.info(
-            "Removing the VMs: " + str(vm_list) + " from inf ID: '" + str(inf_id) + "'")
+        InfrastructureManager.logger.info("Removing the VMs: " + str(vm_list) + " from Inf ID: '" + str(inf_id) + "'")
 
         sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
@@ -643,7 +644,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True):
             if InfrastructureManager._delete_vm(vm, delete_list, auth, exceptions):
                 cont += 1
 
-        InfrastructureManager.logger.info("%d VMs successfully removed" % cont)
+        InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": %d VMs successfully removed" % cont)
 
         if context and cont > 0:
             # Now test again if the infrastructure is contextualizing
@@ -652,7 +653,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True):
             IM.InfrastructureList.InfrastructureList.save_data(inf_id)
 
         if exceptions:
-            InfrastructureManager.logger.exception("Error removing resources")
+            InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + ": Error removing resources")
             raise Exception("Error removing resources: %s" % exceptions)
 
         return cont
@@ -697,13 +698,14 @@ def GetVMInfo(inf_id, vm_id, auth, json_res=False):
         auth = InfrastructureManager.check_auth_data(auth)
 
         InfrastructureManager.logger.info(
-            "Get information about the vm: '" + str(vm_id) + "' from inf: " + str(inf_id))
+            "Get information about the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
 
         vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
 
         success = vm.update_status(auth)
         if not success:
-            InfrastructureManager.logger.warn(
+            InfrastructureManager.logger.debug(
+                "Inf ID: " + str(inf_id) + ": " +
                 "Information not updated. Using last information retrieved")
 
         if json_res:
@@ -727,12 +729,12 @@ def GetVMContMsg(inf_id, vm_id, auth):
         auth = InfrastructureManager.check_auth_data(auth)
 
         InfrastructureManager.logger.info(
-            "Get contextualization log of the vm: '" + str(vm_id) + "' from inf: " + str(inf_id))
+            "Get contextualization log of the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
 
         vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
         cont_msg = vm.get_cont_msg()
-        InfrastructureManager.logger.debug(cont_msg)
+        InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": " + cont_msg)
 
         return cont_msg
@@ -753,10 +755,11 @@ def AlterVM(inf_id, vm_id, radl_data, auth):
         auth = InfrastructureManager.check_auth_data(auth)
 
         InfrastructureManager.logger.info(
-            "Modifying the VM: '" + str(vm_id) + "' from inf: " + str(inf_id))
+            "Modifying the VM: '" + str(vm_id) + "' from Inf ID: " + str(inf_id))
 
         vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
         if not vm:
             InfrastructureManager.logger.info(
+                "Inf ID: " + str(inf_id) + ": " +
                 "VM does not exist or Access Error")
             raise Exception("VM does not exist or Access Error")
@@ -775,9 +778,8 @@ def AlterVM(inf_id, vm_id, radl_data, auth):
                 raise exception
             if not success:
                 InfrastructureManager.logger.warn(
-                    "Error getting the information about the VM " + str(vm_id) + ": " + str(alter_res))
-                InfrastructureManager.logger.warn(
-                    "Using last information retrieved")
+                    "Inf ID: " + str(inf_id) + ": " +
+                    "Error modifying the information about the VM " + str(vm_id) + ": " + str(alter_res))
 
             vm.update_status(auth)
             IM.InfrastructureList.InfrastructureList.save_data(inf_id)
@@ -798,13 +800,12 @@ def GetInfrastructureRADL(inf_id, auth):
         """
         auth = InfrastructureManager.check_auth_data(auth)
 
-        InfrastructureManager.logger.info(
-            "Getting RADL of the inf: " + str(inf_id))
+        InfrastructureManager.logger.info("Getting RADL of the Inf ID: " + str(inf_id))
 
         sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
 
         radl = str(sel_inf.get_radl())
-        InfrastructureManager.logger.debug(radl)
+        InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + radl)
         return radl
 
     @staticmethod
@@ -821,20 +822,16 @@ def GetInfrastructureInfo(inf_id, auth):
         """
         auth = InfrastructureManager.check_auth_data(auth)
 
-        InfrastructureManager.logger.info(
-            "Getting information about the inf: " + str(inf_id))
+        InfrastructureManager.logger.info("Getting information about the Inf ID: " + str(inf_id))
 
         sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
 
-        # : .. todo::
-        # : Return int instead
         res = [str(vm.im_id) for vm in sel_inf.get_vm_list()]
 
-        InfrastructureManager.logger.info("Information obtained successfully")
-        InfrastructureManager.logger.debug(res)
+        InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(res))
         return res
 
     @staticmethod
-    def GetInfrastructureContMsg(inf_id, auth):
+    def GetInfrastructureContMsg(inf_id, auth, headeronly=False):
         """
         Get cont msg of an infrastructure.
 
         Args:
 
         - inf_id(str): infrastructure id.
         - auth(Authentication): parsed authentication tokens.
+        - headeronly(bool): Flag to return only the header part of the infra log.
Return: a str with the cont msg """ auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Getting cont msg of the inf: " + str(inf_id)) + "Getting cont msg of the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) res = sel_inf.cont_out - for vm in sel_inf.get_vm_list(): - if vm.get_cont_msg(): - res += "VM " + str(vm.id) + ":\n" + vm.get_cont_msg() + "\n" - res += "***************************************************************************\n" + if not headeronly: + for vm in sel_inf.get_vm_list(): + if vm.get_cont_msg(): + res += "VM " + str(vm.id) + ":\n" + vm.get_cont_msg() + "\n" + res += "***************************************************************************\n" - InfrastructureManager.logger.debug(res) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + res) return res @staticmethod @@ -877,8 +876,7 @@ def GetInfrastructureState(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Getting state of the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting state of the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -918,20 +916,19 @@ def GetInfrastructureState(inf_id, auth): if state is None: state = VirtualMachine.UNKNOWN - InfrastructureManager.logger.debug( - "inf: " + str(inf_id) + " is in state: " + state) + InfrastructureManager.logger.info("Inf ID: " + str(inf_id) + " is in state: " + state) return {'state': state, 'vm_states': vm_states} @staticmethod def _stop_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Stopping the VM id: " + vm.id) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Stopping the VM id: " + vm.id) (success, msg) = vm.stop(auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be stopped") + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be stopped") exceptions.append(msg) @staticmethod @@ -948,8 +945,7 @@ def StopInfrastructure(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Stopping the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Stopping the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -971,20 +967,19 @@ def StopInfrastructure(inf_id, auth): msg += str(e) + "\n" raise Exception("Error stopping the infrastructure: %s" % msg) - InfrastructureManager.logger.info( - "Infrastructure successfully stopped") + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully stopped") return "" @staticmethod def _start_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Starting the VM id: " + vm.id) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Starting the VM id: " + vm.id) (success, msg) = vm.start(auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be restarted") + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be restarted") exceptions.append(msg) @staticmethod @@ -1001,8 +996,7 @@ def StartInfrastructure(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Starting the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Starting the Inf ID: " + 
str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -1024,8 +1018,7 @@ def StartInfrastructure(inf_id, auth): msg += str(e) + "\n" raise Exception("Error starting the infrastructure: %s" % msg) - InfrastructureManager.logger.info( - "Infrastructure successfully restarted") + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully restarted") return "" @staticmethod @@ -1043,8 +1036,7 @@ def StartVM(inf_id, vm_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Starting the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + InfrastructureManager.logger.info("Starting the VM id %s from the Inf ID: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) success = False @@ -1055,10 +1047,12 @@ def StartVM(inf_id, vm_id, auth): if not success: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s cannot be restarted: %s" % (vm_id, msg)) raise Exception("Error starting the VM: %s" % msg) else: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s successfully restarted" % vm_id) return "" @@ -1079,7 +1073,7 @@ def StopVM(inf_id, vm_id, auth): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Stopping the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + "Stopping the VM id %s from the Inf ID: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) success = False @@ -1090,10 +1084,12 @@ def StopVM(inf_id, vm_id, auth): if not success: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s cannot be stopped: %s" % (vm_id, msg)) raise Exception("Error stopping the VM: %s" % msg) else: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s successfully stopped" % vm_id) return "" @@ -1124,12 +1120,12 @@ def _delete_vm(vm, delete_list, auth, exceptions): last = InfrastructureManager.is_last_in_cloud(vm, delete_list, remain_vms) success = False try: - InfrastructureManager.logger.debug("Finalizing the VM id: " + str(vm.id)) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Finalizing the VM id: " + str(vm.id)) (success, msg) = vm.finalize(last, auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be finalized: %s" % msg) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be finalized: %s" % msg) exceptions.append(msg) return success @@ -1148,8 +1144,7 @@ def DestroyInfrastructure(inf_id, auth): # First check the auth data auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Destroying the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Destroying the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -1178,8 +1173,7 @@ def DestroyInfrastructure(inf_id, auth): sel_inf.delete() IM.InfrastructureList.InfrastructureList.save_data(inf_id) IM.InfrastructureList.InfrastructureList.remove_inf(sel_inf) - InfrastructureManager.logger.info( - "Infrastructure %s successfully destroyed" % inf_id) + InfrastructureManager.logger.info("Inf ID: %s: Successfully destroyed" % inf_id) return "" @staticmethod @@ -1203,12 +1197,10 @@ def check_im_user(auth): break return found except Exception: - InfrastructureManager.logger.exception( - "Incorrect format in the 
User DB file %s" % Config.USER_DB)
+            InfrastructureManager.logger.exception("Incorrect format in the User DB file %s" % Config.USER_DB)
                 return False
         else:
-            InfrastructureManager.logger.error(
-                "User DB file %s not found" % Config.USER_DB)
+            InfrastructureManager.logger.error("User DB file %s not found" % Config.USER_DB)
             return False
     else:
         return True
@@ -1340,21 +1332,18 @@ def CreateInfrastructure(radl, auth):
         inf.auth = Authentication(auth.getAuthInfo("InfrastructureManager"))
         IM.InfrastructureList.InfrastructureList.add_infrastructure(inf)
         IM.InfrastructureList.InfrastructureList.save_data(inf.id)
-        InfrastructureManager.logger.info(
-            "Creating new infrastructure with id: " + str(inf.id))
+        InfrastructureManager.logger.info("Creating new Inf ID: " + str(inf.id))
 
         # Add the resources in radl_data
         try:
             InfrastructureManager.AddResource(inf.id, radl, auth)
         except Exception as e:
-            InfrastructureManager.logger.exception(
-                "Error Creating Inf id " + str(inf.id))
+            InfrastructureManager.logger.exception("Error Creating Inf ID " + str(inf.id))
             inf.delete()
             IM.InfrastructureList.InfrastructureList.save_data(inf.id)
             IM.InfrastructureList.InfrastructureList.remove_inf(inf)
             raise e
-        InfrastructureManager.logger.info(
-            "Infrastructure id " + str(inf.id) + " successfully created")
+        InfrastructureManager.logger.info("Inf ID: " + str(inf.id) + ": Successfully created")
 
         return inf.id
@@ -1375,8 +1364,7 @@ def GetInfrastructureList(auth):
         auths = auth.getAuthInfo('InfrastructureManager')
         if not auths:
-            InfrastructureManager.logger.error(
-                "No correct auth data has been specified.")
+            InfrastructureManager.logger.error("No correct auth data has been specified.")
             raise InvaliddUserException()
 
         return IM.InfrastructureList.InfrastructureList.get_inf_ids(auth)
@@ -1388,7 +1376,7 @@ def ExportInfrastructure(inf_id, delete, auth_data):
         sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
         str_inf = sel_inf.serialize()
-        InfrastructureManager.logger.info("Exporting infrastructure id: " + str(sel_inf.id))
+        InfrastructureManager.logger.info("Exporting Inf ID: " + str(sel_inf.id))
         if delete:
             sel_inf.delete()
             IM.InfrastructureList.InfrastructureList.save_data(sel_inf.id)
@@ -1409,8 +1397,7 @@ def ImportInfrastructure(str_inf, auth_data):
         new_inf.auth = Authentication(auth.getAuthInfo("InfrastructureManager"))
         IM.InfrastructureList.InfrastructureList.add_infrastructure(new_inf)
-        InfrastructureManager.logger.info(
-            "Importing new infrastructure with id: " + str(new_inf.id))
+        InfrastructureManager.logger.info("Importing new infrastructure with Inf ID: " + str(new_inf.id))
         # Save the state
         IM.InfrastructureList.InfrastructureList.save_data(new_inf.id)
         return new_inf.id
@@ -1434,12 +1421,14 @@ def CreateDiskSnapshot(inf_id, vm_id, disk_num, image_name, auto_delete, auth):
         Return: a str with url of the saved snapshot.
         """
         auth = InfrastructureManager.check_auth_data(auth)
+        InfrastructureManager.logger.info("Creating a snapshot of VM id: %s Inf ID: %s" % (vm_id, inf_id))
         vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth)
 
         success, image_url = vm.create_snapshot(disk_num, image_name, auto_delete, auth)
         if not success:
-            InfrastructureManager.logger.error("Error creating snapshot: %s" % image_url)
+            InfrastructureManager.logger.error("Error creating a snapshot: %s of VM id: %s "
+                                               "Inf ID: %s" % (image_url, vm_id, inf_id))
             raise Exception("Error creating snapshot: %s" % image_url)
         else:
             return image_url
diff --git a/IM/REST.py b/IM/REST.py
index 09c31a17c..783399898 100644
--- a/IM/REST.py
+++ b/IM/REST.py
@@ -336,7 +336,17 @@ def RESTGetInfrastructureProperty(infid=None, prop=None):
 
     try:
         if prop == "contmsg":
-            res = InfrastructureManager.GetInfrastructureContMsg(infid, auth)
+            headeronly = False
+            if "headeronly" in bottle.request.params.keys():
+                str_headeronly = bottle.request.params.get("headeronly").lower()
+                if str_headeronly in ['yes', 'true', '1']:
+                    headeronly = True
+                elif str_headeronly in ['no', 'false', '0']:
+                    headeronly = False
+                else:
+                    return return_error(400, "Incorrect value in headeronly parameter")
+
+            res = InfrastructureManager.GetInfrastructureContMsg(infid, auth, headeronly)
         elif prop == "radl":
             res = InfrastructureManager.GetInfrastructureRADL(infid, auth)
         elif prop == "state":
diff --git a/IM/ServiceRequests.py b/IM/ServiceRequests.py
index b7a10fefc..ac12aa71b 100644
--- a/IM/ServiceRequests.py
+++ b/IM/ServiceRequests.py
@@ -317,9 +317,10 @@ class Request_GetInfrastructureContMsg(IMBaseRequest):
 
     def _call_function(self):
         self._error_mesage = "Error gettinf the Inf. cont msg"
-        (inf_id, auth_data) = self.arguments
+        (inf_id, auth_data, headeronly) = self.arguments
         return IM.InfrastructureManager.InfrastructureManager.GetInfrastructureContMsg(inf_id,
-                                                                                       Authentication(auth_data))
+                                                                                       Authentication(auth_data),
+                                                                                       headeronly)
 
 
 class Request_StartVM(IMBaseRequest):
diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py
index 3aab7d246..e4444992e 100644
--- a/IM/VirtualMachine.py
+++ b/IM/VirtualMachine.py
@@ -465,7 +465,7 @@ def update_status(self, auth, force=False):
                 updated = True
                 self.last_update = now
             elif self.creating:
-                self.log_debug("VM is in creation process, set pending state")
+                self.log_info("VM is in creation process, set pending state")
                 state = VirtualMachine.PENDING
             else:
                 self.log_error("Error updating VM status: %s" % new_vm)
@@ -633,8 +633,7 @@ def kill_check_ctxt_process(self):
         if self.ctxt_pid != self.WAIT_TO_PID:
             ssh = self.get_ssh_ansible_master()
             try:
-                self.log_debug(
-                    "Killing ctxt process with pid: " + str(self.ctxt_pid))
+                self.log_info("Killing ctxt process with pid: " + str(self.ctxt_pid))
 
                 # Try to get PGID to kill all child processes
                 pgkill_success = False
@@ -691,7 +690,7 @@ def check_ctxt_process(self):
                 ssh = self.get_ssh_ansible_master()
 
                 try:
-                    self.log_debug("Getting status of ctxt process with pid: " + str(ctxt_pid))
+                    self.log_info("Getting status of ctxt process with pid: " + str(ctxt_pid))
                     (_, _, exit_status) = ssh.execute("ps " + str(ctxt_pid))
                 except:
                     self.log_warn("Error getting status of ctxt process with pid: " + str(ctxt_pid))
@@ -710,7 +709,7 @@ def check_ctxt_process(self):
 
                 if exit_status != 0:
                     # The process has finished, get the outputs
-                    self.log_debug("The process %s has finished, get the outputs" % ctxt_pid)
+                    self.log_info("The process %s has finished, get the outputs" % ctxt_pid)
                     ctxt_log = self.get_ctxt_log(remote_dir, 
True) msg = self.get_ctxt_output(remote_dir, True) if ctxt_log: @@ -724,11 +723,11 @@ def check_ctxt_process(self): # dynamically if Config.UPDATE_CTXT_LOG_INTERVAL > 0 and wait > Config.UPDATE_CTXT_LOG_INTERVAL: wait = 0 - self.log_debug("Get the log of the ctxt process with pid: " + str(ctxt_pid)) + self.log_info("Get the log of the ctxt process with pid: " + str(ctxt_pid)) ctxt_log = self.get_ctxt_log(remote_dir) self.cont_out = initial_count_out + ctxt_log # The process is still running, wait - self.log_debug("The process %s is still running. wait." % ctxt_pid) + self.log_info("The process %s is still running. wait." % ctxt_pid) time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL) wait += Config.CHECK_CTXT_PROCESS_INTERVAL else: diff --git a/IM/__init__.py b/IM/__init__.py index 6c5497868..e27452c8d 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,5 +19,5 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'uriparse', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.6.3' +__version__ = '1.6.5' __author__ = 'Miguel Caballer' diff --git a/IM/ansible_utils/ansible_launcher.py b/IM/ansible_utils/ansible_launcher.py index 8ed588141..6981a4a90 100755 --- a/IM/ansible_utils/ansible_launcher.py +++ b/IM/ansible_utils/ansible_launcher.py @@ -157,6 +157,7 @@ def get_play_prereqs_2(self, options): variable_manager = VariableManager() variable_manager.extra_vars = self.extra_vars + variable_manager.options_vars = {'ansible_version': self.version_info(ansible_version)} # Add this to avoid the Ansible bug: no host vars as host is not in inventory # In version 2.0.1 it must be fixed @@ -186,9 +187,30 @@ def get_play_prereqs_2_4(self, options): # the code, ensuring a consistent view of global variables variable_manager = VariableManager(loader=loader, inventory=inventory) variable_manager.extra_vars = self.extra_vars + variable_manager.options_vars = {'ansible_version': self.version_info(ansible_version)} return loader, inventory, variable_manager + def version_info(self, ansible_version_string): + ''' return full ansible version info ''' + ansible_ver = ansible_version_string.split()[0] + ansible_versions = ansible_ver.split('.') + for counter in range(len(ansible_versions)): + if ansible_versions[counter] == "": + ansible_versions[counter] = 0 + try: + ansible_versions[counter] = int(ansible_versions[counter]) + except: + pass + if len(ansible_versions) < 3: + for counter in range(len(ansible_versions), 3): + ansible_versions.append(0) + return {'string': ansible_version_string.strip(), + 'full': ansible_ver, + 'major': ansible_versions[0], + 'minor': ansible_versions[1], + 'revision': ansible_versions[2]} + def launch_playbook_v2(self): ''' run ansible-playbook operations v2.X''' # create parser for CLI options diff --git a/IM/config.py b/IM/config.py index 4209a59d6..0f0113069 100644 --- a/IM/config.py +++ b/IM/config.py @@ -62,7 +62,7 @@ class Config: IM_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_FILE = '/var/log/im/inf.log' LOG_FILE_MAX_SIZE = 10485760 - LOG_LEVEL = "DEBUG" + LOG_LEVEL = "INFO" CONTEXTUALIZATION_DIR = '/usr/share/im/contextualization' RECIPES_DIR = CONTEXTUALIZATION_DIR + '/AnsibleRecipes' RECIPES_DB_FILE = CONTEXTUALIZATION_DIR + '/recipes_ansible.db' diff --git a/IM/connectors/Azure.py b/IM/connectors/Azure.py index 54c1ed3f2..2aaa87e90 100644 --- a/IM/connectors/Azure.py +++ b/IM/connectors/Azure.py @@ -148,20 +148,18 @@ def get_instance_type(self, 
system, credentials, subscription_id): instace_types = list(compute_client.virtual_machine_sizes.list(location)) instace_types.sort(key=lambda x: (x.number_of_cores, x.memory_in_mb, x.resource_disk_size_in_mb)) - res = None default = None for instace_type in instace_types: if instace_type.name == self.INSTANCE_TYPE: default = instace_type - # get the instance type with the lowest Memory - if res is None: - str_compare = "instace_type.number_of_cores " + cpu_op + " cpu " - str_compare += " and instace_type.memory_in_mb " + memory_op + " memory " - str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free" - if eval(str_compare): - if not instance_type_name or instace_type.name == instance_type_name: - return instace_type + str_compare = "instace_type.number_of_cores " + cpu_op + " cpu " + str_compare += " and instace_type.memory_in_mb " + memory_op + " memory " + str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free" + + if eval(str_compare): + if not instance_type_name or instace_type.name == instance_type_name: + return instace_type return default @@ -449,7 +447,7 @@ def get_azure_vm_create_json(self, storage_account, vm_name, nics, radl, instanc data_disks = [] while system.getValue("disk." + str(cont) + ".size"): disk_size = system.getFeature("disk." + str(cont) + ".size").getValue('G') - self.log_debug("Adding a %s GB disk." % disk_size) + self.log_info("Adding a %s GB disk." % disk_size) data_disks.append({ 'name': '%s_disk_%d' % (vm_name, cont), 'disk_size_gb': disk_size, @@ -548,7 +546,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun vm_name, vm_parameters) - self.log_debug("VM ID: %s created." % vm.id) + self.log_info("VM ID: %s created." % vm.id) inf.add_vm(vm) vms.append((True, (vm, async_vm_creation))) except Exception as ex: @@ -557,7 +555,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun # Delete Resource group and everything in it if group_name: - self.log_debug("Delete Resource group %s and everything in it." % group_name) + self.log_info("Delete Resource group %s and everything in it." 
% group_name) try: resource_client.resource_groups.delete(group_name).wait() except: @@ -587,7 +585,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): with inf._lock: # Create resource group for the Infrastructure if it does not exists if not self.get_rg("rg-%s" % inf.id, credentials, subscription_id): - self.log_debug("Creating Inf RG: %s" % "rg-%s" % inf.id) + self.log_info("Creating Inf RG: %s" % "rg-%s" % inf.id) resource_client.resource_groups.create_or_update("rg-%s" % inf.id, {'location': location}) # Create an storage_account per Infrastructure @@ -595,7 +593,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): credentials, subscription_id) if not storage_account: - self.log_debug("Creating storage account: %s" % storage_account_name) + self.log_info("Creating storage account: %s" % storage_account_name) try: storage_client = StorageManagementClient(credentials, subscription_id) storage_client.storage_accounts.create("rg-%s" % inf.id, @@ -606,7 +604,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): ).wait() except: self.log_exception("Error creating storage account: %s" % storage_account) - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: @@ -626,29 +624,29 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if success: vm, async_vm_creation = data try: - self.log_debug("Waiting VM ID %s to be created." % vm.id) + self.log_info("Waiting VM ID %s to be created." % vm.id) async_vm_creation.wait() res.append((True, vm)) remaining_vms -= 1 except: self.log_exception("Error waiting the VM %s." % vm.id) - self.log_debug("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) + self.log_info("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) if remaining_vms > 0: # Remove the general group - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: pass else: - self.log_debug("All VMs created successfully.") + self.log_info("All VMs created successfully.") return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) group_name = vm.id.split('/')[0] vm_name = vm.id.split('/')[1] @@ -661,9 +659,9 @@ def updateVMInfo(self, vm, auth_data): self.log_exception("Error getting the VM info: " + vm.id) return (False, "Error getting the VM info: " + vm.id + ". " + str(ex)) - self.log_debug("VM info: " + vm.id + " obtained.") + self.log_info("VM info: " + vm.id + " obtained.") vm.state = self.PROVISION_STATE_MAP.get(virtual_machine.provisioning_state, VirtualMachine.UNKNOWN) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name(virtual_machine.hardware_profile.vm_size, virtual_machine.location, credentials, subscription_id) @@ -699,11 +697,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = dns_client.zones.create_or_update(group_name, domain, {'location': 'global'}) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." 
% domain) if zone: record = None @@ -712,11 +710,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not record: - self.log_debug("Creating DNS record %s." % hostname) + self.log_info("Creating DNS record %s." % hostname) record_data = {"ttl": 300, "arecords": [{"ipv4_address": ip}]} dns_client.record_sets.create_or_update(group_name, domain, hostname, 'A', record_data) else: - self.log_debug("DNS record %s exists. Do not create." % hostname) + self.log_info("DNS record %s exists. Do not create." % hostname) return True except Exception: @@ -752,25 +750,25 @@ def setIPs(self, vm, network_profile, credentials, subscription_id): def finalize(self, vm, last, auth_data): try: - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) group_name = vm.id.split('/')[0] credentials, subscription_id = self.get_credentials(auth_data) resource_client = ResourceManagementClient(credentials, subscription_id) # Delete Resource group and everything in it if self.get_rg(group_name, credentials, subscription_id): - self.log_debug("Removing RG: %s" % group_name) + self.log_info("Removing RG: %s" % group_name) resource_client.resource_groups.delete(group_name).wait() else: - self.log_debug("RG: %s does not exist. Do not remove." % group_name) + self.log_info("RG: %s does not exist. Do not remove." % group_name) # if it is the last VM delete the RG of the Inf if last: if self.get_rg("rg-%s" % vm.inf.id, credentials, subscription_id): - self.log_debug("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) + self.log_info("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) resource_client.resource_groups.delete("rg-%s" % vm.inf.id) else: - self.log_debug("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) + self.log_info("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) except Exception as ex: self.log_exception("Error terminating the VM") diff --git a/IM/connectors/AzureClassic.py b/IM/connectors/AzureClassic.py index be0c5e07f..036c82687 100644 --- a/IM/connectors/AzureClassic.py +++ b/IM/connectors/AzureClassic.py @@ -517,7 +517,7 @@ def wait_operation_status(self, request_id, auth_data, delay=2, timeout=90): output = Operation(resp.text) status_str = output.Status # InProgress|Succeeded|Failed - self.log_debug("Operation string state: " + status_str) + self.log_info("Operation string state: " + status_str) else: self.log_error( "Error waiting operation to finish: Code %d. Msg: %s." 
% (resp.status_code, resp.text)) @@ -629,8 +629,7 @@ def get_storage_account(self, storage_account, auth_data): storage_info = StorageService(resp.text) return storage_info.StorageServiceProperties elif resp.status_code == 404: - self.log_debug( - "Storage " + storage_account + " does not exist") + self.log_info("Storage " + storage_account + " does not exist") return None else: self.log_warn( @@ -682,7 +681,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, error_msg)) break - self.log_debug("Creating the VM with id: " + service_name) + self.log_info("Creating the VM with id: " + service_name) # Create the VM to get the nodename vm = VirtualMachine(inf, service_name, self.cloud, radl, requested_radl, self) @@ -784,7 +783,7 @@ def get_instance_type(self, system, auth_data): return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) service_name = vm.id try: @@ -801,13 +800,13 @@ def updateVMInfo(self, vm, auth_data): return (False, "Error getting the VM info: " + vm.id + ". Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) else: - self.log_debug("VM info: " + vm.id + " obtained.") - self.log_debug(resp.text) + self.log_info("VM info: " + vm.id + " obtained.") + self.log_info(resp.text) vm_info = Deployment(resp.text) vm.state = self.get_vm_state(vm_info) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name( vm_info.RoleInstanceList.RoleInstance[0].InstanceSize, auth_data) @@ -857,7 +856,7 @@ def setIPs(self, vm, vm_info): vm.setIps(public_ips, private_ips) def finalize(self, vm, last, auth_data): - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) service_name = vm.id # Delete the service @@ -900,7 +899,7 @@ def call_role_operation(self, op, vm, auth_data): return (True, "") def stop(self, vm, auth_data): - self.log_debug("Stop VM: " + vm.id) + self.log_info("Stop VM: " + vm.id) op = """ @@ -910,7 +909,7 @@ def stop(self, vm, auth_data): return self.call_role_operation(op, vm, auth_data) def start(self, vm, auth_data): - self.log_debug("Start VM: " + vm.id) + self.log_info("Start VM: " + vm.id) op = """ @@ -935,7 +934,7 @@ def get_all_instance_types(self, auth_data): "Error getting Role Sizes. Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) return [] else: - self.log_debug("Role List obtained.") + self.log_info("Role List obtained.") role_sizes = RoleSizes(resp.text) res = [] for role_size in role_sizes.RoleSize: diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index de90f62db..8a35befe8 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -352,7 +352,7 @@ def _generate_mounts(self, system): disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path") if not disk_mount_path.startswith('/'): disk_mount_path = '/' + disk_mount_path - self.log_debug("Attaching a volume in %s" % disk_mount_path) + self.log_info("Attaching a volume in %s" % disk_mount_path) mount = {"Source": source, "Target": disk_mount_path} mount["Type"] = "volume" mount["ReadOnly"] = False @@ -443,10 +443,10 @@ def _delete_volumes(self, vm, auth_data): self.log_warn("Error deleting volume %s: %s." % (source, resp.text)) time.sleep(delay) else: - self.log_debug("Volume %s successfully deleted." % source) + self.log_info("Volume %s successfully deleted." 
% source) break else: - self.log_debug("Volume %s not created by the IM, not deleting it." % source) + self.log_info("Volume %s not created by the IM, not deleting it." % source) def _delete_networks(self, vm, auth_data): for net in vm.info.networks: @@ -465,7 +465,7 @@ def _delete_networks(self, vm, auth_data): if resp.status_code not in [204, 404]: self.log_error("Error deleting network %s: %s" % (net.id, resp.text)) else: - self.log_debug("Network %s deleted successfully" % net.id) + self.log_info("Network %s deleted successfully" % net.id) def _attach_cont_to_networks(self, vm, auth_data): system = vm.info.systems[0] @@ -493,7 +493,7 @@ def _attach_cont_to_networks(self, vm, auth_data): self.log_error("Error attaching cont %s to network %s: %s" % (vm.id, net_name, resp.text)) all_ok = False else: - self.log_debug("Cont %s attached to network %s" % (vm.id, net_name)) + self.log_info("Cont %s attached to network %s" % (vm.id, net_name)) return all_ok def _create_volumes(self, system, auth_data): @@ -515,7 +515,7 @@ def _create_volumes(self, system, auth_data): resp = self.create_request('GET', "/volumes/%s" % source, auth_data, headers) if resp.status_code == 200: # the volume already exists - self.log_debug("Volume named %s already exists." % source) + self.log_info("Volume named %s already exists." % source) else: body = json.dumps({"Name": source, "Driver": driver}) resp = self.create_request('POST', "/volumes/create", auth_data, headers, body) @@ -524,7 +524,7 @@ def _create_volumes(self, system, auth_data): self.log_error("Error creating volume %s: %s." % (source, resp.text)) else: system.setValue("disk." + str(cont) + ".created", "yes") - self.log_debug("Volume %s successfully created." % source) + self.log_info("Volume %s successfully created." % source) cont += 1 @@ -678,7 +678,7 @@ def _get_svc_state(self, svc_name, auth_data): if task["Status"]["State"] == "running": return VirtualMachine.RUNNING elif task["Status"]["State"] == "rejected": - self.log_debug("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) + self.log_info("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) return VirtualMachine.PENDING else: return VirtualMachine.PENDING diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 0023e3045..d1d633565 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -123,8 +123,7 @@ def concreteSystem(self, radl_system, auth_data): instance_type = self.get_instance_type(res_system) if not instance_type: - self.log_error( - "Error launching the VM, no instance type available for the requirements.") + self.log_error("Error launching the VM, no instance type available for the requirements.") self.log_debug(res_system) return [] else: @@ -295,7 +294,7 @@ def get_instance_type(self, radl, vpc=None): performance = float(cpu_perf.value) performance_op = cpu_perf.getLogOperator() else: - self.log_debug("Performance unit unknown: " + cpu_perf.unit + ". Ignore it") + self.log_warn("Performance unit unknown: " + cpu_perf.unit + ". 
Ignore it") instace_types = self.get_all_instance_types() @@ -390,7 +389,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): with inf._lock: sg = self._get_security_group(conn, sg_name) if not sg: - self.log_debug("Creating security group: " + sg_name) + self.log_info("Creating security group: " + sg_name) try: sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) except Exception as crex: @@ -400,7 +399,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): # if not raise the exception raise crex else: - self.log_debug("Security group: " + sg_name + " already created.") + self.log_info("Security group: " + sg_name + " already created.") if vpc: res.append(sg.id) @@ -455,8 +454,7 @@ def create_keypair(self, system, conn): public = system.getValue('disk.0.os.credentials.public_key') if private and public: if public.find('-----BEGIN CERTIFICATE-----') != -1: - self.log_debug( - "The RADL specifies the PK, upload it to EC2") + self.log_info("The RADL specifies the PK, upload it to EC2") public_key = base64.b64encode(public) conn.import_key_pair(keypair_name, public_key) else: @@ -466,7 +464,7 @@ def create_keypair(self, system, conn): system.setUserKeyCredentials( system.getCredentials().username, public, private) else: - self.log_debug("Creating the Keypair name: %s" % keypair_name) + self.log_info("Creating the Keypair name: %s" % keypair_name) keypair_file = self.KEYPAIR_DIR + '/' + keypair_name + '.pem' keypair = conn.create_key_pair(keypair_name) created = True @@ -535,7 +533,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData( system.getValue("disk.0.image.url")) - self.log_debug("Connecting with the region: " + region_name) + self.log_info("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) res = [] @@ -614,11 +612,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if spot: err_msg += " a spot instance " - self.log_debug("Launching a spot instance") + self.log_info("Launching a spot instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." 
% err_msg)) @@ -652,14 +649,12 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): product_description=operative_system, availability_zone=zone.name, max_results=1) - self.log_debug( - "Spot price history for the region " + zone.name) + self.log_debug("Spot price history for the region " + zone.name) self.log_debug(history) if history and history[0].price < historical_price: historical_price = history[0].price availability_zone = zone.name - self.log_debug( - "Launching the spot request in the zone " + availability_zone) + self.log_info("Launching the spot request in the zone " + availability_zone) # Force to use magnetic volumes bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping( @@ -685,8 +680,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -694,11 +688,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, "Error %s." % err_msg)) else: err_msg += " an ondemand instance " - self.log_debug("Launching ondemand instance") + self.log_info("Launching ondemand instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." % err_msg)) @@ -729,8 +722,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") inf.add_vm(vm) res.append((True, vm)) all_failed = False @@ -753,14 +745,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if sg_ids: try: for sgid in sg_ids: - self.log_debug("Remove the SG: %s" % sgid) + self.log_info("Remove the SG: %s" % sgid) conn.delete_security_group(group_id=sgid) except: self.log_exception("Error deleting SG.") if sg_names and sg_names[0] != 'default': try: for sgname in sg_names: - self.log_debug("Remove the SG: %s" % sgname) + self.log_info("Remove the SG: %s" % sgname) conn.delete_security_group(sgname) except: self.log_exception("Error deleting SG.") @@ -782,7 +774,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): cont = 0 err_states = ["error"] while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: - self.log_debug("State: " + str(volume.status)) + self.log_info("State: " + str(volume.status)) cont += 2 time.sleep(2) volume = conn.get_all_volumes([volume.id])[0] @@ -790,8 +782,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): if str(volume.status) == 'available': return volume else: - self.log_error( - "Error creating the volume %s, deleting it" % (volume.id)) + self.log_error("Error creating the volume %s, deleting it" % (volume.id)) conn.delete_volume(volume.id) return None @@ -816,13 +807,11 @@ def attach_volumes(self, instance, vm): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." 
+ str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume = self.create_volume( conn, int(disk_size), instance.placement) if volume: - self.log_debug( - "Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) conn.attach_volume( volume.id, instance.id, "/dev/" + disk_device) cont += 1 @@ -854,16 +843,14 @@ def delete_volumes(self, conn, volumes, instance_id, timeout=240): try: curr_vol = conn.get_all_volumes([volume_id])[0] if str(curr_vol.attachment_state()) == "attached": - self.log_debug( - "Detaching the volume " + volume_id + " from the instance " + instance_id) + self.log_info("Detaching the volume " + volume_id + " from the instance " + instance_id) conn.detach_volume(volume_id, instance_id, force=True) elif curr_vol.attachment_state() is None: - self.log_debug("Removing the volume " + volume_id) + self.log_info("Removing the volume " + volume_id) conn.delete_volume(volume_id) deleted = True else: - self.log_debug( - "State: " + str(curr_vol.attachment_state())) + self.log_info("State: " + str(curr_vol.attachment_state())) except Exception as ex: self.log_warn("Error removing the volume: " + str(ex)) @@ -912,18 +899,16 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): vm.elastic_ip = True try: pub_address = None - self.log_debug("Add an Elastic IP") + self.log_info("Add an Elastic IP") if fixed_ip: for address in instance.connection.get_all_addresses(): if str(address.public_ip) == fixed_ip: pub_address = address if pub_address: - self.log_debug( - "Setting a fixed allocated IP: " + fixed_ip) + self.log_info("Setting a fixed allocated IP: " + fixed_ip) else: - self.log_warn( - "Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). Ignore it.") + self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). 
Ignore it.") return None else: provider_id = self.get_net_provider_id(vm.info) @@ -948,8 +933,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address.release() return None else: - self.log_debug( - "The VM is not running, not adding an Elastic IP.") + self.log_info("The VM is not running, not adding an Elastic IP.") return None def delete_elastic_ips(self, conn, vm): @@ -965,8 +949,7 @@ def delete_elastic_ips(self, conn, vm): # Get the elastic IPs for address in conn.get_all_addresses(): if address.instance_id == instance_id: - self.log_debug( - "This VM has a Elastic IP, disassociate it") + self.log_info("This VM has a Elastic IP, disassociate it") address.disassociate() n = 0 @@ -982,11 +965,10 @@ def delete_elastic_ips(self, conn, vm): n += 1 if not found: - self.log_debug("Now release it") + self.log_info("Now release it") address.release() else: - self.log_debug( - "This is a fixed IP, it is not released") + self.log_info("This is a fixed IP, it is not released") except Exception: self.log_exception( "Error deleting the Elastic IPs to VM ID: " + str(vm.id)) @@ -1077,7 +1059,7 @@ def updateVMInfo(self, vm, auth_data): # deployed job_instance_id = None - self.log_debug("Check if the request has been fulfilled and the instance has been deployed") + self.log_info("Check if the request has been fulfilled and the instance has been deployed") job_sir_id = instance_id request_list = conn.get_all_spot_instance_requests() for sir in request_list: @@ -1090,7 +1072,7 @@ def updateVMInfo(self, vm, auth_data): break if job_instance_id: - self.log_debug("Request fulfilled, instance_id: " + str(job_instance_id)) + self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) instance_id = job_instance_id vm.id = region + ";" + instance_id vm.info.systems[0].setValue('instance_id', str(vm.id)) @@ -1161,22 +1143,22 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = conn.create_zone(domain) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("CREATE", fqdn, "A") change.add_value(ip) result = changes.commit() else: - self.log_debug("DNS record %s exists. Do not create." % fqdn) + self.log_info("DNS record %s exists. Do not create." % fqdn) return True except Exception: @@ -1205,14 +1187,14 @@ def del_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain) + self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("DNS record %s does not exists. Do not delete." % fqdn) + self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: - self.log_debug("Deleting DNS record %s." % fqdn) + self.log_info("Deleting DNS record %s." 
% fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("DELETE", fqdn, "A") change.add_value(ip) @@ -1237,8 +1219,7 @@ def cancel_spot_requests(self, conn, vm): for sir in request_list: if sir.instance_id == instance_id: conn.cancel_spot_instance_requests(sir.id) - self.log_debug( - "Spot instance request " + str(sir.id) + " deleted") + self.log_info("Spot instance request " + str(sir.id) + " deleted") break except Exception: self.log_exception("Error deleting the spot instance request") @@ -1348,7 +1329,7 @@ def delete_security_groups(self, conn, vm, timeout=90): all_vms_terminated = False if all_vms_terminated: - self.log_debug("Remove the SG: " + sg.name) + self.log_info("Remove the SG: " + sg.name) try: sg.revoke('tcp', 0, 65535, src_group=sg) sg.revoke('udp', 0, 65535, src_group=sg) @@ -1367,13 +1348,13 @@ def delete_security_groups(self, conn, vm, timeout=90): # Check if it has been deleted yet sg = self._get_security_group(conn, sg.name) if not sg: - self.log_debug("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) + self.log_info("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) deleted = True else: self.log_exception("Error deleting the SG.") else: # If there are more than 1, we skip this step - self.log_debug("There are active instances. Not removing the SG") + self.log_info("There are active instances. Not removing the SG") def stop(self, vm, auth_data): region_name = vm.id.split(";")[0] @@ -1661,7 +1642,6 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): snapshot_id = "" # Obtain the connection object to connect with EC2 - self.logger.debug("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) if not conn: @@ -1670,7 +1650,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): # Create the instance snapshot instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: - self.logger.debug("Creating snapshot: " + image_name) + self.log_info("Creating snapshot: " + image_name) snapshot_id = instance.create_image(image_name, description="AMI automatically generated by IM", no_reboot=True) @@ -1689,7 +1669,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) - self.logger.debug("Connecting with the region: " + region_name) + self.log_info("Deleting image: %s." 
% image_url) conn = self.get_connection(region_name, auth_data) success = conn.deregister_image(ami, delete_snapshot=True) # https://github.com/boto/boto/issues/3019 diff --git a/IM/connectors/GCE.py b/IM/connectors/GCE.py index 5f050a9b1..e84e9979e 100644 --- a/IM/connectors/GCE.py +++ b/IM/connectors/GCE.py @@ -91,8 +91,7 @@ def get_driver(self, auth_data, datacenter=None): self.driver = driver return driver else: - self.log_error( - "No correct auth data has been specified to GCE: username, password and project") + self.log_error("No correct auth data has been specified to GCE: username, password and project") self.log_debug(auth) raise Exception( "No correct auth data has been specified to GCE: username, password and project") @@ -131,8 +130,7 @@ def get_dns_driver(self, auth_data): self.dns_driver = driver return driver else: - self.log_error( - "No correct auth data has been specified to GCE: username, password and project") + self.log_error("No correct auth data has been specified to GCE: username, password and project") self.log_debug(auth) raise Exception( "No correct auth data has been specified to GCE: username, password and project") @@ -302,7 +300,7 @@ def request_external_ip(self, radl): n += 1 if requested_ips: - self.log_debug("The user requested for a fixed IP") + self.log_info("The user requested for a fixed IP") if len(requested_ips) > 1: self.log_warn( "The user has requested more than one fixed IP. Using only the first one") @@ -394,7 +392,7 @@ def create_firewall(self, inf, net_name, radl, driver): try: firewall = driver.ex_get_firewall(firewall_name) except ResourceNotFoundError: - self.log_debug("The firewall %s does not exist." % firewall_name) + self.log_info("The firewall %s does not exist." % firewall_name) except: self.log_exception("Error trying to get FW %s." % firewall_name) @@ -402,14 +400,14 @@ def create_firewall(self, inf, net_name, radl, driver): try: firewall.allowed = allowed firewall.update() - self.log_debug("Firewall %s existing. Rules updated." % firewall_name) + self.log_info("Firewall %s existing. Rules updated." % firewall_name) except: self.log_exception("Error updating the firewall %s." % firewall_name) return try: driver.ex_create_firewall(firewall_name, allowed, network=net_name) - self.log_debug("Firewall %s successfully created." % firewall_name) + self.log_info("Firewall %s successfully created." % firewall_name) except Exception as addex: self.log_warn("Exception creating FW: " + str(addex)) @@ -458,14 +456,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if not public or not private: # We must generate them - self.log_debug("No keys. Generating key pair.") + self.log_info("No keys. 
Generating key pair.") (public, private) = self.keygen() system.setValue('disk.0.os.credentials.private_key', private) metadata = {} if private and public: metadata = {"sshKeys": username + ":" + public} - self.log_debug("Setting ssh for user: " + username) + self.log_info("Setting ssh for user: " + username) self.log_debug(metadata) startup_script = self.get_cloud_init_data(radl) @@ -503,7 +501,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): vm.info.systems[0].setValue('instance_id', str(vm.id)) vm.info.systems[0].setValue('instance_name', str(vm.id)) inf.add_vm(vm) - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") res.append((True, vm)) @@ -530,7 +528,7 @@ def finalize(self, vm, last, auth_data): if not success: return (False, "Error destroying node: " + vm.id) - self.log_debug("VM " + str(vm.id) + " successfully destroyed") + self.log_info("VM " + str(vm.id) + " successfully destroyed") else: self.log_warn("VM " + str(vm.id) + " not found.") return (True, "") @@ -546,14 +544,14 @@ def delete_firewall(self, vm, driver): try: firewall = driver.ex_get_firewall(firewall_name) except ResourceNotFoundError: - self.log_debug("Firewall %s does not exist. Do not delete." % firewall_name) + self.log_info("Firewall %s does not exist. Do not delete." % firewall_name) except: self.log_exception("Error trying to get FW %s." % firewall_name) if firewall: try: firewall.destroy() - self.log_debug("Firewall %s successfully deleted." % firewall_name) + self.log_info("Firewall %s successfully deleted." % firewall_name) except: self.log_exception("Error trying to delete FW %s." % firewall_name) @@ -583,7 +581,7 @@ def delete_disks(self, node): self.log_error( "Error destroying the volume: " + vol_name) except ResourceNotFoundError: - self.log_debug("The volume: " + vol_name + " does not exists. Ignore it.") + self.log_info("The volume: " + vol_name + " does not exists. Ignore it.") success = True except: self.log_exception( @@ -666,8 +664,7 @@ def attach_volumes(self, vm, node): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." + str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume_name = "im-%s" % str(uuid.uuid1()) location = self.get_node_location(node) @@ -675,7 +672,7 @@ def attach_volumes(self, vm, node): int(disk_size), volume_name, location=location) success = self.wait_volume(volume) if success: - self.log_debug("Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) try: volume.attach(node, disk_device) except: @@ -758,20 +755,20 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = [z for z in driver.iterate_zones() if z.domain == domain] if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = driver.create_zone(domain) else: zone = zone[0] - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = [r for r in driver.iterate_records(zone) if r.name == fqdn] if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) driver.create_record(fqdn, zone, RecordType.A, dict(ttl=300, rrdatas=[ip])) else: - self.log_debug("DNS record %s exists. Do not create." 
% fqdn) + self.log_info("DNS record %s exists. Do not create." % fqdn) return True except Exception: @@ -800,20 +797,20 @@ def del_dns_entries(self, vm, auth_data): domain += "." zone = [z for z in driver.iterate_zones() if z.domain == domain] if not zone: - self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain) + self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: zone = zone[0] fqdn = hostname + "." + domain record = [r for r in driver.iterate_records(zone) if r.name == fqdn] if not record: - self.log_debug("DNS record %s does not exists. Do not delete." % fqdn) + self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: record = record[0] if record.data['rrdatas'] != [ip]: - self.log_debug("DNS record %s mapped to unexpected IP: %s != %s." - "Do not delete." % (fqdn, record.data['rrdatas'], ip)) + self.log_info("DNS record %s mapped to unexpected IP: %s != %s." + "Do not delete." % (fqdn, record.data['rrdatas'], ip)) else: - self.log_debug("Deleting DNS record %s." % fqdn) + self.log_info("Deleting DNS record %s." % fqdn) if not driver.delete_record(record): self.log_error("Error deleting DNS record %s." % fqdn) diff --git a/IM/connectors/Kubernetes.py b/IM/connectors/Kubernetes.py index 90d92a98a..7cadb4779 100644 --- a/IM/connectors/Kubernetes.py +++ b/IM/connectors/Kubernetes.py @@ -105,8 +105,7 @@ def get_api_version(self, auth_data): self.log_exception( "Error connecting with Kubernetes API server") - self.log_warn( - "Error getting a compatible API version. Setting the default one.") + self.log_warn("Error getting a compatible API version. Setting the default one.") self.log_debug("Using %s API version." % version) return version @@ -221,7 +220,7 @@ def _create_volumes(self, apiVersion, namespace, system, pod_name, auth_data, pe disk_mount_path = '/' + disk_mount_path if not disk_device.startswith('/'): disk_device = '/' + disk_device - self.log_debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path)) + self.log_info("Binding a volume in %s to %s" % (disk_device, disk_mount_path)) name = "%s-%d" % (pod_name, cont) if persistent: @@ -536,8 +535,7 @@ def alterVM(self, vm, radl, auth_data): changed = True if not changed: - self.log_debug( - "Nothing changes in the kubernetes pod: " + str(vm.id)) + self.log_info("Nothing changes in the kubernetes pod: " + str(vm.id)) return (True, vm) # Create the container diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index cfcd0d792..de23f76fd 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -61,6 +61,8 @@ class OCCICloudConnector(CloudConnector): def __init__(self, cloud_info, inf): self.add_public_ip_count = 0 self.keystone_token = None + self.keystone_tenant = None + self.keystone_project = None if cloud_info.path == "/": cloud_info.path = "" CloudConnector.__init__(self, cloud_info, inf) @@ -253,11 +255,11 @@ def manage_public_ips(self, vm, auth_data): """ Manage public IPs in the VM """ - self.log_debug("The VM does not have public IP trying to add one.") + self.log_info("The VM does not have public IP trying to add one.") if self.add_public_ip_count < self.MAX_ADD_IP_COUNT: success, msgs = self.add_public_ip(vm, auth_data) if success: - self.log_debug("Public IP successfully added.") + self.log_info("Public IP successfully added.") else: self.add_public_ip_count += 1 self.log_warn("Error adding public IP the VM: %s (%d/%d)\n" % (msgs, @@ -268,7 +270,7 @@ def manage_public_ips(self, vm, auth_data): self.MAX_ADD_IP_COUNT) 
else: self.log_error("Error adding public IP the VM: Max number of retries reached.") - self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n" + # self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n" # this is a total fail, stop contextualization vm.configured = False vm.inf.set_configured(False) @@ -399,7 +401,7 @@ def add_public_ip(self, vm, auth_data): if resp.status_code != 201 and resp.status_code != 200: return (False, output) else: - self.log_debug("Public IP added from pool %s" % network_name) + self.log_info("Public IP added from pool %s" % network_name) return (True, vm.id) except Exception: self.log_exception("Error connecting with OCCI server") @@ -584,11 +586,11 @@ def create_volumes(self, system, auth_data): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) storage_name = "im-disk-%s" % str(uuid.uuid1()) success, volume_id = self.create_volume(int(disk_size), storage_name, auth_data) if success: - self.log_debug("Volume id %s sucessfully created." % volume_id) + self.log_info("Volume id %s sucessfully created." % volume_id) volumes.append((disk_device, volume_id)) system.setValue("disk." + str(cont) + ".provider_id", volume_id) # TODO: get the actual device_id from OCCI @@ -619,7 +621,7 @@ def wait_volume_state(self, volume_id, auth_data, wait_state="online", timeout=1 wait += delay success, storage_info = self.get_volume_info(volume_id, auth_data) state = self.get_occi_attribute_value(storage_info, 'occi.storage.state') - self.log_debug("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state)) + self.log_info("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state)) if success and state == wait_state: online = True elif not success: @@ -678,7 +680,7 @@ def create_volume(self, size, name, auth_data): self.log_exception("Error creating volume") return False, str(ex) - def detach_volume(self, volume, auth_data, timeout=180, delay=5): + def detach_volume(self, volume, auth_data, timeout=60, delay=5): auth = self.get_auth_header(auth_data) headers = {'Accept': 'text/plain', 'Connection': 'close'} if auth: @@ -691,18 +693,18 @@ def detach_volume(self, volume, auth_data, timeout=180, delay=5): wait = 0 while wait < timeout: try: - self.log_debug("Detaching volume: %s" % storage_id) + self.log_info("Detaching volume: %s" % storage_id) resp = self.create_request('GET', link, auth_data, headers) if resp.status_code == 200: - self.log_debug("Volume link %s exists. Try to delete it." % link) + self.log_info("Volume link %s exists. Try to delete it." % link) resp = self.create_request('DELETE', link, auth_data, headers) if resp.status_code in [204, 200]: - self.log_debug("Successfully detached. Wait it to be deleted.") + self.log_info("Successfully detached. 
Wait it to be deleted.") else: self.log_error("Error detaching volume: %s" + resp.reason + "\n" + resp.text) elif resp.status_code == 404: # wait until the resource does not exist - self.log_debug("Successfully detached") + self.log_info("Successfully detached") return (True, "") else: self.log_warn("Error detaching volume: %s" + resp.reason + "\n" + resp.text) @@ -732,26 +734,26 @@ def delete_volume(self, storage_id, auth_data, timeout=180, delay=5): wait = 0 while wait < timeout: - self.log_debug("Delete storage: %s" % storage_id) + self.log_info("Delete storage: %s" % storage_id) try: resp = self.create_request('GET', storage_id, auth_data, headers) if resp.status_code == 200: - self.log_debug("Storage %s exists. Try to delete it." % storage_id) + self.log_info("Storage %s exists. Try to delete it." % storage_id) resp = self.create_request('DELETE', storage_id, auth_data, headers) if resp.status_code == 404: - self.log_debug("It does not exist.") + self.log_info("It does not exist.") return (True, "") elif resp.status_code == 409: - self.log_debug("Error deleting the Volume. It seems that it is still " - "attached to a VM: %s" % resp.text) + self.log_info("Error deleting the Volume. It seems that it is still " + "attached to a VM: %s" % resp.text) elif resp.status_code != 200 and resp.status_code != 204: self.log_warn("Error deleting the Volume: " + resp.reason + "\n" + resp.text) else: - self.log_debug("Successfully deleted") + self.log_info("Successfully deleted") return (True, "") elif resp.status_code == 404: - self.log_debug("It does not exist.") + self.log_info("It does not exist.") return (True, "") else: self.log_warn("Error deleting storage: %s" + resp.reason + "\n" + resp.text) @@ -1063,19 +1065,19 @@ def add_new_disks(self, vm, radl, auth_data): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) success, volume_id = self.create_volume(int(disk_size), "im-disk-%d" % cont, auth_data) if success: - self.log_debug("Volume id %s successfuly created." % volume_id) + self.log_info("Volume id %s successfuly created." % volume_id) # let's wait the storage to be ready "online" wait_ok = self.wait_volume_state(volume_id, auth_data) if not wait_ok: - self.log_debug("Error waiting volume %s. Deleting it." % volume_id) + self.log_info("Error waiting volume %s. Deleting it." % volume_id) self.delete_volume(volume_id, auth_data) return (False, "Error waiting volume %s. Deleting it." % volume_id) else: - self.log_debug("Attaching to the instance") + self.log_info("Attaching to the instance") attached = self.attach_volume(vm, volume_id, disk_device, mount_path, auth_data) if attached: orig_system.setValue("disk." 
+ str(cont) + ".size", disk_size, "G") @@ -1115,7 +1117,7 @@ def remove_public_ip(self, vm, auth_data): """ Remove/Detach public IP from VM """ - self.log_debug("Removing Public IP from VM %s" % vm.id) + self.log_info("Removing Public IP from VM %s" % vm.id) auth = self.get_auth_header(auth_data) headers = {'Accept': 'text/plain', 'Connection': 'close'} @@ -1135,7 +1137,7 @@ def remove_public_ip(self, vm, auth_data): return (True, "No public IP to delete.") resp = self.create_request('DELETE', link, auth_data, headers) if resp.status_code in [404, 204, 200]: - self.log_debug("Successfully removed") + self.log_info("Successfully removed") return (True, "") else: self.log_error("Error removing public IP: " + resp.reason + "\n" + resp.text) @@ -1305,9 +1307,9 @@ def check_keystone_token(occi, keystone_uri, version, auth): headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Auth-Token': occi.keystone_token, 'Connection': 'close'} if version == 2: - url = "%s/v2.0" % keystone_uri + url = "%s/v2.0/tenants" % keystone_uri elif version == 3: - url = "%s/v3" % keystone_uri + url = "%s/v3/auth/tokens" % keystone_uri else: return None resp = occi.create_request_static('GET', url, auth, headers) @@ -1334,11 +1336,11 @@ def get_keystone_token(occi, keystone_uri, auth): return token if version == 2: - occi.logger.debug("Getting Keystone v2 token") + occi.logger.info("Getting Keystone v2 token") occi.keystone_token = KeyStoneAuth.get_keystone_token_v2(occi, keystone_uri, auth) return occi.keystone_token elif version == 3: - occi.logger.debug("Getting Keystone v3 token") + occi.logger.info("Getting Keystone v3 token") occi.keystone_token = KeyStoneAuth.get_keystone_token_v3(occi, keystone_uri, auth) return occi.keystone_token else: @@ -1412,20 +1414,25 @@ def get_keystone_token_v2(occi, keystone_uri, auth): occi.logger.exception("Error obtaining Keystone Token.") raise Exception("Error obtaining Keystone Token: %s" % str(output)) - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', - 'X-Auth-Token': token_id, 'Connection': 'close'} - url = "%s/v2.0/tenants" % keystone_uri - resp = occi.create_request_static('GET', url, auth, headers) - resp.raise_for_status() + if occi.keystone_tenant is None: + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', + 'X-Auth-Token': token_id, 'Connection': 'close'} + url = "%s/v2.0/tenants" % keystone_uri + resp = occi.create_request_static('GET', url, auth, headers) + resp.raise_for_status() - # format: -> "{\"tenants_links\": [], \"tenants\": - # [{\"description\": \"egi fedcloud\", \"enabled\": true, \"id\": - # \"fffd98393bae4bf0acf66237c8f292ad\", \"name\": \"egi\"}]}" - output = resp.json() + # format: -> "{\"tenants_links\": [], \"tenants\": + # [{\"description\": \"egi fedcloud\", \"enabled\": true, \"id\": + # \"fffd98393bae4bf0acf66237c8f292ad\", \"name\": \"egi\"}]}" + output = resp.json() + tenants = output['tenants'] + else: + tenants = [occi.keystone_tenant] tenant_token_id = None + # retry for each available tenant (usually only one) - for tenant in output['tenants']: + for tenant in tenants: body = '{"auth":{"voms":true,"tenantName":"' + str(tenant['name']) + '"}}' headers = {'Accept': 'application/json', 'Content-Type': 'application/json', @@ -1444,6 +1451,8 @@ def get_keystone_token_v2(occi, keystone_uri, auth): # \"metadata\": {\"is_admin\": 0, \"roles\": []}}}" output = resp.json() if 'access' in output: + occi.logger.info("Using tenant: %s" % tenant["name"]) + 
occi.keystone_tenant = tenant
                 tenant_token_id = str(output['access']['token']['id'])
                 break
@@ -1474,40 +1483,58 @@ def get_keystone_token_v3(occi, keystone_uri, auth):
         token = resp.headers['X-Subject-Token']

-        headers = {'Accept': 'application/json', 'Content-Type': 'application/json',
-                   'X-Auth-Token': token, 'Connection': 'close'}
-        url = "%s/v3/auth/projects" % keystone_uri
-        resp = occi.create_request_static('GET', url, auth, headers)
-        resp.raise_for_status()
-
-        output = resp.json()
+        if occi.keystone_project is None:
+            headers = {'Accept': 'application/json', 'Content-Type': 'application/json',
+                       'X-Auth-Token': token, 'Connection': 'close'}
+            url = "%s/v3/auth/projects" % keystone_uri
+            resp = occi.create_request_static('GET', url, auth, headers)
+            resp.raise_for_status()
+
+            output = resp.json()
+
+            if len(output['projects']) == 1:
+                # If there is only one, get the first project
+                projects = output['projects']
+            elif len(output['projects']) > 1:
+                # If there are more than one
+                if auth and "project" in auth:
+                    project_found = None
+                    for elem in output['projects']:
+                        if elem['id'] == auth["project"] or elem['name'] == auth["project"]:
+                            project_found = elem
+                    if project_found:
+                        projects = [project_found]
+                    else:
+                        projects = output['projects']
+                        occi.logger.warn("Keystone 3 project %s not found." % auth["project"])
+                else:
+                    # No project specified in the auth data: try all of them
+                    projects = output['projects']
+            else:
+                # No projects available for this user
+                projects = []
+        else:
+            projects = [occi.keystone_project]

-        if len(output['projects']) == 1:
-            # If there are only one get the first tenant
-            project = output['projects'].pop()
-        if len(output['projects']) >= 1:
-            # If there are more than one
-            if auth and "project" in auth:
-                project_found = None
-                for elem in output['projects']:
-                    if elem['id'] == auth["project"] or elem['name'] == auth["project"]:
-                        project_found = elem
-                if project_found:
-                    project = project_found
-                else:
-                    project = output['projects'].pop()
-                    self.log_warn("Keystone 3 project %s not found. Using first one." % auth["project"])
% auth["project"]) - - # get scoped token for allowed project - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', - 'X-Auth-Token': token, 'Connection': 'close'} - body = {"auth": {"identity": {"methods": ["token"], "token": {"id": token}}, - "scope": {"project": {"id": project["id"]}}}} - url = "%s/v3/auth/tokens" % keystone_uri - resp = occi.create_request_static('POST', url, auth, headers, json.dumps(body)) - resp.raise_for_status() - token = resp.headers['X-Subject-Token'] - return token + scoped_token = None + for project in projects: + # get scoped token for allowed project + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', + 'X-Auth-Token': token, 'Connection': 'close'} + body = {"auth": {"identity": {"methods": ["token"], "token": {"id": token}}, + "scope": {"project": {"id": project["id"]}}}} + url = "%s/v3/auth/tokens" % keystone_uri + resp = occi.create_request_static('POST', url, auth, headers, json.dumps(body)) + if resp.status_code in [200, 201, 202]: + occi.logger.info("Using project: %s" % project["name"]) + occi.keystone_project = project + scoped_token = resp.headers['X-Subject-Token'] + break + + if not scoped_token: + occi.logger.error("Not project accesible for the user.") + + return scoped_token except Exception as ex: occi.logger.exception("Error obtaining Keystone v3 Token.") raise Exception("Error obtaining Keystone v3 Token: %s" % str(ex)) diff --git a/IM/connectors/OpenNebula.py b/IM/connectors/OpenNebula.py index 40d5e057e..fd223381d 100644 --- a/IM/connectors/OpenNebula.py +++ b/IM/connectors/OpenNebula.py @@ -418,7 +418,7 @@ def create_security_groups(self, inf, radl, auth_data): outport.get_remote_port())) if sg_template: - self.log_debug("Creating security group: %s" % sg_name) + self.log_info("Creating security group: %s" % sg_name) sg_template = ("NAME = %s\n" % sg_name) + sg_template success, sg_id, _ = server.one.secgroup.allocate(session_id, sg_template) if not success: @@ -470,14 +470,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 if all_failed: - self.log_debug("All VMs failed, delete Security Groups.") + self.log_info("All VMs failed, delete Security Groups.") for sg in sgs.values(): - self.log_debug("Delete Security Group: %d." % sg) + self.log_info("Delete Security Group: %d." % sg) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." % sg_id) return res def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): @@ -497,17 +497,17 @@ def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): # Get the SG to delete sg = self._get_security_group(sg_name, auth_data) if not sg: - self.log_debug("The SG %s does not exist. Do not delete it." % sg_name) + self.log_info("The SG %s does not exist. Do not delete it." % sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") deleted = True else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." 
% sg_id) except Exception as ex: self.log_warn("Error deleting the SG: %s" % str(ex)) @@ -1103,7 +1103,7 @@ def attach_new_disks(self, vm, system, session_id): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) success, volume_id = self.attach_volume(vm, int(disk_size), disk_device, disk_fstype, session_id) if success: orig_system.setValue("disk." + str(cont) + ".size", disk_size, "M") diff --git a/IM/connectors/OpenStack.py b/IM/connectors/OpenStack.py index 6caed7184..5afbb82bd 100644 --- a/IM/connectors/OpenStack.py +++ b/IM/connectors/OpenStack.py @@ -147,6 +147,47 @@ def get_driver(self, auth_data): self.driver = driver return driver + def get_instance_type(self, sizes, radl): + """ + Get the name of the instance type to launch to LibCloud + + Arguments: + - size(list of :py:class: `libcloud.compute.base.NodeSize`): List of sizes on a provider + - radl(str): RADL document with the requirements of the VM to get the instance type + Returns: a :py:class:`libcloud.compute.base.NodeSize` with the instance type to launch + """ + instance_type_name = radl.getValue('instance_type') + + cpu = 1 + cpu_op = ">=" + if radl.getFeature('cpu.count'): + cpu = radl.getValue('cpu.count') + cpu_op = radl.getFeature('cpu.count').getLogOperator() + + memory = 1 + memory_op = ">=" + if radl.getFeature('memory.size'): + memory = radl.getFeature('memory.size').getValue('M') + memory_op = radl.getFeature('memory.size').getLogOperator() + disk_free = 0 + disk_free_op = ">=" + if radl.getValue('disk.0.free_size'): + disk_free = radl.getFeature('disk.0.free_size').getValue('G') + disk_free_op = radl.getFeature('memory.size').getLogOperator() + + # get the node size with the lowest price, vcpus and memory + sizes.sort(key=lambda x: (x.price, x.vcpus, x.ram)) + for size in sizes: + str_compare = "size.ram " + memory_op + " memory" + str_compare += " and size.vcpus " + cpu_op + " cpu " + str_compare += " and size.disk " + disk_free_op + " disk_free" + if eval(str_compare): + if not instance_type_name or size.name == instance_type_name: + return size + + self.log_error("No compatible size found") + return None + def concreteSystem(self, radl_system, auth_data): image_urls = radl_system.getValue("disk.0.image.url") if not image_urls: @@ -352,7 +393,7 @@ def setIPsFromInstance(self, vm, node): self.log_error("Error adding a floating IP: Max number of retries reached.") self.error_messages += "Error adding a floating IP: Max number of retries reached.\n" else: - self.log_debug("The VM is not running, not adding Elastic/Floating IPs.") + self.log_info("The VM is not running, not adding Elastic/Floating IPs.") def update_system_info_from_instance(self, system, instance_type): """ @@ -386,7 +427,7 @@ def get_networks(self, driver, radl): # site has IP pools, we do not need to assign a network to this interface # it will be assigned with a floating IP if network.isPublic() and num_nets > 1 and pool_names: - self.log_debug("Public IP to be assigned with a floating IP. Do not set a net.") + self.log_info("Public IP to be assigned with a floating IP. 
Do not set a net.") else: # First check if the user has specified a provider ID if net_provider_id: @@ -469,7 +510,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): elif not system.getValue("disk.0.os.credentials.password"): keypair_name = "im-%d" % int(time.time() * 100.0) - self.log_debug("Create keypair: %s" % keypair_name) + self.log_info("Create keypair: %s" % keypair_name) keypair = driver.create_key_pair(keypair_name) keypair_created = True public_key = keypair.public_key @@ -500,7 +541,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i = 0 all_failed = True while i < num_vm: - self.log_debug("Creating node") + self.log_info("Creating node") node = None retries = 0 @@ -520,7 +561,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): # Add the keypair name to remove it later if keypair_name: vm.keypair = keypair_name - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -532,10 +573,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if all_failed: if keypair_created: # only delete in case of the user do not specify the keypair name - self.log_debug("Deleting keypair: %s." % keypair_name) + self.log_info("Deleting keypair: %s." % keypair_name) driver.delete_key_pair(keypair) for sg in sgs: - self.log_debug("Deleting security group: %s." % sg.id) + self.log_info("Deleting security group: %s." % sg.id) driver.ex_delete_security_group(sg) return res @@ -584,11 +625,11 @@ def manage_elastic_ips(self, vm, node, public_ips): # It is a fixed IP if ip not in public_ips: # It has not been created yet, do it - self.log_debug("Asking for a fixed ip: %s." % ip) + self.log_info("Asking for a fixed ip: %s." % ip) success, msg = self.add_elastic_ip(vm, node, ip, pool_name) else: if num >= len(public_ips): - self.log_debug("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) + self.log_info("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) success, msg = self.add_elastic_ip(vm, node, None, pool_name) if not success: @@ -608,7 +649,7 @@ def get_floating_ip(self, pool): if not ip.node_id: is_private = any([IPAddress(ip.ip_address) in IPNetwork(mask) for mask in Config.PRIVATE_NET_MASKS]) if is_private: - self.log_debug("Floating IP found %s, but it is private. Ignore." % ip.ip_address) + self.log_info("Floating IP found %s, but it is private. Ignore." % ip.ip_address) else: return True, ip @@ -625,7 +666,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): Returns: a :py:class:`OpenStack_1_1_FloatingIpAddress` added or None if some problem occur. """ try: - self.log_debug("Add an Floating IP") + self.log_info("Add an Floating IP") pool = self.get_ip_pool(node.driver, pool_name) if not pool: @@ -633,7 +674,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): msg = "Incorrect pool name: %s." % pool_name else: msg = "No pools available." - self.log_debug("No Floating IP assigned: %s" % msg) + self.log_info("No Floating IP assigned: %s" % msg) return False, msg if node.driver.ex_list_floating_ip_pools(): @@ -659,7 +700,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): if is_private: self.log_error("Error getting a Floating IP from pool %s. The IP is private." 
% pool_name) - self.log_debug("We have created it, so release it.") + self.log_info("We have created it, so release it.") floating_ip.delete() return False, "Error attaching a Floating IP to the node. Private IP returned." @@ -681,7 +722,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): if not attached: self.log_error("Error attaching a Floating IP to the node.") - self.log_debug("We have created it, so release it.") + self.log_info("We have created it, so release it.") floating_ip.delete() return False, "Error attaching a Floating IP to the node." return True, floating_ip @@ -720,7 +761,7 @@ def create_security_groups(self, driver, inf, radl): with inf._lock: sg = self._get_security_group(driver, sg_name) if not sg: - self.log_debug("Creating security group: %s" % sg_name) + self.log_info("Creating security group: %s" % sg_name) sg = driver.ex_create_security_group(sg_name, "Security group created by the IM") res.append(sg) @@ -792,14 +833,14 @@ def finalize(self, vm, last, auth_data): self.delete_security_groups(node, vm.inf) else: # If this is not the last vm, we skip this step - self.log_debug("There are active instances. Not removing the SG") + self.log_info("There are active instances. Not removing the SG") except: self.log_exception("Error deleting security groups.") if not success: return (False, "Error destroying node: " + vm.id) - self.log_debug("VM " + str(vm.id) + " successfully destroyed") + self.log_info("VM " + str(vm.id) + " successfully destroyed") else: self.log_warn("VM " + str(vm.id) + " not found.") @@ -819,11 +860,11 @@ def delete_security_groups(self, node, inf, timeout=90, delay=10): # Get the SG to delete sg = self._get_security_group(node.driver, sg_name) if not sg: - self.log_debug("The SG %s does not exist. Do not delete it." % sg_name) + self.log_info("The SG %s does not exist. Do not delete it." % sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) node.driver.ex_delete_security_group(sg) deleted = True except Exception as ex: diff --git a/README b/README index a6fcf63d0..d27b55c4d 100644 --- a/README +++ b/README @@ -12,21 +12,11 @@ contextualization system to enable the installation and configuration of all the user required applications providing the user with a fully functional infrastructure. -This version evolved in the INDIGO-Datacloud project (https://www.indigo-datacloud.eu/). It is used by the [INDIGO Orchestrator](https://github.com/indigo-dc/orchestrator) to contact Cloud sites to finally deploy the VMs/containers. - -New features added: - -+ Support for TOSCA 1.0 YAML specification with the custom node types described in https://github.com/indigo-dc/tosca-types/blob/master/custom_types.yaml -+ Support for the Identity and Access Management Service (IAM). -+ Support for the Token Translation Service (TTS) to support IAM authetication on OpenNebula Clouds. -+ Improvements to access OpenStack Clouds that support IAM. - Read the documentation and more at http://www.grycap.upv.es/im. There is also an Infrastructure Manager YouTube reproduction list with a set of videos with demos of the functionality of the platform: https://www.youtube.com/playlist?list=PLgPH186Qwh_37AMhEruhVKZSfoYpHkrUp. - 1. INSTALLATION =============== @@ -53,22 +43,14 @@ However, if you install IM from sources you should install: * The Requests library for Python, typically available as the 'python-requests' package. - * The TOSCA-Parser library for Python. 
Currently it must be used the INDIGO version located at
-   https://github.com/indigo-dc/tosca-parser but we are working to improve the mainstream version
-   to enable to use it with the IM.
-
 * Ansible (http://www.ansibleworks.com/) to configure nodes in the infrastructures.
-   In particular, Ansible 1.4.2+ must be installed. The current recommended version is 1.9.4 untill the 2.X versions become stable.
+   In particular, Ansible 2.0.0+ must be installed.
    To ensure the functionality the following values must be set in the ansible.cfg file:

    [defaults]
    transport = smart
    host_key_checking = False
-   # For old versions 1.X
-   sudo_user = root
-   sudo_exe = sudo
-
-   # For new versions 2.X
+   nocolor = 1
    become_user = root
    become_method = sudo

@@ -87,24 +69,26 @@ However, if you install IM from sources you should install:

 1.2 OPTIONAL PACKAGES
 ---------------------

-The Bottle framework (http://bottlepy.org/) is used for the REST API. It is typically available as the 'python-bottle' package.
+The Bottle framework (http://bottlepy.org/) is used for the REST API.
+It is typically available as the 'python-bottle' package.

-he CherryPy Web framework (http://www.cherrypy.org/), is needed for the REST API.
+The CherryPy Web framework (http://www.cherrypy.org/) is needed for the REST API.
 It is typically available as the 'python-cherrypy' or 'python-cherrypy3' package.
 In newer versions (9.0 and later) the functionality has been moved to the 'cheroot'
 library (https://github.com/cherrypy/cheroot) it can be installed using pip.

-Boto (http://boto.readthedocs.org) 2.29.0 or later is used as interface to Amazon EC2.
-It is available as package named python-boto in Debian based distributions.
-It can also be downloaded from `boto GitHub repository (https://github.com/boto/boto).
-Download the file and copy the boto subdirectory into the IM install path.
+Apache-libcloud (http://libcloud.apache.org/) 0.17 or later is used in the
+LibCloud, OpenStack and GCE connectors.

-Apache-libcloud (http://libcloud.apache.org/) 0.17 or later is used in the LibCloud, OpenStack and GCE connectors.
+Boto (http://boto.readthedocs.org) 2.29.0 or later is used as interface to
+Amazon EC2. It is available as a package named ``python-boto`` in Debian based
+distributions. It can also be downloaded from the boto GitHub repository (https://github.com/boto/boto).
+Download the file and copy the boto subdirectory into the IM install path.

 In case of using the a MySQL DB as the backend to store IM data. The Python interface to MySQL
 must be installed, typically available as the package 'python-mysqldb' or 'MySQL-python' package.
-In case of using Python 3 use the PyMySQL package, available as the package 'python3-pymysql'
-on debian systems or PyMySQL package in pip.
+In case of using Python 3 use the PyMySQL package, available as the package 'python3-pymysql' on
+debian systems or PyMySQL package in pip.

 In case of using the SSL secured version of the REST API pyOpenSSL must be installed.

@@ -117,58 +101,113 @@ Microsoft Azure platform.

 1.3.1 Using installer (Recommended option)
 ------------------------------------------

-The IM provides a script to install the IM in one single step.
+The IM provides a script to install the IM in one single step (using pip).
 You only need to execute the following command:

 $ wget -qO- https://raw.githubusercontent.com/grycap/im/master/install.sh | bash

-It works for the most recent version of the main Linux distributions (RHEL/CentOS 7, Ubuntu 14/16).
+It works for the most recent version of the main Linux distributions (RHEL, CentOS, Fedora, Ubuntu, Debian).
+In case your O.S. does not work with this install script, see the next sections.

-1.3.2 From RPM package
-----------------------
+1.3.2 From PIP
+--------------
+
+**WARNING**: In some GNU/Linux distributions (RHEL 6 or equivalents) you must uninstall
+the packages python-paramiko and python-crypto before installing the IM with pip.

-You must have the epel repository enabled:
+    $ rpm -e python-crypto python-paramiko --nodeps
+
+First you need to install the pip tool and some packages needed to compile some of the IM requirements.
+To install them in Debian and Ubuntu based distributions, do::
+
+    $ apt update
+    $ apt install gcc python-dev libffi-dev libssl-dev python-pip sshpass python-pysqlite2 python-requests
+
+In Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux,
+Fedora, etc.), do:

     $ yum install epel-release
+    $ yum install which gcc python-devel libffi-devel openssl-devel python-pip sshpass python-sqlite3dbm
+
+Due to some problems with the dependencies of the apache-libcloud package in some systems (such as Ubuntu 14.04 or CentOS 6)
+this package has to be installed manually:

-Then you have to enable the INDIGO - DataCloud packages repositories. See full instructions
-at https://indigo-dc.gitbooks.io/indigo-datacloud-releases/content/generic_installation_and_configuration_guide_1.html#id4.
-Briefly you have to download the repo file from http://repo.indigo-datacloud.eu/repos/1/indigo1.repo in your /etc/yum.repos.d folder.
+    $ pip install backports-ssl_match_hostname

- $ cd /etc/yum.repos.d
- $ wget http://repo.indigo-datacloud.eu/repos/1/indigo1.repo
+Then you only have to call the install command of the pip tool with the IM package:

-And then install the GPG key for the INDIGO repository:
+    $ pip install IM

- $ rpm --import http://repo.indigo-datacloud.eu/repository/RPM-GPG-KEY-indigodc
+Pip will also install any missing prerequisites, so Ansible 1.4.2 or later will
+be installed in the system. Some of the optional packages are also installed; please check
+whether the IM features you need require any of the packages listed in the OPTIONAL PACKAGES section.

-Finally install the IM package.
+You must also remember to modify the ansible.cfg file settings as specified in the
+REQUISITES section.

- $ yum install IM
-Azure python SDK is not available in CentOS. So if you need the Azure plugin you have to manually install them using pip:
+1.3.3 From RPM packages (RH6 and RH7)
+-------------------------------------

- $ pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource
+Download the RPM package from GitHub (https://github.com/grycap/im/releases/latest).
+Also remember to download the RPM of the RADL package from GitHub (https://github.com/grycap/radl/releases/latest).
+You must have the epel repository enabled::

     $ yum install epel-release
+
+Then install the downloaded RPMs::

-1.3.3 From Deb package
-----------------------
+    $ yum localinstall IM-*.rpm RADL-*.rpm
+
+Azure python SDK is not available in CentOS. So if you need the Azure plugin you have to install it manually using pip::

-You have to enable the INDIGO - DataCloud packages repositories. See full instructions
-at https://indigo-dc.gitbooks.io/indigo-datacloud-releases/content/generic_installation_and_configuration_guide_1.html#id4.
-Briefly you have to download the list file from http://repo.indigo-datacloud.eu/repos/1/indigo1-ubuntu14_04.list in your /etc/apt/sources.list.d folder.
+    $ pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns

-    $ cd /etc/apt/sources.list.d
-    $ wget http://repo.indigo-datacloud.eu/repos/1/indigo1-ubuntu14_04.list
+1.3.4 From Deb package (Tested with Ubuntu 14.04 and 16.04)
+-----------------------------------------------------------

-And then install the GPG key for INDIGO the repository:
+Download the Deb package from GitHub (https://github.com/grycap/im/releases/latest).
+Also remember to download the Deb of the RADL package from GitHub (https://github.com/grycap/radl/releases/latest).

-    $ wget -q -O - http://repo.indigo-datacloud.eu/repository/RPM-GPG-KEY-indigodc | sudo apt-key add -
+In Ubuntu 14.04 some requisites are not available for the "trusty" version or are too old, so you have to install them manually.
+You can download them from their corresponding PPAs, but here you have some direct links:
+
+  * python-backports.ssl-match-hostname: http://archive.ubuntu.com/ubuntu/pool/universe/b/backports.ssl-match-hostname/python-backports.ssl-match-hostname_3.4.0.2-1_all.deb
+  * python-scp: http://archive.ubuntu.com/ubuntu/pool/universe/p/python-scp/python-scp_0.10.2-1_all.deb
+  * python-libcloud: http://archive.ubuntu.com/ubuntu/pool/universe/libc/libcloud/python-libcloud_0.20.0-1_all.deb
+  * python-xmltodict: http://archive.ubuntu.com/ubuntu/pool/universe/p/python-xmltodict/python-xmltodict_0.9.2-3_all.deb

-Finally install the IM package.
+The Azure Python SDK is also not available in Ubuntu 16.04, so if you need the Azure plugin you have to install it manually.
+You can download the packages from their corresponding PPAs, but here you have some direct links:

-    $ apt update
-    $ apt install python-im

+  * python-msrestazure: https://launchpad.net/ubuntu/+archive/primary/+files/python-msrestazure_0.4.3-1_all.deb
+  * python-msrest: https://launchpad.net/ubuntu/+archive/primary/+files/python-msrest_0.4.4-1_all.deb
+  * python-azure: https://launchpad.net/ubuntu/+archive/primary/+files/python-azure_2.0.0~rc6+dfsg-2_all.deb
+
+It is also recommended to configure the Ansible PPA to install the newest versions of Ansible (see Ansible installation - http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu):
+
+    $ sudo apt-get install software-properties-common
+    $ sudo apt-add-repository ppa:ansible/ansible
+    $ sudo apt-get update
+
+Put all the .deb files in the same directory and do:
+
+    $ sudo dpkg -i *.deb
+    $ sudo apt install -f -y
+
+1.3.5 From source
+-----------------
+
+Select a proper path where the IM service will be installed (e.g. /usr/local/im,
+/opt/im or other). This path will be called IM_PATH.
+
+    $ tar xvzf IM-X.XX.tar.gz
+    $ chown -R root:root IM-X.XX
+    $ mv IM-X.XX /usr/local
+
+Finally you must copy (or link) the $IM_PATH/scripts/im file to the /etc/init.d directory.
+
+    $ ln -s /usr/local/im/scripts/im /etc/init.d/im

 1.4 CONFIGURATION
 -----------------
@@ -198,7 +237,7 @@ or set the name of the script file (im_service.py) if the file is in the PATH
 (pip puts the im_service.py file in the PATH as default).

 Check the parameters in $IM_PATH/etc/im.cfg or /etc/im/im.cfg.
Please pay attention
-to the next configuration variables, as they are the most important
+to the next configuration variables, as they are the most important:

 DATA_DB - must be set to the URL to access the database to store the IM data.
          Be careful if you have two different instances of the IM service running in the same machine!!.
@@ -229,14 +268,22 @@ And then set the variables: XMLRCP_SSL_* or REST_SSL_* to your certificates path

 2. DOCKER IMAGE
 ===============

-A Docker image named `indigodatacloud/im` has been created to make easier the deployment of an IM service using the
-default configuration. Information about this image can be found here: https://hub.docker.com/r/indigodatacloud/im/.
+A Docker image named `grycap/im` has been created to ease the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.

 How to launch the IM service using docker:

-    $ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im indigodatacloud/im
+    $ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im grycap/im

-You can use the IM as an entry point of an OpenNebula cloud provider as a TOSCA compliant endpoint for your site:
+To make the IM data persistent you also have to specify a persistent location for the IM database using the
+IM_DATA_DB environment variable and add a volume::

-    $ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_SINGLE_SITE_ONE_HOST=oneserver.com --name im indigodatacloud/im
-
+    $ sudo docker run -d -p 8899:8899 -p 8800:8800 -v "/some_local_path/db:/db" -e IM_DATA_DB=/db/inf.dat --name im grycap/im
+
+You can also specify an external MySQL server to store IM data using the IM_DATA_DB environment variable::
+
+    $ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_DATA_DB=mysql://username:password@server/db_name --name im grycap/im
+
+Or you can add a volume with all the IM configuration::
+
+    $ sudo docker run -d -p 8899:8899 -p 8800:8800 -v "/some_local_path/im.cfg:/etc/im/im.cfg" --name im grycap/im
diff --git a/README.md b/README.md
index 32fb63346..b9ecba1f9 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
- IM - Infrastructure Manager (With TOSCA Support)
-=================================================
+ IM - Infrastructure Manager
+============================

 * Version ![PyPI](https://img.shields.io/pypi/v/im.svg)
-* PyPI ![PypI](https://img.shields.io/pypi/dm/IM.svg)
+* Build Status [![Build Status](http://jenkins.i3m.upv.es/buildStatus/icon?job=grycap/im-unit)](http://jenkins.i3m.upv.es/job/grycap/job/im-unit/)

 IM is a tool that deploys complex and customized virtual infrastructures on
 IaaS Cloud deployments (such as AWS, OpenStack, etc.). It eases the access and the
@@ -14,16 +14,326 @@ contextualization system to enable the installation and configuration of all the
 user required applications providing the user with a fully functional
 infrastructure.

-This version evolved in the INDIGO-Datacloud project (https://www.indigo-datacloud.eu/). It is used by the [INDIGO Orchestrator](https://github.com/indigo-dc/orchestrator) to contact Cloud sites to finally deploy the VMs/containers.
+Read the documentation and more at http://www.grycap.upv.es/im.

-New features added:
+There is also an Infrastructure Manager YouTube playlist with a set of videos with demos
+of the functionality of the platform: https://www.youtube.com/playlist?list=PLgPH186Qwh_37AMhEruhVKZSfoYpHkrUp.
-+ Support for TOSCA 1.0 YAML specification with the custom node types described in https://github.com/indigo-dc/tosca-types/blob/master/custom_types.yaml
-+ Support for the Identity and Access Management Service (IAM).
-+ Support for the Token Translation Service (TTS) to support IAM authetication on OpenNebula Clouds.
-+ Improvements to access OpenStack Clouds that support IAM.

-Read the documentation and more at the [IM Webpage](http://www.grycap.upv.es/im) or at [Gitbook](https://indigo-dc.gitbooks.io/im/content/).
+1 INSTALLATION
+===============

-There is also an Infrastructure Manager YouTube reproduction list with a set of videos with demos
-of the functionality of the platform: https://www.youtube.com/playlist?list=PLgPH186Qwh_37AMhEruhVKZSfoYpHkrUp.
+1.1 REQUISITES
+--------------
+
+IM is based on Python, so a Python 2.6 or higher runtime and standard library must
+be installed in the system.
+
+If you use pip to install the IM, all the requisites will be installed.
+However, if you install IM from sources you should install:
+
+  + The RADL parser (https://github.com/grycap/radl), available in pip
+    as the 'RADL' package.
+
+  + The paramiko SSH2 protocol library for Python, version 1.14 or later
+(http://www.lag.net/paramiko/), typically available as the 'python-paramiko' package.
+
+  + The YAML library for Python, typically available as the 'python-yaml' or 'PyYAML' package.
+
+  + The suds library for Python, typically available as the 'python-suds' package.
+
+  + The Netaddr library for Python, typically available as the 'python-netaddr' package.
+
+  + The Requests library for Python, typically available as the 'python-requests' package.
+
+  + Ansible (http://www.ansibleworks.com/) to configure nodes in the infrastructures.
+    In particular, Ansible 2.0.0+ must be installed.
+    To ensure the functionality, the following values must be set in the ansible.cfg file (usually found in /etc/ansible/):
+
```
[defaults]
transport = smart
host_key_checking = False
nocolor = 1

# For old versions 1.X
sudo_user = root
sudo_exe = sudo

# For new versions 2.X
become_user = root
become_method = sudo

[paramiko_connection]

record_host_keys=False

[ssh_connection]

# Only in systems with OpenSSH support to ControlPersist
ssh_args = -o ControlMaster=auto -o ControlPersist=900s
# In systems with older versions of OpenSSH (RHEL 6, CentOS 6, SLES 10 or SLES 11)
#ssh_args =
pipelining = True
```
+
+1.2 OPTIONAL PACKAGES
+---------------------
+
+The Bottle framework (http://bottlepy.org/) is used for the REST API.
+It is typically available as the 'python-bottle' package.
+
+The CherryPy Web framework (http://www.cherrypy.org/) is needed for the REST API.
+It is typically available as the 'python-cherrypy' or 'python-cherrypy3' package.
+In newer versions (9.0 and later) the functionality has been moved to the 'cheroot' library
+(https://github.com/cherrypy/cheroot), which can be installed using pip.
+
+Apache-libcloud (http://libcloud.apache.org/) 0.17 or later is used in the
+LibCloud, OpenStack and GCE connectors.
+
+Boto (http://boto.readthedocs.org) 2.29.0 or later is used as the interface to
+Amazon EC2. It is available as the package named ``python-boto`` in Debian based
+distributions. It can also be downloaded from the [boto GitHub repository](https://github.com/boto/boto).
+Download the file and copy the boto subdirectory into the IM install path.
+
+In case of using a MySQL DB as the backend to store IM data, the Python interface to MySQL
+must be installed, typically available as the 'python-mysqldb' or 'MySQL-python' package.
+In case of using Python 3 use the PyMySQL package, available as the package 'python3-pymysql' on
+Debian systems or as the 'PyMySQL' package in pip.
+
+In case of using the SSL secured version of the REST API, pyOpenSSL must be installed.
+
+The Azure Python SDK (https://azure.microsoft.com/es-es/develop/python/) is used to connect with the
+Microsoft Azure platform.


1.3 INSTALLING
--------------

### 1.3.1 Using installer (Recommended option)

The IM provides a script to install the IM in one single step (using pip).
You only need to execute the following command:

```sh
$ wget -qO- https://raw.githubusercontent.com/grycap/im/master/install.sh | bash
```

It works for the most recent version of the main Linux distributions (RHEL, CentOS, Fedora, Ubuntu, Debian).
In case your O.S. does not work with this install script, see the next sections.

### 1.3.2 From PIP

**WARNING: In some old Linux distributions (RHEL 6 or equivalents) you must uninstall
the packages python-crypto and python-paramiko before installing the IM with pip.**

```sh
$ rpm -e python-crypto python-paramiko --nodeps
```

First you need to install the pip tool and some packages needed to compile some of the IM requirements.
To install them in Debian and Ubuntu based distributions, do:

```sh
$ apt update
$ apt install gcc python-dev libffi-dev libssl-dev python-pip sshpass python-pysqlite2 python-requests
```

In Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux,
Fedora, etc.), do:

```sh
$ yum install epel-release
$ yum install which gcc python-devel libffi-devel openssl-devel python-pip sshpass python-sqlite3dbm
```

Due to some problems with the dependencies of the apache-libcloud package on some systems (such as Ubuntu 14.04 or CentOS 6)
this package has to be installed manually:

```sh
$ pip install backports-ssl_match_hostname
```

Then you only have to call the install command of the pip tool with the IM package:

```sh
$ pip install IM
```

Pip will also install any missing prerequisites, so Ansible 1.4.2 or later will
be installed in the system. Some of the optional packages are also installed; please check whether any
of the IM features that you need require some of the packages of the OPTIONAL PACKAGES section.

You must also remember to modify the ansible.cfg file settings as specified in the
REQUISITES section.

### 1.3.3 From RPM packages (RH7)

Download the RPM package from [GitHub](https://github.com/grycap/im/releases/latest).
Also remember to download the RPM of the RADL package from [GitHub](https://github.com/grycap/radl/releases/latest).
You must have the epel repository enabled:

```sh
$ yum install epel-release
```

Then install the downloaded RPMs:

```sh
$ yum localinstall IM-*.rpm RADL-*.rpm
```

The Azure Python SDK is not available in CentOS, so if you need the Azure plugin you have to install the required packages manually using pip:

```sh
$ pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns
```

### 1.3.4 From Deb package (Tested with Ubuntu 14.04 and 16.04)

Download the Deb package from [GitHub](https://github.com/grycap/im/releases/latest).
Also remember to download the Deb of the RADL package from [GitHub](https://github.com/grycap/radl/releases/latest).
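
If you prefer to script these downloads, here is a minimal sketch using the GitHub
releases API (a hypothetical helper, not part of the IM tooling; it assumes the
latest IM and RADL releases actually publish .deb assets, so check the release
pages for the real file names):

```sh
# Download every .deb asset of the latest IM and RADL releases.
# Assumption: both repositories publish .deb files in their latest release.
for repo in grycap/im grycap/radl; do
  curl -s "https://api.github.com/repos/$repo/releases/latest" \
    | grep '"browser_download_url".*\.deb"' \
    | cut -d '"' -f 4 \
    | xargs -r wget
done
```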

In Ubuntu 14.04 some requisites are not available for the "trusty" version or are too old, so you have to install them manually.
You can download them from their corresponding PPAs, but here you have some direct links:

  * python-backports.ssl-match-hostname: [download](http://archive.ubuntu.com/ubuntu/pool/universe/b/backports.ssl-match-hostname/python-backports.ssl-match-hostname_3.4.0.2-1_all.deb)
  * python-scp: [download](http://archive.ubuntu.com/ubuntu/pool/universe/p/python-scp/python-scp_0.10.2-1_all.deb)
  * python-libcloud: [download](http://archive.ubuntu.com/ubuntu/pool/universe/libc/libcloud/python-libcloud_0.20.0-1_all.deb)
  * python-xmltodict: [download](http://archive.ubuntu.com/ubuntu/pool/universe/p/python-xmltodict/python-xmltodict_0.9.2-3_all.deb)

The Azure Python SDK is also not available in Ubuntu 16.04, so if you need the Azure plugin you have to install it manually.
You can download the packages from their corresponding PPAs, but here you have some direct links:

  * python-msrestazure: [download](https://launchpad.net/ubuntu/+archive/primary/+files/python-msrestazure_0.4.3-1_all.deb)
  * python-msrest: [download](https://launchpad.net/ubuntu/+archive/primary/+files/python-msrest_0.4.4-1_all.deb)
  * python-azure: [download](https://launchpad.net/ubuntu/+archive/primary/+files/python-azure_2.0.0~rc6+dfsg-2_all.deb)

It is also recommended to configure the Ansible PPA to install the newest versions of Ansible (see [Ansible installation](http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu)):

```sh
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ansible/ansible
$ sudo apt-get update
```

Put all the .deb files in the same directory and do:

```sh
$ sudo dpkg -i *.deb
$ sudo apt install -f -y
```

### 1.3.5 From source

Select a proper path where the IM service will be installed (e.g. /usr/local/im,
/opt/im or other). This path will be called IM_PATH.

```sh
$ tar xvzf IM-X.XX.tar.gz
$ chown -R root:root IM-X.XX
$ mv IM-X.XX /usr/local
```

Finally you must copy (or link) the $IM_PATH/scripts/im file to the /etc/init.d directory.

```sh
$ ln -s /usr/local/im/scripts/im /etc/init.d/im
```

1.4 CONFIGURATION
-----------------

If you want the IM service to be started at boot time, you must
execute the next set of commands:

On RedHat Systems:

```sh
$ chkconfig im on
```

On Debian Systems:

```sh
$ update-rc.d im start 99 2 3 4 5 . stop 05 0 1 6 .
```

Or for newer Debian-based systems like Ubuntu 14.04:

```sh
$ sysv-rc-conf im on
```

Or you can do it manually:

```sh
$ ln -s /etc/init.d/im /etc/rc2.d/S99im
$ ln -s /etc/init.d/im /etc/rc3.d/S99im
$ ln -s /etc/init.d/im /etc/rc5.d/S99im
$ ln -s /etc/init.d/im /etc/rc1.d/K05im
$ ln -s /etc/init.d/im /etc/rc6.d/K05im
```

Adjust the installation path by setting the IMDAEMON variable at /etc/init.d/im
to the path where the IM im_service.py file is installed (e.g. /usr/local/im/im_service.py),
or set the name of the script file (im_service.py) if the file is in the PATH
(pip puts the im_service.py file in the PATH by default).

Check the parameters in $IM_PATH/etc/im.cfg or /etc/im/im.cfg. Please pay attention
to the following configuration variables, as they are the most important:

DATA_DB - must be set to the URL to access the database to store the IM data.
         Be careful if you have two different instances of the IM service running on the same machine!
         It can be a MySQL DB: 'mysql://username:password@server/db_name' or
         a SQLite one: 'sqlite:///etc/im/inf.dat'.

CONTEXTUALIZATION_DIR - must be set to the full path where the IM contextualization files
         are located. In case of using the pip installation the default value is correct
         (/usr/share/im/contextualization); in case of installing from sources set it to
         $IM_PATH/contextualization (e.g. /usr/local/im/contextualization).

### 1.4.1 SECURITY

Security is disabled by default. Please notice that someone with local network access can "sniff" the traffic and
get the messages exchanged with the IM, including the authorisation data for the cloud providers.

Security can be activated both in the XMLRPC and REST APIs by setting these variables:

XMLRCP_SSL = True

or

REST_SSL = True

And then set the variables: XMLRCP_SSL_* or REST_SSL_* to your certificates paths.

2 DOCKER IMAGE
===============

A Docker image named `grycap/im` has been created to ease the deployment of an IM service using the
default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.

How to launch the IM service using docker:

```sh
$ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im grycap/im
```

To make the IM data persistent you also have to specify a persistent location for the IM database using the IM_DATA_DB environment variable and add a volume:

```sh
$ sudo docker run -d -p 8899:8899 -p 8800:8800 -v "/some_local_path/db:/db" -e IM_DATA_DB=/db/inf.dat --name im grycap/im
```

You can also specify an external MySQL server to store IM data using the IM_DATA_DB environment variable:

```sh
$ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_DATA_DB=mysql://username:password@server/db_name --name im grycap/im
```

Or you can add a volume with all the IM configuration:

```sh
$ sudo docker run -d -p 8899:8899 -p 8800:8800 -v "/some_local_path/im.cfg:/etc/im/im.cfg" --name im grycap/im
```
diff --git a/changelog b/changelog
index dd7f5ce1c..9efe44bad 100644
--- a/changelog
+++ b/changelog
@@ -351,3 +351,14 @@ IM 1.6.3
  * Fix error setting Hostname in Docker, Kubernetes and AzureClassic conns.
  * Fix error connecting with Synefo OCCI sites.
  * Fix error deleting VM in OCCI OpenNebula sites.
+
+IM 1.6.4
+ * Store tenant and project in OCCI connector.
+ * Fix error validating keystone token in OCCI conn.
+ * Decrease timeout getting ansible process results.
+ * Enable to get the initial infrastructure contextualization log.
+
+IM 1.6.5
+ * Homogenize the Inf ID log message.
+ * Fix error: cpu.count parameter is ignored in OpenStack conn.
+ * Fix error: ansible_version is not available in ctxt process.
diff --git a/doc/source/REST.rst b/doc/source/REST.rst
index 7d0dcf6d9..0f84d142f 100644
--- a/doc/source/REST.rst
+++ b/doc/source/REST.rst
@@ -117,11 +117,14 @@ GET ``http://imserver.com/infrastructures/``
 GET ``http://imserver.com/infrastructures//``
    :Response Content-type: text/plain or application/json
    :ok response: 200 OK
+   :input fields: ``headeronly`` (optional)
    :fail response: 401, 404, 400, 403

    Return property ``property_name`` associated to the infrastructure with ID ``infId``. It has these properties:

    :``outputs``: in case of TOSCA documents it will return a JSON object with the outputs of the TOSCA document.
-   :``contmsg``: a string with the contextualization message.
+   :``contmsg``: a string with the contextualization message.
In case the ``headeronly`` flag is set to 'yes',
+   'true' or '1', only the initial part of the infrastructure contextualization log will be
+   returned (without any VM contextualization log).
    :``radl``: a string with the original specified RADL of the infrastructure.
    :``state``: a JSON object with two elements:
diff --git a/doc/source/client.rst b/doc/source/client.rst
index c62a37cea..209aea49c 100644
--- a/doc/source/client.rst
+++ b/doc/source/client.rst
@@ -102,9 +102,10 @@ The :program:`im_client` is called like this::
    Stop (but not remove) the specified virtual machine ``vmId`` associated to the infrastructure with ID
    infrastructure with ID ``infId``.

- ``sshvm infId vmId``
+ ``sshvm infId vmId [show_only]``
    Connect with SSH with the specified virtual machine ``vmId`` associated to the infrastructure with ID
-   infrastructure with ID ``infId``.
+   infrastructure with ID ``infId``. The ``show_only`` parameter is optional and is a flag to specify whether the ssh
+   command will only be shown on stdout instead of being executed.

 ``export infId delete``
    Export the data of the infrastructure with ID ``infId``. The ``delete`` parameter is optional
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
index 5c30b0348..4e472bb00 100644
--- a/doc/source/manual.rst
+++ b/doc/source/manual.rst
@@ -144,7 +144,7 @@ You can download it from their corresponding PPAs. But here you have some links:
  * python-backports.ssl-match-hostname: `download `_
  * python-scp: `download `_
  * python-libcloud: `download `_
- * python-xmltodict: `download `_
+ * python-xmltodict: `download `_

 Also Azure python SDK is not available in Ubuntu 16.04. So if you need the Azure plugin you have to manually install them.
 You can download it from their corresponding PPAs. But here you have some links:
diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst
index 04a534af7..e216c97c3 100644
--- a/doc/source/xmlrpc.rst
+++ b/doc/source/xmlrpc.rst
@@ -43,11 +43,13 @@ This is the list of method names:
 ``GetInfrastructureContMsg``
    :parameter 0: ``infId``: integer
    :parameter 1: ``auth``: array of structs
+   :parameter 2: ``headeronly``: (optional, default value False) boolean
    :ok response: [true, ``cont_out``: string]
    :fail response: [false, ``error``: string]

-   Return the contextualization log associated to the
-   infrastructure with ID ``infId``.
+   Return the contextualization log associated to the infrastructure with ID ``infId``.
+   In case the ``headeronly`` flag is set to True, only the initial part of the infrastructure
+   contextualization log will be returned (without any VM contextualization log).

 ``GetInfrastructureState``
    :parameter 0: ``infId``: integer
diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile
index 673589bae..7bdc2ce0f 100644
--- a/docker-devel/Dockerfile
+++ b/docker-devel/Dockerfile
@@ -2,7 +2,7 @@ FROM grycap/jenkins:ubuntu16.04-im
 ARG BRANCH=devel
 MAINTAINER Miguel Caballer 
-LABEL version="1.6.3"
+LABEL version="1.6.5"
 LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)"
 EXPOSE 8899 8800
diff --git a/docker-py3/Dockerfile b/docker-py3/Dockerfile
index ff0b04267..758576331 100644
--- a/docker-py3/Dockerfile
+++ b/docker-py3/Dockerfile
@@ -1,7 +1,7 @@
 # Dockerfile to create a container with the IM service
 FROM ubuntu:16.04
 LABEL maintainer="Miguel Caballer "
-LABEL version="1.6.3"
+LABEL version="1.6.4"
 LABEL description="Container image to run the IM service.
(http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -16,7 +16,7 @@ RUN pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-c # Install IM RUN apt-get update && apt-get install --no-install-recommends -y gcc libssl-dev libffi-dev libsqlite3-dev && \ - pip3 install IM==1.6.3 && \ + pip3 install IM==1.6.4 && \ apt-get remove -y gcc libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \ apt-get autoremove -y && \ rm -rf /var/lib/apt/lists/* diff --git a/docker/Dockerfile b/docker/Dockerfile index 9b5db7196..d60e2b88d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile to create a container with the IM service FROM ubuntu:16.04 LABEL maintainer="Miguel Caballer " -LABEL version="1.6.3" +LABEL version="1.6.5" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -18,7 +18,7 @@ RUN pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-co RUN apt-get update && apt-get install --no-install-recommends -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev && \ pip install MySQL-python && \ pip install xmltodict && \ - pip install IM==1.6.3 && \ + pip install IM==1.6.5 && \ apt-get remove -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \ apt-get autoremove -y && \ rm -rf /var/lib/apt/lists/* diff --git a/etc/im.cfg b/etc/im.cfg index e6d3703aa..8509ed24b 100644 --- a/etc/im.cfg +++ b/etc/im.cfg @@ -49,7 +49,7 @@ VM_INFO_UPDATE_FREQUENCY = 10 VM_INFO_UPDATE_ERROR_GRACE_PERIOD = 120 # Log File -LOG_LEVEL = DEBUG +LOG_LEVEL = INFO LOG_FILE = /var/log/im/im.log LOG_FILE_MAX_SIZE = 10485760 diff --git a/etc/logging.conf b/etc/logging.conf index fd97a89eb..854b54070 100644 --- a/etc/logging.conf +++ b/etc/logging.conf @@ -12,26 +12,26 @@ level=ERROR handlers=fileHandler [logger_ConfManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=ConfManager propagate=0 [logger_CloudConnector] -level=DEBUG +level=INFO handlers=fileHandler qualname=CloudConnector propagate=0 [logger_InfrastructureManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=InfrastructureManager propagate=0 [handler_fileHandler] class=logging.handlers.RotatingFileHandler -level=DEBUG +level=INFO formatter=simpleFormatter args=('/var/log/im/im.log', 'w', 10485760, 3) diff --git a/im_service.py b/im_service.py index 1dad58b14..e0db244a7 100755 --- a/im_service.py +++ b/im_service.py @@ -160,9 +160,9 @@ def GetVMContMsg(inf_id, vm_id, auth_data): return WaitRequest(request) -def GetInfrastructureContMsg(inf_id, auth_data): +def GetInfrastructureContMsg(inf_id, auth_data, headeronly=False): request = IMBaseRequest.create_request( - IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, (inf_id, auth_data)) + IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, (inf_id, auth_data, headeronly)) return WaitRequest(request) diff --git a/packages/generate_deb.sh b/packages/generate_deb.sh index f174b23fa..6daad14ef 100755 --- a/packages/generate_deb.sh +++ b/packages/generate_deb.sh @@ -5,7 +5,7 @@ apt install -y python-stdeb # remove the ansible requirement as it makes to generate an incorrect dependency python-ansible # also remove the pysqlite requirement as it makes to generate an incorrect dependency python-pysqlite1.1 sed -i '/install_requires/c\ install_requires=["paramiko >= 1.14", "PyYAML", suds_pkg,' setup.py -python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, 
python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests, python-xmltodict, python-tosca-parser" bdist_deb
+python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests, python-tosca-parser" bdist_deb
 mkdir dist_pkg
 cp deb_dist/*.deb dist_pkg
diff --git a/test/files/test.radl b/test/files/test.radl
index fcdc88d5f..c192076fb 100644
--- a/test/files/test.radl
+++ b/test/files/test.radl
@@ -69,6 +69,7 @@ configure test (
     - easy_install: name=jmespath
   tasks:
     - debug: msg="NODENAME = {{NODENAME}}"
+    - debug: msg="VERSION = {{ansible_version.major}}"
     - shell: test -d "/mnt/disk/lost+found"
@end
diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py
index 3aea5910f..731ca346d 100755
--- a/test/integration/TestIM.py
+++ b/test/integration/TestIM.py
@@ -164,14 +164,18 @@ def test_13_getcontmsg(self):
         """
         Test the GetInfrastructureContMsg IM function
         """
-        (success, cont_out) = self.server.GetInfrastructureContMsg(
-            self.inf_id, self.auth_data)
-        self.assertTrue(
-            success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out))
-        self.assertGreater(
-            len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out)
+        (success, cont_out) = self.server.GetInfrastructureContMsg(self.inf_id, self.auth_data)
+        self.assertTrue(success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out))
+        self.assertGreater(len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out)
+        self.assertIn("Select master VM", cont_out)
         self.assertIn("NODENAME = front", cont_out)

+        (success, cont_out) = self.server.GetInfrastructureContMsg(self.inf_id, self.auth_data, True)
+        self.assertTrue(success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out))
+        self.assertGreater(len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out)
+        self.assertIn("Select master VM", cont_out)
+        self.assertNotIn("NODENAME = front", cont_out)
+
     def test_14_getvmcontmsg(self):
         """
         Test the GetVMContMsg IM function
diff --git a/test/integration/TestREST.py b/test/integration/TestREST.py
index 214e50b15..1017c1e90 100755
--- a/test/integration/TestREST.py
+++ b/test/integration/TestREST.py
@@ -202,16 +202,16 @@ def test_30_get_vm_info(self):

     def test_32_get_vm_contmsg(self):
         resp = self.create_request("GET", "/infrastructures/" + self.inf_id)
-        self.assertEqual(resp.status_code, 200,
-                         msg="ERROR getting the infrastructure info:" + resp.text)
+        self.assertEqual(resp.status_code, 200, msg="ERROR getting the infrastructure info:" + resp.text)
         vm_ids = resp.text.split("\n")
         vm_uri = uriparse(vm_ids[0])
         resp = self.create_request("GET", vm_uri[2] + "/contmsg")
-        self.assertEqual(resp.status_code, 200,
-                         msg="ERROR getting VM contmsg:" + resp.text)
-        self.assertEqual(
-            len(resp.text), 0, msg="Incorrect VM contextualization message: " + resp.text)
+        self.assertEqual(resp.status_code, 200, msg="ERROR getting VM contmsg:" + resp.text)
+        self.assertEqual(len(resp.text), 0, msg="Incorrect VM contextualization message: " + resp.text)
+
+        resp2 = self.create_request("GET", vm_uri[2] + "/contmsg?headeronly=true")
+        self.assertEqual(resp2.status_code, 200, msg="ERROR getting VM contmsg:" + resp2.text)

     def test_33_get_contmsg(self):
         resp = self.create_request("GET", "/infrastructures/" + self.inf_id +
"/contmsg") diff --git a/test/unit/REST.py b/test/unit/REST.py index 53abe3ed6..8e22a7fcd 100755 --- a/test/unit/REST.py +++ b/test/unit/REST.py @@ -163,8 +163,13 @@ def test_GetInfrastructureProperty(self, bottle_request, get_infrastructure, Get res = RESTGetInfrastructureProperty("1", "contmsg") self.assertEqual(res, "contmsg") - res = RESTGetInfrastructureProperty("1", "outputs") - self.assertEqual(res, '{"outputs": "outputs"}') + bottle_request.params = {'headeronly': 'yes'} + res = RESTGetInfrastructureProperty("1", "contmsg") + self.assertEqual(res, "contmsg") + + bottle_request.params = {'headeronly': 'no'} + res = RESTGetInfrastructureProperty("1", "contmsg") + self.assertEqual(res, "contmsg") res = RESTGetInfrastructureProperty("1", "radl") self.assertEqual(res, "radl") diff --git a/test/unit/ServiceRequests.py b/test/unit/ServiceRequests.py index 2c9364df3..918a065ac 100755 --- a/test/unit/ServiceRequests.py +++ b/test/unit/ServiceRequests.py @@ -65,7 +65,7 @@ def test_cont_msg(self, inflist): import IM.ServiceRequests req = IM.ServiceRequests.IMBaseRequest.create_request(IM.ServiceRequests. IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, - ("", "")) + ("", "", False)) req._call_function() @patch('IM.InfrastructureManager.InfrastructureManager') diff --git a/test/unit/connectors/OCCI.py b/test/unit/connectors/OCCI.py index 2b0a44f2a..db0216850 100755 --- a/test/unit/connectors/OCCI.py +++ b/test/unit/connectors/OCCI.py @@ -160,7 +160,7 @@ def get_response(self, method, url, verify, cert, headers, data): resp.json.return_value = {"tenants": [{"name": "tenantname"}]} elif url == "/v3/auth/projects": resp.status_code = 200 - resp.json.return_value = {"projects": [{"id": "projectid"}]} + resp.json.return_value = {"projects": [{"id": "projectid", "name": "prname"}]} elif url == "/v3/OS-FEDERATION/identity_providers/egi.eu/protocols/oidc/auth": resp.status_code = 200 resp.headers = {'X-Subject-Token': 'token1'} diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index f6b8fc92b..9ad7f1aa0 100755 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -735,12 +735,16 @@ def test_get_vm_info(self): contmsg = IM.GetVMContMsg(infId, "0", auth0) self.assertEqual(contmsg, "") + InfrastructureList.infrastructure_list[infId].cont_out = "Header" InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector = MagicMock() InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector.error_messages = "TESTMSG" contmsg = IM.GetInfrastructureContMsg(infId, auth0) + header_contmsg = IM.GetInfrastructureContMsg(infId, auth0, True) InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector = None self.assertIn("TESTMSG", contmsg) + self.assertNotIn("TESTMSG", header_contmsg) + self.assertIn("Header", header_contmsg) state = IM.GetInfrastructureState(infId, auth0) self.assertEqual(state["state"], "running")