From 83331dc8ae4cf855dac79a372e1c6c527d9c4200 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 17:40:10 +0100 Subject: [PATCH 1/2] Set loglevel to INFO: #485 --- IM/ConfManager.py | 100 +++++++++++++++--------------- IM/InfrastructureInfo.py | 3 +- IM/InfrastructureManager.py | 12 ++-- IM/VirtualMachine.py | 13 ++-- IM/config.py | 2 +- IM/connectors/Azure.py | 44 ++++++------- IM/connectors/AzureClassic.py | 23 ++++--- IM/connectors/Docker.py | 16 ++--- IM/connectors/EC2.py | 112 ++++++++++++++-------------------- IM/connectors/GCE.py | 51 ++++++++-------- IM/connectors/Kubernetes.py | 8 +-- IM/connectors/OCCI.py | 54 ++++++++-------- IM/connectors/OpenNebula.py | 20 +++--- IM/connectors/OpenStack.py | 38 ++++++------ etc/im.cfg | 2 +- 15 files changed, 235 insertions(+), 263 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index c4b4e33d0..d3d82d309 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -78,15 +78,15 @@ def check_running_pids(self, vms_configuring, failed_step): if step not in res: res[step] = [] res[step].append(vm) - self.log_debug("Ansible process to configure " + str(vm.im_id) + - " with PID " + vm.ctxt_pid + " is still running.") + self.log_info("Ansible process to configure " + str(vm.im_id) + + " with PID " + vm.ctxt_pid + " is still running.") else: - self.log_debug("Configuration process in VM: " + str(vm.im_id) + " finished.") + self.log_info("Configuration process in VM: " + str(vm.im_id) + " finished.") if vm.configured: - self.log_debug("Configuration process of VM %s success." % vm.im_id) + self.log_info("Configuration process of VM %s success." % vm.im_id) elif vm.configured is False: failed_step.append(step) - self.log_debug("Configuration process of VM %s failed." % vm.im_id) + self.log_info("Configuration process of VM %s failed." % vm.im_id) else: self.log_warn("Configuration process of VM %s in unfinished state." 
% vm.im_id)
                     # Force to save the data to store the log data ()
@@ -97,14 +97,14 @@ def check_running_pids(self, vms_configuring, failed_step):
                     if step not in res:
                         res[step] = []
                     res[step].append(vm)
-                    self.log_debug("Configuration process of master node: " +
-                                   str(vm.get_ctxt_process_names()) + " is still running.")
+                    self.log_info("Configuration process of master node: " +
+                                  str(vm.get_ctxt_process_names()) + " is still running.")
                 else:
                     if vm.configured:
-                        self.log_debug("Configuration process of master node successfully finished.")
+                        self.log_info("Configuration process of master node successfully finished.")
                     elif vm.configured is False:
                         failed_step.append(step)
-                        self.log_debug("Configuration process of master node failed.")
+                        self.log_info("Configuration process of master node failed.")
                     else:
                         self.log_warn("Configuration process of master node in unfinished state.")
                     # Force to save the data to store the log data
@@ -116,9 +116,9 @@ def stop(self):
         self._stop_thread = True
         # put a task to assure to wake up the thread
         self.inf.add_ctxt_tasks([(-10, 0, None, None)])
-        self.log_debug("Stop Configuration thread.")
+        self.log_info("Stop Configuration thread.")
         if self.ansible_process and self.ansible_process.is_alive():
-            self.log_debug("Stopping pending Ansible process.")
+            self.log_info("Stopping pending Ansible process.")
             self.ansible_process.terminate()

     def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT):
@@ -170,7 +170,7 @@ def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT):
             self.log_error("Error waiting all the VMs to have a correct IP")
             self.inf.set_configured(False)
         else:
-            self.log_debug("All the VMs have a correct IP")
+            self.log_info("All the VMs have a correct IP")
             self.inf.set_configured(True)

         return success
@@ -180,7 +180,7 @@ def kill_ctxt_processes(self):
             Kill all the ctxt processes
         """
         for vm in self.inf.get_vm_list():
-            self.log_debug("Killing ctxt processes in VM: %s" % vm.id)
+            self.log_info("Killing ctxt processes in VM: %s" % vm.id)
             try:
                 vm.kill_check_ctxt_process()
             except:
@@ -188,7 +188,7 @@ def kill_ctxt_processes(self):
                 vm.configured = None

     def run(self):
-        self.log_debug("Starting the ConfManager Thread")
+        self.log_info("Starting the ConfManager Thread")

         failed_step = []
         last_step = None
@@ -196,14 +196,14 @@ def run(self):
         while not self._stop_thread:
             if self.init_time + self.max_ctxt_time < time.time():
-                self.log_debug("Max contextualization time passed. Exit thread.")
+                self.log_info("Max contextualization time passed. Exit thread.")
                 self.inf.add_cont_msg("ERROR: Max contextualization time passed.")
                 # Remove tasks from queue
                 self.inf.reset_ctxt_tasks()
                 # Kill the ansible processes
                 self.kill_ctxt_processes()
                 if self.ansible_process and self.ansible_process.is_alive():
-                    self.log_debug("Stopping pending Ansible process.")
+                    self.log_info("Stopping pending Ansible process.")
                     self.ansible_process.terminate()
                 return
@@ -219,14 +219,14 @@ def run(self):

                 # stop the thread if the stop method has been called
                 if self._stop_thread:
-                    self.log_debug("Exit Configuration thread.")
+                    self.log_info("Exit Configuration thread.")
                     return

                 # if this task is from a next step
                 if last_step is not None and last_step < step:
                     if failed_step and sorted(failed_step)[-1] < step:
-                        self.log_debug("Configuration of process of step %s failed, "
-                                       "ignoring tasks of step %s." % (sorted(failed_step)[-1], step))
+                        self.log_info("Configuration process of step %s failed, "
+                                      "ignoring tasks of step %s." 
% (sorted(failed_step)[-1], step)) else: # Add the task again to the queue only if the last step was # OK @@ -234,12 +234,12 @@ def run(self): # If there are any process running of last step, wait if last_step in vms_configuring and len(vms_configuring[last_step]) > 0: - self.log_debug("Waiting processes of step " + str(last_step) + " to finish.") + self.log_info("Waiting processes of step " + str(last_step) + " to finish.") time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: # if not, update the step, to go ahead with the new # step - self.log_debug("Step " + str(last_step) + " finished. Go to step: " + str(step)) + self.log_info("Step " + str(last_step) + " finished. Go to step: " + str(step)) last_step = step else: if isinstance(vm, VirtualMachine): @@ -247,12 +247,12 @@ def run(self): self.log_warn("VM ID " + str(vm.im_id) + " has been destroyed. Not launching new tasks for it.") elif vm.is_configured() is False: - self.log_debug("Configuration process of step %s failed, " - "ignoring tasks of step %s." % (last_step, step)) + self.log_info("Configuration process of step %s failed, " + "ignoring tasks of step %s." % (last_step, step)) # Check that the VM has no other ansible process # running elif vm.ctxt_pid: - self.log_debug("VM ID " + str(vm.im_id) + " has running processes, wait.") + self.log_info("VM ID " + str(vm.im_id) + " has running processes, wait.") # If there are, add the tasks again to the queue # Set the priority to a higher number to decrease the # priority enabling to select other items of the queue @@ -262,7 +262,7 @@ def run(self): time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: if not tasks: - self.log_debug("No tasks to execute. Ignore this step.") + self.log_info("No tasks to execute. Ignore this step.") else: # If not, launch it # Mark this VM as configuring @@ -318,11 +318,11 @@ def launch_ctxt_agent(self, vm, tasks): str(self.inf.id) + "/" + ip + "_" + str(vm.im_id) tmp_dir = tempfile.mkdtemp() - self.log_debug("Create the configuration file for the contextualization agent") + self.log_info("Create the configuration file for the contextualization agent") conf_file = tmp_dir + "/config.cfg" self.create_vm_conf_file(conf_file, vm, tasks, remote_dir) - self.log_debug("Copy the contextualization agent config file") + self.log_info("Copy the contextualization agent config file") # Copy the contextualization agent config file ssh = vm.get_ssh_ansible_master() @@ -332,10 +332,10 @@ def launch_ctxt_agent(self, vm, tasks): if vm.configured is None: if len(self.inf.get_vm_list()) > Config.VM_NUM_USE_CTXT_DIST: - self.log_debug("Using ctxt_agent_dist") + self.log_info("Using ctxt_agent_dist") ctxt_agent_command = "/ctxt_agent_dist.py " else: - self.log_debug("Using ctxt_agent") + self.log_info("Using ctxt_agent") ctxt_agent_command = "/ctxt_agent.py " vault_export = "" vault_password = vm.info.systems[0].getValue("vault.password") @@ -348,7 +348,7 @@ def launch_ctxt_agent(self, vm, tasks): " > " + remote_dir + "/stdout" + " 2> " + remote_dir + "/stderr < /dev/null & echo -n $!") - self.log_debug("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) + self.log_info("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) vm.ctxt_pid = pid vm.launch_check_ctxt_process() @@ -374,7 +374,7 @@ def generate_inventory(self, tmp_dir): """ Generate the ansible inventory file """ - self.log_debug("Create the ansible configuration file") + self.log_info("Create the ansible configuration file") res_filename = "hosts" 
ansible_file = tmp_dir + "/" + res_filename
         out = open(ansible_file, 'w')
@@ -738,7 +738,7 @@ def configure_master(self):
                 success = False
                 cont = 0
                 while not self._stop_thread and not success and cont < Config.PLAYBOOK_RETRIES:
-                    self.log_debug("Sleeping %s secs." % (cont ** 2 * 5))
+                    self.log_info("Sleeping %s secs." % (cont ** 2 * 5))
                     time.sleep(cont ** 2 * 5)
                     cont += 1
                     try:
@@ -768,7 +768,7 @@ def configure_master(self):
             if configured_ok:
                 remote_dir = Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/"
-                self.log_debug("Copy the contextualization agent files")
+                self.log_info("Copy the contextualization agent files")
                 files = []
                 files.append((Config.IM_PATH + "/SSH.py", remote_dir + "/IM/SSH.py"))
                 files.append((Config.IM_PATH + "/SSHRetry.py", remote_dir + "/IM/SSHRetry.py"))
@@ -837,7 +837,7 @@ def wait_master(self):
             - Wait it to boot and has the SSH port open
         """
         if self.inf.radl.ansible_hosts:
-            self.log_debug("Usign ansible host: " + self.inf.radl.ansible_hosts[0].getHost())
+            self.log_info("Using ansible host: " + self.inf.radl.ansible_hosts[0].getHost())
             self.inf.set_configured(True)
             return True
@@ -919,7 +919,7 @@ def generate_playbooks_and_hosts(self):
         # Get the groups for the different VM types
         vm_group = self.inf.get_vm_list_by_system_name()

-        self.log_debug("Generating YAML, hosts and inventory files.")
+        self.log_info("Generating YAML, hosts and inventory files.")
         # Create the other configure sections (it may be included in other
         # configure)
         filenames = []
@@ -971,7 +971,7 @@ def generate_playbooks_and_hosts(self):
                 recipe_files.append((tmp_dir + "/" + f, remote_dir + "/" + f))

             self.inf.add_cont_msg("Copying YAML, hosts and inventory files.")
-            self.log_debug("Copying YAML files.")
+            self.log_info("Copying YAML files.")
             if self.inf.radl.ansible_hosts:
                 for ansible_host in self.inf.radl.ansible_hosts:
                     (user, passwd, private_key) = ansible_host.getCredentialValues()
@@ -1056,7 +1056,7 @@ def wait_vm_running(self, vm, timeout, relaunch=False):
                     self.log_warn("VM deleted by the user, Exit")
                     return False

-                self.log_debug("VM " + str(vm.id) + " is not running yet.")
+                self.log_info("VM " + str(vm.id) + " is not running yet.")
                 time.sleep(delay)
                 wait += delay
@@ -1109,13 +1109,13 @@ def wait_vm_ssh_acccess(self, vm, timeout):
             else:
                 vm.update_status(self.auth)
                 if vm.state == VirtualMachine.FAILED:
-                    self.log_debug('VM: ' + str(vm.id) + " is in state Failed. Does not wait for SSH.")
+                    self.log_warn('VM: ' + str(vm.id) + " is in state Failed. Not waiting for SSH.")
                     return False, "VM Failure."

                 ip = vm.getPublicIP()
                 if ip is not None:
                     ssh = vm.get_ssh()
-                    self.log_debug('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id))
+                    self.log_info('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id))

                     try:
                         connected = ssh.test_connectivity(5)
@@ -1128,14 +1128,14 @@ def wait_vm_ssh_acccess(self, vm, timeout):
                         return False, "Error connecting with ip: " + ip + " incorrect credentials."
if connected:
-                        self.log_debug('Works!')
+                        self.log_info('Works!')
                         return True, ""
                     else:
-                        self.log_debug('do not connect, wait ...')
+                        self.log_info('Could not connect, waiting ...')
                         wait += delay
                         time.sleep(delay)
                 else:
-                    self.log_debug('VM ' + str(vm.id) + ' with no IP')
+                    self.log_warn('VM ' + str(vm.id) + ' with no IP')
                     # Update the VM info and wait to have a valid public IP
                     wait += delay
                     time.sleep(delay)
@@ -1232,7 +1232,7 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh):
             os.symlink(os.path.abspath(
                 Config.RECIPES_DIR + "/utils"), tmp_dir + "/utils")

-        self.log_debug('Launching Ansible process.')
+        self.log_info('Launching Ansible process.')
         result = Queue()
         extra_vars = {'IM_HOST': 'all'}
         # store the process to terminate it later is Ansible does not finish correctly
@@ -1253,14 +1253,14 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh):
                 self.ansible_process = None
                 return (False, "Timeout. Ansible process terminated.")
             else:
-                self.log_debug('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT))
+                self.log_info('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT))
                 time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL)
                 wait += Config.CHECK_CTXT_PROCESS_INTERVAL

-        self.log_debug('Ansible process finished.')
+        self.log_info('Ansible process finished.')

         try:
-            self.log_debug('Get the results of the Ansible process.')
+            self.log_info('Get the results of the Ansible process.')
             _, (return_code, _), output = result.get(timeout=10)
             msg = output.getvalue()
         except:
@@ -1366,18 +1366,18 @@ def configure_ansible(self, ssh, tmp_dir):

             self.inf.add_cont_msg("Performing preliminary steps to configure Ansible.")

-            self.log_debug("Remove requiretty in sshd config")
+            self.log_info("Remove requiretty in sshd config")
             try:
                 cmd = "sudo -S sed -i 's/.*requiretty$/#Defaults requiretty/' /etc/sudoers"
                 if ssh.password:
                     cmd = "echo '" + ssh.password + "' | " + cmd
                 (stdout, stderr, _) = ssh.execute(cmd, 120)
-                self.log_debug(stdout + "\n" + stderr)
+                self.log_info(stdout + "\n" + stderr)
             except:
                 self.log_exception("Error removing requiretty. Ignoring.")

             self.inf.add_cont_msg("Configure Ansible in the master VM.")
-            self.log_debug("Call Ansible to (re)configure in the master node")
+            self.log_info("Call Ansible to (re)configure in the master node")
             (success, msg) = self.call_ansible(
                 tmp_dir, "inventory.cfg", ConfManager.MASTER_YAML, ssh)
@@ -1385,7 +1385,7 @@ def configure_ansible(self, ssh, tmp_dir):
                 self.log_error("Error configuring master node: " + msg + "\n\n")
                 self.inf.add_cont_msg("Error configuring the master VM: " + msg + " " + tmp_dir)
             else:
-                self.log_debug("Ansible successfully configured in the master VM:\n" + msg + "\n\n")
+                self.log_info("Ansible successfully configured in the master VM:\n" + msg + "\n\n")
                 self.inf.add_cont_msg("Ansible successfully configured in the master VM.")
         except Exception as ex:
             self.log_exception("Error configuring master node.")
diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py
index 6899104cd..50036c1cc 100644
--- a/IM/InfrastructureInfo.py
+++ b/IM/InfrastructureInfo.py
@@ -468,8 +468,7 @@ def Contextualize(self, auth, vm_list=None):
                     break

         if not ctxt:
-            InfrastructureInfo.logger.debug(
-                "Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.")
+            InfrastructureInfo.logger.info("Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.")
             self.cont_out = "Contextualization disabled by the RADL."
self.configured = True for vm in self.get_vm_list(): diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index befc1bb19..4f75883fa 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -185,7 +185,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c requested_radl = radl.clone() requested_radl.systems = [radl.get_system_by_name(concrete_system.name)] try: - InfrastructureManager.logger.debug( + InfrastructureManager.logger.info( "Launching %d VMs of type %s" % (remain_vm, concrete_system.name)) launched_vms = cloud.cloud.getCloudConnector(sel_inf).launch( sel_inf, launch_radl, requested_radl, remain_vm, auth) @@ -198,7 +198,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c launched_vms = [] for success, launched_vm in launched_vms: if success: - InfrastructureManager.logger.debug("VM successfully launched: " + str(launched_vm.id)) + InfrastructureManager.logger.info("VM successfully launched: " + str(launched_vm.id)) deployed_vm.setdefault(deploy, []).append(launched_vm) deploy.cloud_id = cloud_id remain_vm -= 1 @@ -907,7 +907,7 @@ def GetInfrastructureState(inf_id, auth): if state is None: state = VirtualMachine.UNKNOWN - InfrastructureManager.logger.debug( + InfrastructureManager.logger.info( "inf: " + str(inf_id) + " is in state: " + state) return {'state': state, 'vm_states': vm_states} @@ -915,7 +915,7 @@ def GetInfrastructureState(inf_id, auth): def _stop_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Stopping the VM id: " + vm.id) + InfrastructureManager.logger.info("Stopping the VM id: " + vm.id) (success, msg) = vm.stop(auth) except Exception as e: msg = str(e) @@ -968,7 +968,7 @@ def StopInfrastructure(inf_id, auth): def _start_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Starting the VM id: " + vm.id) + InfrastructureManager.logger.info("Starting the VM id: " + vm.id) (success, msg) = vm.start(auth) except Exception as e: msg = str(e) @@ -1113,7 +1113,7 @@ def _delete_vm(vm, delete_list, auth, exceptions): last = InfrastructureManager.is_last_in_cloud(vm, delete_list, remain_vms) success = False try: - InfrastructureManager.logger.debug("Finalizing the VM id: " + str(vm.id)) + InfrastructureManager.logger.info("Finalizing the VM id: " + str(vm.id)) (success, msg) = vm.finalize(last, auth) except Exception as e: msg = str(e) diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 3aab7d246..e4444992e 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -465,7 +465,7 @@ def update_status(self, auth, force=False): updated = True self.last_update = now elif self.creating: - self.log_debug("VM is in creation process, set pending state") + self.log_info("VM is in creation process, set pending state") state = VirtualMachine.PENDING else: self.log_error("Error updating VM status: %s" % new_vm) @@ -633,8 +633,7 @@ def kill_check_ctxt_process(self): if self.ctxt_pid != self.WAIT_TO_PID: ssh = self.get_ssh_ansible_master() try: - self.log_debug( - "Killing ctxt process with pid: " + str(self.ctxt_pid)) + self.log_info("Killing ctxt process with pid: " + str(self.ctxt_pid)) # Try to get PGID to kill all child processes pgkill_success = False @@ -691,7 +690,7 @@ def check_ctxt_process(self): ssh = self.get_ssh_ansible_master() try: - self.log_debug("Getting status of ctxt process with pid: " + str(ctxt_pid)) + self.log_info("Getting status of ctxt process with pid: " + str(ctxt_pid)) 
(_, _, exit_status) = ssh.execute("ps " + str(ctxt_pid)) except: self.log_warn("Error getting status of ctxt process with pid: " + str(ctxt_pid)) @@ -710,7 +709,7 @@ def check_ctxt_process(self): if exit_status != 0: # The process has finished, get the outputs - self.log_debug("The process %s has finished, get the outputs" % ctxt_pid) + self.log_info("The process %s has finished, get the outputs" % ctxt_pid) ctxt_log = self.get_ctxt_log(remote_dir, True) msg = self.get_ctxt_output(remote_dir, True) if ctxt_log: @@ -724,11 +723,11 @@ def check_ctxt_process(self): # dynamically if Config.UPDATE_CTXT_LOG_INTERVAL > 0 and wait > Config.UPDATE_CTXT_LOG_INTERVAL: wait = 0 - self.log_debug("Get the log of the ctxt process with pid: " + str(ctxt_pid)) + self.log_info("Get the log of the ctxt process with pid: " + str(ctxt_pid)) ctxt_log = self.get_ctxt_log(remote_dir) self.cont_out = initial_count_out + ctxt_log # The process is still running, wait - self.log_debug("The process %s is still running. wait." % ctxt_pid) + self.log_info("The process %s is still running. wait." % ctxt_pid) time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL) wait += Config.CHECK_CTXT_PROCESS_INTERVAL else: diff --git a/IM/config.py b/IM/config.py index b3a5d2afe..5921607a6 100644 --- a/IM/config.py +++ b/IM/config.py @@ -62,7 +62,7 @@ class Config: IM_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_FILE = '/var/log/im/inf.log' LOG_FILE_MAX_SIZE = 10485760 - LOG_LEVEL = "DEBUG" + LOG_LEVEL = "INFO" CONTEXTUALIZATION_DIR = '/usr/share/im/contextualization' RECIPES_DIR = CONTEXTUALIZATION_DIR + '/AnsibleRecipes' RECIPES_DB_FILE = CONTEXTUALIZATION_DIR + '/recipes_ansible.db' diff --git a/IM/connectors/Azure.py b/IM/connectors/Azure.py index 54c1ed3f2..32be3f6a9 100644 --- a/IM/connectors/Azure.py +++ b/IM/connectors/Azure.py @@ -449,7 +449,7 @@ def get_azure_vm_create_json(self, storage_account, vm_name, nics, radl, instanc data_disks = [] while system.getValue("disk." + str(cont) + ".size"): disk_size = system.getFeature("disk." + str(cont) + ".size").getValue('G') - self.log_debug("Adding a %s GB disk." % disk_size) + self.log_info("Adding a %s GB disk." % disk_size) data_disks.append({ 'name': '%s_disk_%d' % (vm_name, cont), 'disk_size_gb': disk_size, @@ -548,7 +548,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun vm_name, vm_parameters) - self.log_debug("VM ID: %s created." % vm.id) + self.log_info("VM ID: %s created." % vm.id) inf.add_vm(vm) vms.append((True, (vm, async_vm_creation))) except Exception as ex: @@ -557,7 +557,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun # Delete Resource group and everything in it if group_name: - self.log_debug("Delete Resource group %s and everything in it." % group_name) + self.log_info("Delete Resource group %s and everything in it." 
% group_name) try: resource_client.resource_groups.delete(group_name).wait() except: @@ -587,7 +587,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): with inf._lock: # Create resource group for the Infrastructure if it does not exists if not self.get_rg("rg-%s" % inf.id, credentials, subscription_id): - self.log_debug("Creating Inf RG: %s" % "rg-%s" % inf.id) + self.log_info("Creating Inf RG: %s" % "rg-%s" % inf.id) resource_client.resource_groups.create_or_update("rg-%s" % inf.id, {'location': location}) # Create an storage_account per Infrastructure @@ -595,7 +595,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): credentials, subscription_id) if not storage_account: - self.log_debug("Creating storage account: %s" % storage_account_name) + self.log_info("Creating storage account: %s" % storage_account_name) try: storage_client = StorageManagementClient(credentials, subscription_id) storage_client.storage_accounts.create("rg-%s" % inf.id, @@ -606,7 +606,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): ).wait() except: self.log_exception("Error creating storage account: %s" % storage_account) - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: @@ -626,29 +626,29 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if success: vm, async_vm_creation = data try: - self.log_debug("Waiting VM ID %s to be created." % vm.id) + self.log_info("Waiting VM ID %s to be created." % vm.id) async_vm_creation.wait() res.append((True, vm)) remaining_vms -= 1 except: self.log_exception("Error waiting the VM %s." % vm.id) - self.log_debug("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) + self.log_info("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) if remaining_vms > 0: # Remove the general group - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: pass else: - self.log_debug("All VMs created successfully.") + self.log_info("All VMs created successfully.") return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) group_name = vm.id.split('/')[0] vm_name = vm.id.split('/')[1] @@ -661,9 +661,9 @@ def updateVMInfo(self, vm, auth_data): self.log_exception("Error getting the VM info: " + vm.id) return (False, "Error getting the VM info: " + vm.id + ". " + str(ex)) - self.log_debug("VM info: " + vm.id + " obtained.") + self.log_info("VM info: " + vm.id + " obtained.") vm.state = self.PROVISION_STATE_MAP.get(virtual_machine.provisioning_state, VirtualMachine.UNKNOWN) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name(virtual_machine.hardware_profile.vm_size, virtual_machine.location, credentials, subscription_id) @@ -699,11 +699,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = dns_client.zones.create_or_update(group_name, domain, {'location': 'global'}) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." 
% domain) if zone: record = None @@ -712,11 +712,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not record: - self.log_debug("Creating DNS record %s." % hostname) + self.log_info("Creating DNS record %s." % hostname) record_data = {"ttl": 300, "arecords": [{"ipv4_address": ip}]} dns_client.record_sets.create_or_update(group_name, domain, hostname, 'A', record_data) else: - self.log_debug("DNS record %s exists. Do not create." % hostname) + self.log_info("DNS record %s exists. Do not create." % hostname) return True except Exception: @@ -752,25 +752,25 @@ def setIPs(self, vm, network_profile, credentials, subscription_id): def finalize(self, vm, last, auth_data): try: - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) group_name = vm.id.split('/')[0] credentials, subscription_id = self.get_credentials(auth_data) resource_client = ResourceManagementClient(credentials, subscription_id) # Delete Resource group and everything in it if self.get_rg(group_name, credentials, subscription_id): - self.log_debug("Removing RG: %s" % group_name) + self.log_info("Removing RG: %s" % group_name) resource_client.resource_groups.delete(group_name).wait() else: - self.log_debug("RG: %s does not exist. Do not remove." % group_name) + self.log_info("RG: %s does not exist. Do not remove." % group_name) # if it is the last VM delete the RG of the Inf if last: if self.get_rg("rg-%s" % vm.inf.id, credentials, subscription_id): - self.log_debug("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) + self.log_info("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) resource_client.resource_groups.delete("rg-%s" % vm.inf.id) else: - self.log_debug("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) + self.log_info("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) except Exception as ex: self.log_exception("Error terminating the VM") diff --git a/IM/connectors/AzureClassic.py b/IM/connectors/AzureClassic.py index be0c5e07f..036c82687 100644 --- a/IM/connectors/AzureClassic.py +++ b/IM/connectors/AzureClassic.py @@ -517,7 +517,7 @@ def wait_operation_status(self, request_id, auth_data, delay=2, timeout=90): output = Operation(resp.text) status_str = output.Status # InProgress|Succeeded|Failed - self.log_debug("Operation string state: " + status_str) + self.log_info("Operation string state: " + status_str) else: self.log_error( "Error waiting operation to finish: Code %d. Msg: %s." 
% (resp.status_code, resp.text)) @@ -629,8 +629,7 @@ def get_storage_account(self, storage_account, auth_data): storage_info = StorageService(resp.text) return storage_info.StorageServiceProperties elif resp.status_code == 404: - self.log_debug( - "Storage " + storage_account + " does not exist") + self.log_info("Storage " + storage_account + " does not exist") return None else: self.log_warn( @@ -682,7 +681,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, error_msg)) break - self.log_debug("Creating the VM with id: " + service_name) + self.log_info("Creating the VM with id: " + service_name) # Create the VM to get the nodename vm = VirtualMachine(inf, service_name, self.cloud, radl, requested_radl, self) @@ -784,7 +783,7 @@ def get_instance_type(self, system, auth_data): return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) service_name = vm.id try: @@ -801,13 +800,13 @@ def updateVMInfo(self, vm, auth_data): return (False, "Error getting the VM info: " + vm.id + ". Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) else: - self.log_debug("VM info: " + vm.id + " obtained.") - self.log_debug(resp.text) + self.log_info("VM info: " + vm.id + " obtained.") + self.log_info(resp.text) vm_info = Deployment(resp.text) vm.state = self.get_vm_state(vm_info) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name( vm_info.RoleInstanceList.RoleInstance[0].InstanceSize, auth_data) @@ -857,7 +856,7 @@ def setIPs(self, vm, vm_info): vm.setIps(public_ips, private_ips) def finalize(self, vm, last, auth_data): - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) service_name = vm.id # Delete the service @@ -900,7 +899,7 @@ def call_role_operation(self, op, vm, auth_data): return (True, "") def stop(self, vm, auth_data): - self.log_debug("Stop VM: " + vm.id) + self.log_info("Stop VM: " + vm.id) op = """ @@ -910,7 +909,7 @@ def stop(self, vm, auth_data): return self.call_role_operation(op, vm, auth_data) def start(self, vm, auth_data): - self.log_debug("Start VM: " + vm.id) + self.log_info("Start VM: " + vm.id) op = """ @@ -935,7 +934,7 @@ def get_all_instance_types(self, auth_data): "Error getting Role Sizes. Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) return [] else: - self.log_debug("Role List obtained.") + self.log_info("Role List obtained.") role_sizes = RoleSizes(resp.text) res = [] for role_size in role_sizes.RoleSize: diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index de90f62db..8a35befe8 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -352,7 +352,7 @@ def _generate_mounts(self, system): disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path") if not disk_mount_path.startswith('/'): disk_mount_path = '/' + disk_mount_path - self.log_debug("Attaching a volume in %s" % disk_mount_path) + self.log_info("Attaching a volume in %s" % disk_mount_path) mount = {"Source": source, "Target": disk_mount_path} mount["Type"] = "volume" mount["ReadOnly"] = False @@ -443,10 +443,10 @@ def _delete_volumes(self, vm, auth_data): self.log_warn("Error deleting volume %s: %s." % (source, resp.text)) time.sleep(delay) else: - self.log_debug("Volume %s successfully deleted." % source) + self.log_info("Volume %s successfully deleted." 
% source) break else: - self.log_debug("Volume %s not created by the IM, not deleting it." % source) + self.log_info("Volume %s not created by the IM, not deleting it." % source) def _delete_networks(self, vm, auth_data): for net in vm.info.networks: @@ -465,7 +465,7 @@ def _delete_networks(self, vm, auth_data): if resp.status_code not in [204, 404]: self.log_error("Error deleting network %s: %s" % (net.id, resp.text)) else: - self.log_debug("Network %s deleted successfully" % net.id) + self.log_info("Network %s deleted successfully" % net.id) def _attach_cont_to_networks(self, vm, auth_data): system = vm.info.systems[0] @@ -493,7 +493,7 @@ def _attach_cont_to_networks(self, vm, auth_data): self.log_error("Error attaching cont %s to network %s: %s" % (vm.id, net_name, resp.text)) all_ok = False else: - self.log_debug("Cont %s attached to network %s" % (vm.id, net_name)) + self.log_info("Cont %s attached to network %s" % (vm.id, net_name)) return all_ok def _create_volumes(self, system, auth_data): @@ -515,7 +515,7 @@ def _create_volumes(self, system, auth_data): resp = self.create_request('GET', "/volumes/%s" % source, auth_data, headers) if resp.status_code == 200: # the volume already exists - self.log_debug("Volume named %s already exists." % source) + self.log_info("Volume named %s already exists." % source) else: body = json.dumps({"Name": source, "Driver": driver}) resp = self.create_request('POST', "/volumes/create", auth_data, headers, body) @@ -524,7 +524,7 @@ def _create_volumes(self, system, auth_data): self.log_error("Error creating volume %s: %s." % (source, resp.text)) else: system.setValue("disk." + str(cont) + ".created", "yes") - self.log_debug("Volume %s successfully created." % source) + self.log_info("Volume %s successfully created." % source) cont += 1 @@ -678,7 +678,7 @@ def _get_svc_state(self, svc_name, auth_data): if task["Status"]["State"] == "running": return VirtualMachine.RUNNING elif task["Status"]["State"] == "rejected": - self.log_debug("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) + self.log_info("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) return VirtualMachine.PENDING else: return VirtualMachine.PENDING diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 0023e3045..d1d633565 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -123,8 +123,7 @@ def concreteSystem(self, radl_system, auth_data): instance_type = self.get_instance_type(res_system) if not instance_type: - self.log_error( - "Error launching the VM, no instance type available for the requirements.") + self.log_error("Error launching the VM, no instance type available for the requirements.") self.log_debug(res_system) return [] else: @@ -295,7 +294,7 @@ def get_instance_type(self, radl, vpc=None): performance = float(cpu_perf.value) performance_op = cpu_perf.getLogOperator() else: - self.log_debug("Performance unit unknown: " + cpu_perf.unit + ". Ignore it") + self.log_warn("Performance unit unknown: " + cpu_perf.unit + ". 
Ignore it") instace_types = self.get_all_instance_types() @@ -390,7 +389,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): with inf._lock: sg = self._get_security_group(conn, sg_name) if not sg: - self.log_debug("Creating security group: " + sg_name) + self.log_info("Creating security group: " + sg_name) try: sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) except Exception as crex: @@ -400,7 +399,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): # if not raise the exception raise crex else: - self.log_debug("Security group: " + sg_name + " already created.") + self.log_info("Security group: " + sg_name + " already created.") if vpc: res.append(sg.id) @@ -455,8 +454,7 @@ def create_keypair(self, system, conn): public = system.getValue('disk.0.os.credentials.public_key') if private and public: if public.find('-----BEGIN CERTIFICATE-----') != -1: - self.log_debug( - "The RADL specifies the PK, upload it to EC2") + self.log_info("The RADL specifies the PK, upload it to EC2") public_key = base64.b64encode(public) conn.import_key_pair(keypair_name, public_key) else: @@ -466,7 +464,7 @@ def create_keypair(self, system, conn): system.setUserKeyCredentials( system.getCredentials().username, public, private) else: - self.log_debug("Creating the Keypair name: %s" % keypair_name) + self.log_info("Creating the Keypair name: %s" % keypair_name) keypair_file = self.KEYPAIR_DIR + '/' + keypair_name + '.pem' keypair = conn.create_key_pair(keypair_name) created = True @@ -535,7 +533,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData( system.getValue("disk.0.image.url")) - self.log_debug("Connecting with the region: " + region_name) + self.log_info("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) res = [] @@ -614,11 +612,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if spot: err_msg += " a spot instance " - self.log_debug("Launching a spot instance") + self.log_info("Launching a spot instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." 
% err_msg)) @@ -652,14 +649,12 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): product_description=operative_system, availability_zone=zone.name, max_results=1) - self.log_debug( - "Spot price history for the region " + zone.name) + self.log_debug("Spot price history for the region " + zone.name) self.log_debug(history) if history and history[0].price < historical_price: historical_price = history[0].price availability_zone = zone.name - self.log_debug( - "Launching the spot request in the zone " + availability_zone) + self.log_info("Launching the spot request in the zone " + availability_zone) # Force to use magnetic volumes bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping( @@ -685,8 +680,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -694,11 +688,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, "Error %s." % err_msg)) else: err_msg += " an ondemand instance " - self.log_debug("Launching ondemand instance") + self.log_info("Launching ondemand instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." % err_msg)) @@ -729,8 +722,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") inf.add_vm(vm) res.append((True, vm)) all_failed = False @@ -753,14 +745,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if sg_ids: try: for sgid in sg_ids: - self.log_debug("Remove the SG: %s" % sgid) + self.log_info("Remove the SG: %s" % sgid) conn.delete_security_group(group_id=sgid) except: self.log_exception("Error deleting SG.") if sg_names and sg_names[0] != 'default': try: for sgname in sg_names: - self.log_debug("Remove the SG: %s" % sgname) + self.log_info("Remove the SG: %s" % sgname) conn.delete_security_group(sgname) except: self.log_exception("Error deleting SG.") @@ -782,7 +774,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): cont = 0 err_states = ["error"] while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: - self.log_debug("State: " + str(volume.status)) + self.log_info("State: " + str(volume.status)) cont += 2 time.sleep(2) volume = conn.get_all_volumes([volume.id])[0] @@ -790,8 +782,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): if str(volume.status) == 'available': return volume else: - self.log_error( - "Error creating the volume %s, deleting it" % (volume.id)) + self.log_error("Error creating the volume %s, deleting it" % (volume.id)) conn.delete_volume(volume.id) return None @@ -816,13 +807,11 @@ def attach_volumes(self, instance, vm): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." 
+ str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume = self.create_volume( conn, int(disk_size), instance.placement) if volume: - self.log_debug( - "Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) conn.attach_volume( volume.id, instance.id, "/dev/" + disk_device) cont += 1 @@ -854,16 +843,14 @@ def delete_volumes(self, conn, volumes, instance_id, timeout=240): try: curr_vol = conn.get_all_volumes([volume_id])[0] if str(curr_vol.attachment_state()) == "attached": - self.log_debug( - "Detaching the volume " + volume_id + " from the instance " + instance_id) + self.log_info("Detaching the volume " + volume_id + " from the instance " + instance_id) conn.detach_volume(volume_id, instance_id, force=True) elif curr_vol.attachment_state() is None: - self.log_debug("Removing the volume " + volume_id) + self.log_info("Removing the volume " + volume_id) conn.delete_volume(volume_id) deleted = True else: - self.log_debug( - "State: " + str(curr_vol.attachment_state())) + self.log_info("State: " + str(curr_vol.attachment_state())) except Exception as ex: self.log_warn("Error removing the volume: " + str(ex)) @@ -912,18 +899,16 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): vm.elastic_ip = True try: pub_address = None - self.log_debug("Add an Elastic IP") + self.log_info("Add an Elastic IP") if fixed_ip: for address in instance.connection.get_all_addresses(): if str(address.public_ip) == fixed_ip: pub_address = address if pub_address: - self.log_debug( - "Setting a fixed allocated IP: " + fixed_ip) + self.log_info("Setting a fixed allocated IP: " + fixed_ip) else: - self.log_warn( - "Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). Ignore it.") + self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). 
Ignore it.") return None else: provider_id = self.get_net_provider_id(vm.info) @@ -948,8 +933,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address.release() return None else: - self.log_debug( - "The VM is not running, not adding an Elastic IP.") + self.log_info("The VM is not running, not adding an Elastic IP.") return None def delete_elastic_ips(self, conn, vm): @@ -965,8 +949,7 @@ def delete_elastic_ips(self, conn, vm): # Get the elastic IPs for address in conn.get_all_addresses(): if address.instance_id == instance_id: - self.log_debug( - "This VM has a Elastic IP, disassociate it") + self.log_info("This VM has a Elastic IP, disassociate it") address.disassociate() n = 0 @@ -982,11 +965,10 @@ def delete_elastic_ips(self, conn, vm): n += 1 if not found: - self.log_debug("Now release it") + self.log_info("Now release it") address.release() else: - self.log_debug( - "This is a fixed IP, it is not released") + self.log_info("This is a fixed IP, it is not released") except Exception: self.log_exception( "Error deleting the Elastic IPs to VM ID: " + str(vm.id)) @@ -1077,7 +1059,7 @@ def updateVMInfo(self, vm, auth_data): # deployed job_instance_id = None - self.log_debug("Check if the request has been fulfilled and the instance has been deployed") + self.log_info("Check if the request has been fulfilled and the instance has been deployed") job_sir_id = instance_id request_list = conn.get_all_spot_instance_requests() for sir in request_list: @@ -1090,7 +1072,7 @@ def updateVMInfo(self, vm, auth_data): break if job_instance_id: - self.log_debug("Request fulfilled, instance_id: " + str(job_instance_id)) + self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) instance_id = job_instance_id vm.id = region + ";" + instance_id vm.info.systems[0].setValue('instance_id', str(vm.id)) @@ -1161,22 +1143,22 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = conn.create_zone(domain) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("CREATE", fqdn, "A") change.add_value(ip) result = changes.commit() else: - self.log_debug("DNS record %s exists. Do not create." % fqdn) + self.log_info("DNS record %s exists. Do not create." % fqdn) return True except Exception: @@ -1205,14 +1187,14 @@ def del_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain) + self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("DNS record %s does not exists. Do not delete." % fqdn) + self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: - self.log_debug("Deleting DNS record %s." % fqdn) + self.log_info("Deleting DNS record %s." 
% fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("DELETE", fqdn, "A") change.add_value(ip) @@ -1237,8 +1219,7 @@ def cancel_spot_requests(self, conn, vm): for sir in request_list: if sir.instance_id == instance_id: conn.cancel_spot_instance_requests(sir.id) - self.log_debug( - "Spot instance request " + str(sir.id) + " deleted") + self.log_info("Spot instance request " + str(sir.id) + " deleted") break except Exception: self.log_exception("Error deleting the spot instance request") @@ -1348,7 +1329,7 @@ def delete_security_groups(self, conn, vm, timeout=90): all_vms_terminated = False if all_vms_terminated: - self.log_debug("Remove the SG: " + sg.name) + self.log_info("Remove the SG: " + sg.name) try: sg.revoke('tcp', 0, 65535, src_group=sg) sg.revoke('udp', 0, 65535, src_group=sg) @@ -1367,13 +1348,13 @@ def delete_security_groups(self, conn, vm, timeout=90): # Check if it has been deleted yet sg = self._get_security_group(conn, sg.name) if not sg: - self.log_debug("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) + self.log_info("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) deleted = True else: self.log_exception("Error deleting the SG.") else: # If there are more than 1, we skip this step - self.log_debug("There are active instances. Not removing the SG") + self.log_info("There are active instances. Not removing the SG") def stop(self, vm, auth_data): region_name = vm.id.split(";")[0] @@ -1661,7 +1642,6 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): snapshot_id = "" # Obtain the connection object to connect with EC2 - self.logger.debug("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) if not conn: @@ -1670,7 +1650,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): # Create the instance snapshot instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: - self.logger.debug("Creating snapshot: " + image_name) + self.log_info("Creating snapshot: " + image_name) snapshot_id = instance.create_image(image_name, description="AMI automatically generated by IM", no_reboot=True) @@ -1689,7 +1669,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) - self.logger.debug("Connecting with the region: " + region_name) + self.log_info("Deleting image: %s." 
% image_url)
         conn = self.get_connection(region_name, auth_data)
         success = conn.deregister_image(ami, delete_snapshot=True)
         # https://github.com/boto/boto/issues/3019
diff --git a/IM/connectors/GCE.py b/IM/connectors/GCE.py
index 5f050a9b1..e84e9979e 100644
--- a/IM/connectors/GCE.py
+++ b/IM/connectors/GCE.py
@@ -91,8 +91,7 @@ def get_driver(self, auth_data, datacenter=None):
             self.driver = driver
             return driver
         else:
-            self.log_error(
-                "No correct auth data has been specified to GCE: username, password and project")
+            self.log_error("No correct auth data has been specified to GCE: username, password and project")
             self.log_debug(auth)
             raise Exception(
                 "No correct auth data has been specified to GCE: username, password and project")
@@ -131,8 +130,7 @@ def get_dns_driver(self, auth_data):
             self.dns_driver = driver
             return driver
         else:
-            self.log_error(
-                "No correct auth data has been specified to GCE: username, password and project")
+            self.log_error("No correct auth data has been specified to GCE: username, password and project")
             self.log_debug(auth)
             raise Exception(
                 "No correct auth data has been specified to GCE: username, password and project")
@@ -302,7 +300,7 @@ def request_external_ip(self, radl):
             n += 1

         if requested_ips:
-            self.log_debug("The user requested for a fixed IP")
+            self.log_info("The user requested a fixed IP")
             if len(requested_ips) > 1:
                 self.log_warn(
                     "The user has requested more than one fixed IP. Using only the first one")
@@ -394,7 +392,7 @@ def create_firewall(self, inf, net_name, radl, driver):
         try:
             firewall = driver.ex_get_firewall(firewall_name)
         except ResourceNotFoundError:
-            self.log_debug("The firewall %s does not exist." % firewall_name)
+            self.log_info("The firewall %s does not exist." % firewall_name)
         except:
             self.log_exception("Error trying to get FW %s." % firewall_name)

         if firewall:
             try:
                 firewall.allowed = allowed
                 firewall.update()
-                self.log_debug("Firewall %s existing. Rules updated." % firewall_name)
+                self.log_info("Firewall %s already exists. Rules updated." % firewall_name)
             except:
                 self.log_exception("Error updating the firewall %s." % firewall_name)
             return

         try:
             driver.ex_create_firewall(firewall_name, allowed, network=net_name)
-            self.log_debug("Firewall %s successfully created." % firewall_name)
+            self.log_info("Firewall %s successfully created." % firewall_name)
         except Exception as addex:
             self.log_warn("Exception creating FW: " + str(addex))
@@ -458,14 +456,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):

         if not public or not private:
             # We must generate them
-            self.log_debug("No keys. Generating key pair.")
+            self.log_info("No keys. 
Generating key pair.") (public, private) = self.keygen() system.setValue('disk.0.os.credentials.private_key', private) metadata = {} if private and public: metadata = {"sshKeys": username + ":" + public} - self.log_debug("Setting ssh for user: " + username) + self.log_info("Setting ssh for user: " + username) self.log_debug(metadata) startup_script = self.get_cloud_init_data(radl) @@ -503,7 +501,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): vm.info.systems[0].setValue('instance_id', str(vm.id)) vm.info.systems[0].setValue('instance_name', str(vm.id)) inf.add_vm(vm) - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") res.append((True, vm)) @@ -530,7 +528,7 @@ def finalize(self, vm, last, auth_data): if not success: return (False, "Error destroying node: " + vm.id) - self.log_debug("VM " + str(vm.id) + " successfully destroyed") + self.log_info("VM " + str(vm.id) + " successfully destroyed") else: self.log_warn("VM " + str(vm.id) + " not found.") return (True, "") @@ -546,14 +544,14 @@ def delete_firewall(self, vm, driver): try: firewall = driver.ex_get_firewall(firewall_name) except ResourceNotFoundError: - self.log_debug("Firewall %s does not exist. Do not delete." % firewall_name) + self.log_info("Firewall %s does not exist. Do not delete." % firewall_name) except: self.log_exception("Error trying to get FW %s." % firewall_name) if firewall: try: firewall.destroy() - self.log_debug("Firewall %s successfully deleted." % firewall_name) + self.log_info("Firewall %s successfully deleted." % firewall_name) except: self.log_exception("Error trying to delete FW %s." % firewall_name) @@ -583,7 +581,7 @@ def delete_disks(self, node): self.log_error( "Error destroying the volume: " + vol_name) except ResourceNotFoundError: - self.log_debug("The volume: " + vol_name + " does not exists. Ignore it.") + self.log_info("The volume: " + vol_name + " does not exists. Ignore it.") success = True except: self.log_exception( @@ -666,8 +664,7 @@ def attach_volumes(self, vm, node): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." + str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume_name = "im-%s" % str(uuid.uuid1()) location = self.get_node_location(node) @@ -675,7 +672,7 @@ def attach_volumes(self, vm, node): int(disk_size), volume_name, location=location) success = self.wait_volume(volume) if success: - self.log_debug("Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) try: volume.attach(node, disk_device) except: @@ -758,20 +755,20 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = [z for z in driver.iterate_zones() if z.domain == domain] if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = driver.create_zone(domain) else: zone = zone[0] - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = [r for r in driver.iterate_records(zone) if r.name == fqdn] if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) driver.create_record(fqdn, zone, RecordType.A, dict(ttl=300, rrdatas=[ip])) else: - self.log_debug("DNS record %s exists. Do not create." 
% fqdn)
+                        self.log_info("DNS record %s exists. Do not create." % fqdn)

                 return True
         except Exception:
@@ -800,20 +797,20 @@ def del_dns_entries(self, vm, auth_data):
                     domain += "."
                 zone = [z for z in driver.iterate_zones() if z.domain == domain]
                 if not zone:
-                    self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain)
+                    self.log_info("The DNS zone %s does not exist. Do not delete records." % domain)
                 else:
                     zone = zone[0]
                     fqdn = hostname + "." + domain
                     record = [r for r in driver.iterate_records(zone) if r.name == fqdn]
                     if not record:
-                        self.log_debug("DNS record %s does not exists. Do not delete." % fqdn)
+                        self.log_info("DNS record %s does not exist. Do not delete." % fqdn)
                     else:
                         record = record[0]
                         if record.data['rrdatas'] != [ip]:
-                            self.log_debug("DNS record %s mapped to unexpected IP: %s != %s."
-                                           "Do not delete." % (fqdn, record.data['rrdatas'], ip))
+                            self.log_info("DNS record %s mapped to unexpected IP: %s != %s. "
+                                          "Do not delete." % (fqdn, record.data['rrdatas'], ip))
                         else:
-                            self.log_debug("Deleting DNS record %s." % fqdn)
+                            self.log_info("Deleting DNS record %s." % fqdn)
                             if not driver.delete_record(record):
                                 self.log_error("Error deleting DNS record %s." % fqdn)
diff --git a/IM/connectors/Kubernetes.py b/IM/connectors/Kubernetes.py
index 90d92a98a..7cadb4779 100644
--- a/IM/connectors/Kubernetes.py
+++ b/IM/connectors/Kubernetes.py
@@ -105,8 +105,7 @@ def get_api_version(self, auth_data):
             self.log_exception(
                 "Error connecting with Kubernetes API server")

-        self.log_warn(
-            "Error getting a compatible API version. Setting the default one.")
+        self.log_warn("Error getting a compatible API version. Setting the default one.")
         self.log_debug("Using %s API version." % version)
         return version
@@ -221,7 +220,7 @@ def _create_volumes(self, apiVersion, namespace, system, pod_name, auth_data, pe
                     disk_mount_path = '/' + disk_mount_path
                 if not disk_device.startswith('/'):
                     disk_device = '/' + disk_device
-                self.log_debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
+                self.log_info("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
                 name = "%s-%d" % (pod_name, cont)

                 if persistent:
@@ -536,8 +535,7 @@ def alterVM(self, vm, radl, auth_data):
                 changed = True

         if not changed:
-            self.log_debug(
-                "Nothing changes in the kubernetes pod: " + str(vm.id))
+            self.log_info("Nothing to change in the kubernetes pod: " + str(vm.id))
             return (True, vm)

         # Create the container
diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py
index 00f60aaa0..3c6e89dc8 100644
--- a/IM/connectors/OCCI.py
+++ b/IM/connectors/OCCI.py
@@ -255,11 +255,11 @@ def manage_public_ips(self, vm, auth_data):
         """
         Manage public IPs in the VM
         """
-        self.log_debug("The VM does not have public IP trying to add one.")
+        self.log_info("The VM does not have public IP, trying to add one.")
         if self.add_public_ip_count < self.MAX_ADD_IP_COUNT:
             success, msgs = self.add_public_ip(vm, auth_data)
             if success:
-                self.log_debug("Public IP successfully added.")
+                self.log_info("Public IP successfully added.")
             else:
                 self.add_public_ip_count += 1
                 self.log_warn("Error adding public IP the VM: %s (%d/%d)\n" % (msgs,
@@ -401,7 +401,7 @@ def add_public_ip(self, vm, auth_data):
             if resp.status_code != 201 and resp.status_code != 200:
                 return (False, output)
             else:
-                self.log_debug("Public IP added from pool %s" % network_name)
+                self.log_info("Public IP added from pool %s" % network_name)
                 return (True, vm.id)
         except Exception:
             self.log_exception("Error connecting with OCCI server")
@@ -586,11 +586,11 @@ def 
create_volumes(self, system, auth_data): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) storage_name = "im-disk-%s" % str(uuid.uuid1()) success, volume_id = self.create_volume(int(disk_size), storage_name, auth_data) if success: - self.log_debug("Volume id %s sucessfully created." % volume_id) + self.log_info("Volume id %s sucessfully created." % volume_id) volumes.append((disk_device, volume_id)) system.setValue("disk." + str(cont) + ".provider_id", volume_id) # TODO: get the actual device_id from OCCI @@ -621,7 +621,7 @@ def wait_volume_state(self, volume_id, auth_data, wait_state="online", timeout=1 wait += delay success, storage_info = self.get_volume_info(volume_id, auth_data) state = self.get_occi_attribute_value(storage_info, 'occi.storage.state') - self.log_debug("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state)) + self.log_info("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state)) if success and state == wait_state: online = True elif not success: @@ -693,18 +693,18 @@ def detach_volume(self, volume, auth_data, timeout=90, delay=5): wait = 0 while wait < timeout: try: - self.log_debug("Detaching volume: %s" % storage_id) + self.log_info("Detaching volume: %s" % storage_id) resp = self.create_request('GET', link, auth_data, headers) if resp.status_code == 200: - self.log_debug("Volume link %s exists. Try to delete it." % link) + self.log_info("Volume link %s exists. Try to delete it." % link) resp = self.create_request('DELETE', link, auth_data, headers) if resp.status_code in [204, 200]: - self.log_debug("Successfully detached. Wait it to be deleted.") + self.log_info("Successfully detached. Wait it to be deleted.") else: self.log_error("Error detaching volume: %s" + resp.reason + "\n" + resp.text) elif resp.status_code == 404: # wait until the resource does not exist - self.log_debug("Successfully detached") + self.log_info("Successfully detached") return (True, "") else: self.log_warn("Error detaching volume: %s" + resp.reason + "\n" + resp.text) @@ -734,26 +734,26 @@ def delete_volume(self, storage_id, auth_data, timeout=180, delay=5): wait = 0 while wait < timeout: - self.log_debug("Delete storage: %s" % storage_id) + self.log_info("Delete storage: %s" % storage_id) try: resp = self.create_request('GET', storage_id, auth_data, headers) if resp.status_code == 200: - self.log_debug("Storage %s exists. Try to delete it." % storage_id) + self.log_info("Storage %s exists. Try to delete it." % storage_id) resp = self.create_request('DELETE', storage_id, auth_data, headers) if resp.status_code == 404: - self.log_debug("It does not exist.") + self.log_info("It does not exist.") return (True, "") elif resp.status_code == 409: - self.log_debug("Error deleting the Volume. It seems that it is still " - "attached to a VM: %s" % resp.text) + self.log_info("Error deleting the Volume. 
It seems that it is still " + "attached to a VM: %s" % resp.text) elif resp.status_code != 200 and resp.status_code != 204: self.log_warn("Error deleting the Volume: " + resp.reason + "\n" + resp.text) else: - self.log_debug("Successfully deleted") + self.log_info("Successfully deleted") return (True, "") elif resp.status_code == 404: - self.log_debug("It does not exist.") + self.log_info("It does not exist.") return (True, "") else: self.log_warn("Error deleting storage: %s" + resp.reason + "\n" + resp.text) @@ -1065,19 +1065,19 @@ def add_new_disks(self, vm, radl, auth_data): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) success, volume_id = self.create_volume(int(disk_size), "im-disk-%d" % cont, auth_data) if success: - self.log_debug("Volume id %s successfuly created." % volume_id) + self.log_info("Volume id %s successfuly created." % volume_id) # let's wait the storage to be ready "online" wait_ok = self.wait_volume_state(volume_id, auth_data) if not wait_ok: - self.log_debug("Error waiting volume %s. Deleting it." % volume_id) + self.log_info("Error waiting volume %s. Deleting it." % volume_id) self.delete_volume(volume_id, auth_data) return (False, "Error waiting volume %s. Deleting it." % volume_id) else: - self.log_debug("Attaching to the instance") + self.log_info("Attaching to the instance") attached = self.attach_volume(vm, volume_id, disk_device, mount_path, auth_data) if attached: orig_system.setValue("disk." + str(cont) + ".size", disk_size, "G") @@ -1117,7 +1117,7 @@ def remove_public_ip(self, vm, auth_data): """ Remove/Detach public IP from VM """ - self.log_debug("Removing Public IP from VM %s" % vm.id) + self.log_info("Removing Public IP from VM %s" % vm.id) auth = self.get_auth_header(auth_data) headers = {'Accept': 'text/plain', 'Connection': 'close'} @@ -1137,7 +1137,7 @@ def remove_public_ip(self, vm, auth_data): return (True, "No public IP to delete.") resp = self.create_request('DELETE', link, auth_data, headers) if resp.status_code in [404, 204, 200]: - self.log_debug("Successfully removed") + self.log_info("Successfully removed") return (True, "") else: self.log_error("Error removing public IP: " + resp.reason + "\n" + resp.text) @@ -1336,11 +1336,11 @@ def get_keystone_token(occi, keystone_uri, auth): return token if version == 2: - occi.logger.debug("Getting Keystone v2 token") + occi.logger.info("Getting Keystone v2 token") occi.keystone_token = KeyStoneAuth.get_keystone_token_v2(occi, keystone_uri, auth) return occi.keystone_token elif version == 3: - occi.logger.debug("Getting Keystone v3 token") + occi.logger.info("Getting Keystone v3 token") occi.keystone_token = KeyStoneAuth.get_keystone_token_v3(occi, keystone_uri, auth) return occi.keystone_token else: @@ -1451,7 +1451,7 @@ def get_keystone_token_v2(occi, keystone_uri, auth): # \"metadata\": {\"is_admin\": 0, \"roles\": []}}}" output = resp.json() if 'access' in output: - occi.logger.debug("Using tenant: %s" % tenant["name"]) + occi.logger.info("Using tenant: %s" % tenant["name"]) occi.keystone_tenant = tenant tenant_token_id = str(output['access']['token']['id']) break @@ -1520,7 +1520,7 @@ def get_keystone_token_v3(occi, keystone_uri, auth): url = "%s/v3/auth/tokens" % keystone_uri resp = occi.create_request_static('POST', url, auth, headers, 
json.dumps(body)) if resp.status_code in [200, 201, 202]: - occi.logger.debug("Using project: %s" % project["name"]) + occi.logger.info("Using project: %s" % project["name"]) occi.keystone_project = project scoped_token = resp.headers['X-Subject-Token'] break diff --git a/IM/connectors/OpenNebula.py b/IM/connectors/OpenNebula.py index 88038123e..8bc98157a 100644 --- a/IM/connectors/OpenNebula.py +++ b/IM/connectors/OpenNebula.py @@ -410,7 +410,7 @@ def create_security_groups(self, inf, radl, auth_data): outport.get_remote_port())) if sg_template: - self.log_debug("Creating security group: %s" % sg_name) + self.log_info("Creating security group: %s" % sg_name) sg_template = ("NAME = %s\n" % sg_name) + sg_template success, sg_id, _ = server.one.secgroup.allocate(session_id, sg_template) if not success: @@ -462,14 +462,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 if all_failed: - self.log_debug("All VMs failed, delete Security Groups.") + self.log_info("All VMs failed, delete Security Groups.") for sg in sgs.values(): - self.log_debug("Delete Security Group: %d." % sg) + self.log_info("Delete Security Group: %d." % sg) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." % sg_id) return res def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): @@ -489,17 +489,17 @@ def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): # Get the SG to delete sg = self._get_security_group(sg_name, auth_data) if not sg: - self.log_debug("The SG %s does not exist. Do not delete it." % sg_name) + self.log_info("The SG %s does not exist. Do not delete it." % sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") deleted = True else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." % sg_id) except Exception as ex: self.log_warn("Error deleting the SG: %s" % str(ex)) @@ -1095,7 +1095,7 @@ def attach_new_disks(self, vm, system, session_id): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) success, volume_id = self.attach_volume(vm, int(disk_size), disk_device, disk_fstype, session_id) if success: orig_system.setValue("disk." 
+ str(cont) + ".size", disk_size, "M") diff --git a/IM/connectors/OpenStack.py b/IM/connectors/OpenStack.py index 6caed7184..634eeb341 100644 --- a/IM/connectors/OpenStack.py +++ b/IM/connectors/OpenStack.py @@ -352,7 +352,7 @@ def setIPsFromInstance(self, vm, node): self.log_error("Error adding a floating IP: Max number of retries reached.") self.error_messages += "Error adding a floating IP: Max number of retries reached.\n" else: - self.log_debug("The VM is not running, not adding Elastic/Floating IPs.") + self.log_info("The VM is not running, not adding Elastic/Floating IPs.") def update_system_info_from_instance(self, system, instance_type): """ @@ -386,7 +386,7 @@ def get_networks(self, driver, radl): # site has IP pools, we do not need to assign a network to this interface # it will be assigned with a floating IP if network.isPublic() and num_nets > 1 and pool_names: - self.log_debug("Public IP to be assigned with a floating IP. Do not set a net.") + self.log_info("Public IP to be assigned with a floating IP. Do not set a net.") else: # First check if the user has specified a provider ID if net_provider_id: @@ -469,7 +469,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): elif not system.getValue("disk.0.os.credentials.password"): keypair_name = "im-%d" % int(time.time() * 100.0) - self.log_debug("Create keypair: %s" % keypair_name) + self.log_info("Create keypair: %s" % keypair_name) keypair = driver.create_key_pair(keypair_name) keypair_created = True public_key = keypair.public_key @@ -500,7 +500,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i = 0 all_failed = True while i < num_vm: - self.log_debug("Creating node") + self.log_info("Creating node") node = None retries = 0 @@ -520,7 +520,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): # Add the keypair name to remove it later if keypair_name: vm.keypair = keypair_name - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -532,10 +532,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if all_failed: if keypair_created: # only delete in case of the user do not specify the keypair name - self.log_debug("Deleting keypair: %s." % keypair_name) + self.log_info("Deleting keypair: %s." % keypair_name) driver.delete_key_pair(keypair) for sg in sgs: - self.log_debug("Deleting security group: %s." % sg.id) + self.log_info("Deleting security group: %s." % sg.id) driver.ex_delete_security_group(sg) return res @@ -584,11 +584,11 @@ def manage_elastic_ips(self, vm, node, public_ips): # It is a fixed IP if ip not in public_ips: # It has not been created yet, do it - self.log_debug("Asking for a fixed ip: %s." % ip) + self.log_info("Asking for a fixed ip: %s." % ip) success, msg = self.add_elastic_ip(vm, node, ip, pool_name) else: if num >= len(public_ips): - self.log_debug("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) + self.log_info("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) success, msg = self.add_elastic_ip(vm, node, None, pool_name) if not success: @@ -608,7 +608,7 @@ def get_floating_ip(self, pool): if not ip.node_id: is_private = any([IPAddress(ip.ip_address) in IPNetwork(mask) for mask in Config.PRIVATE_NET_MASKS]) if is_private: - self.log_debug("Floating IP found %s, but it is private. Ignore." % ip.ip_address) + self.log_info("Floating IP found %s, but it is private. Ignore." 
% ip.ip_address) else: return True, ip @@ -625,7 +625,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): Returns: a :py:class:`OpenStack_1_1_FloatingIpAddress` added or None if some problem occur. """ try: - self.log_debug("Add an Floating IP") + self.log_info("Add an Floating IP") pool = self.get_ip_pool(node.driver, pool_name) if not pool: @@ -633,7 +633,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): msg = "Incorrect pool name: %s." % pool_name else: msg = "No pools available." - self.log_debug("No Floating IP assigned: %s" % msg) + self.log_info("No Floating IP assigned: %s" % msg) return False, msg if node.driver.ex_list_floating_ip_pools(): @@ -659,7 +659,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): if is_private: self.log_error("Error getting a Floating IP from pool %s. The IP is private." % pool_name) - self.log_debug("We have created it, so release it.") + self.log_info("We have created it, so release it.") floating_ip.delete() return False, "Error attaching a Floating IP to the node. Private IP returned." @@ -681,7 +681,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None): if not attached: self.log_error("Error attaching a Floating IP to the node.") - self.log_debug("We have created it, so release it.") + self.log_info("We have created it, so release it.") floating_ip.delete() return False, "Error attaching a Floating IP to the node." return True, floating_ip @@ -720,7 +720,7 @@ def create_security_groups(self, driver, inf, radl): with inf._lock: sg = self._get_security_group(driver, sg_name) if not sg: - self.log_debug("Creating security group: %s" % sg_name) + self.log_info("Creating security group: %s" % sg_name) sg = driver.ex_create_security_group(sg_name, "Security group created by the IM") res.append(sg) @@ -792,14 +792,14 @@ def finalize(self, vm, last, auth_data): self.delete_security_groups(node, vm.inf) else: # If this is not the last vm, we skip this step - self.log_debug("There are active instances. Not removing the SG") + self.log_info("There are active instances. Not removing the SG") except: self.log_exception("Error deleting security groups.") if not success: return (False, "Error destroying node: " + vm.id) - self.log_debug("VM " + str(vm.id) + " successfully destroyed") + self.log_info("VM " + str(vm.id) + " successfully destroyed") else: self.log_warn("VM " + str(vm.id) + " not found.") @@ -819,11 +819,11 @@ def delete_security_groups(self, node, inf, timeout=90, delay=10): # Get the SG to delete sg = self._get_security_group(node.driver, sg_name) if not sg: - self.log_debug("The SG %s does not exist. Do not delete it." % sg_name) + self.log_info("The SG %s does not exist. Do not delete it." 
% sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) node.driver.ex_delete_security_group(sg) deleted = True except Exception as ex: diff --git a/etc/im.cfg b/etc/im.cfg index 1861aae95..a41f45f62 100644 --- a/etc/im.cfg +++ b/etc/im.cfg @@ -49,7 +49,7 @@ VM_INFO_UPDATE_FREQUENCY = 10 VM_INFO_UPDATE_ERROR_GRACE_PERIOD = 120 # Log File -LOG_LEVEL = DEBUG +LOG_LEVEL = INFO LOG_FILE = /var/log/im/im.log LOG_FILE_MAX_SIZE = 10485760 From b60ca2278d5f86f19b9f51fef3a327103e717054 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 17:43:56 +0100 Subject: [PATCH 2/2] Set loglevel to INFO: #485 --- etc/logging.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/etc/logging.conf b/etc/logging.conf index fd97a89eb..854b54070 100644 --- a/etc/logging.conf +++ b/etc/logging.conf @@ -12,26 +12,26 @@ level=ERROR handlers=fileHandler [logger_ConfManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=ConfManager propagate=0 [logger_CloudConnector] -level=DEBUG +level=INFO handlers=fileHandler qualname=CloudConnector propagate=0 [logger_InfrastructureManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=InfrastructureManager propagate=0 [handler_fileHandler] class=logging.handlers.RotatingFileHandler -level=DEBUG +level=INFO formatter=simpleFormatter args=('/var/log/im/im.log', 'w', 10485760, 3)
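
---
A minimal sketch (not part of either patch) of what these INFO defaults
mean at runtime. It assumes the shipped logging.conf is installed at
/etc/im/logging.conf (an assumption; adjust for your deployment); the
logger name comes from the qualname entries above, and the two log
messages are invented for the example.

    import logging
    import logging.config

    # Apply the loggers, handler and levels defined in logging.conf.
    # Assumed install path; the file is etc/logging.conf in the repo.
    logging.config.fileConfig('/etc/im/logging.conf',
                              disable_existing_loggers=False)

    log = logging.getLogger('CloudConnector')  # qualname in logging.conf

    log.info("VM 42 successfully destroyed")   # emitted: INFO >= INFO
    log.debug("raw VM metadata: ...")          # dropped: DEBUG < INFO

Because both the loggers and the fileHandler now sit at INFO, the
remaining self.log_debug() calls in the connectors are filtered out
before they reach /var/log/im/im.log.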