From 282bf87c6ed5e86027d45a1f9f96d9e4a75f2830 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 3 Nov 2017 08:50:35 +0100 Subject: [PATCH 01/30] Fix conf-ansible to force install pyOpenSSL --- contextualization/conf-ansible.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index cda57640b..4317eee67 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -93,6 +93,12 @@ file: src=/usr/bin/python2.6 dest=/usr/bin/python_ansible state=link when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 6 + - name: Upgrade pip + pip: name=pip extra_args="-I" state=latest + + - name: Upgrade pyOpenSSL with Pip + pip: name=pyOpenSSL extra_args="-I" state=latest + - name: Upgrade setuptools with Pip pip: name=setuptools extra_args="-I" state=latest when: ansible_os_family == "Suse" or (ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7) From e1226adbbc12e3aa56aabc161eb2db31780bc436 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 3 Nov 2017 10:30:33 +0100 Subject: [PATCH 02/30] Reduce timeout getting Ansible process results --- IM/ConfManager.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 5eace3a85..c4b4e33d0 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -1260,11 +1260,8 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh): self.log_debug('Ansible process finished.') try: - timeout = Config.ANSIBLE_INSTALL_TIMEOUT - wait - if timeout < Config.CHECK_CTXT_PROCESS_INTERVAL: - timeout = Config.CHECK_CTXT_PROCESS_INTERVAL - self.log_debug('Get the result with a timeout of %d seconds.' % timeout) - _, (return_code, _), output = result.get(timeout=timeout) + self.log_debug('Get the results of the Ansible process.') + _, (return_code, _), output = result.get(timeout=10) msg = output.getvalue() except: self.log_exception('Error getting ansible results.') From f9c3a91c83cc13f828c9a4cd324e09840d172095 Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 6 Nov 2017 11:59:34 +0100 Subject: [PATCH 03/30] Fix #476 --- IM/connectors/OCCI.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index cfcd0d792..64ad8de2c 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -1305,9 +1305,9 @@ def check_keystone_token(occi, keystone_uri, version, auth): headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Auth-Token': occi.keystone_token, 'Connection': 'close'} if version == 2: - url = "%s/v2.0" % keystone_uri + url = "%s/v2.0/tenants" % keystone_uri elif version == 3: - url = "%s/v3" % keystone_uri + url = "%s/v3/auth/tokens" % keystone_uri else: return None resp = occi.create_request_static('GET', url, auth, headers) From 8a46bbba7e5adb3ac4ac063f18d08ca180e84961 Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 6 Nov 2017 12:56:23 +0100 Subject: [PATCH 04/30] Implements #478 --- IM/connectors/OCCI.py | 107 +++++++++++++++++++++-------------- test/unit/connectors/OCCI.py | 2 +- 2 files changed, 65 insertions(+), 44 deletions(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index 64ad8de2c..f60463495 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -61,6 +61,8 @@ class OCCICloudConnector(CloudConnector): def __init__(self, cloud_info, inf): self.add_public_ip_count = 0 self.keystone_token = None + self.keystone_tenant = None + 
self.keystone_project = None if cloud_info.path == "/": cloud_info.path = "" CloudConnector.__init__(self, cloud_info, inf) @@ -1412,20 +1414,25 @@ def get_keystone_token_v2(occi, keystone_uri, auth): occi.logger.exception("Error obtaining Keystone Token.") raise Exception("Error obtaining Keystone Token: %s" % str(output)) - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', - 'X-Auth-Token': token_id, 'Connection': 'close'} - url = "%s/v2.0/tenants" % keystone_uri - resp = occi.create_request_static('GET', url, auth, headers) - resp.raise_for_status() + if occi.keystone_tenant is None: + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', + 'X-Auth-Token': token_id, 'Connection': 'close'} + url = "%s/v2.0/tenants" % keystone_uri + resp = occi.create_request_static('GET', url, auth, headers) + resp.raise_for_status() - # format: -> "{\"tenants_links\": [], \"tenants\": - # [{\"description\": \"egi fedcloud\", \"enabled\": true, \"id\": - # \"fffd98393bae4bf0acf66237c8f292ad\", \"name\": \"egi\"}]}" - output = resp.json() + # format: -> "{\"tenants_links\": [], \"tenants\": + # [{\"description\": \"egi fedcloud\", \"enabled\": true, \"id\": + # \"fffd98393bae4bf0acf66237c8f292ad\", \"name\": \"egi\"}]}" + output = resp.json() + tenants = output['tenants'] + else: + tenants = [occi.keystone_tenant] tenant_token_id = None + # retry for each available tenant (usually only one) - for tenant in output['tenants']: + for tenant in tenants: body = '{"auth":{"voms":true,"tenantName":"' + str(tenant['name']) + '"}}' headers = {'Accept': 'application/json', 'Content-Type': 'application/json', @@ -1444,6 +1451,8 @@ def get_keystone_token_v2(occi, keystone_uri, auth): # \"metadata\": {\"is_admin\": 0, \"roles\": []}}}" output = resp.json() if 'access' in output: + occi.logger.debug("Using tenant: %s" % tenant["name"]) + occi.keystone_tenant = tenant tenant_token_id = str(output['access']['token']['id']) break @@ -1474,40 +1483,52 @@ def get_keystone_token_v3(occi, keystone_uri, auth): token = resp.headers['X-Subject-Token'] - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', - 'X-Auth-Token': token, 'Connection': 'close'} - url = "%s/v3/auth/projects" % keystone_uri - resp = occi.create_request_static('GET', url, auth, headers) - resp.raise_for_status() - - output = resp.json() + if occi.keystone_project is None: + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', + 'X-Auth-Token': token, 'Connection': 'close'} + url = "%s/v3/auth/projects" % keystone_uri + resp = occi.create_request_static('GET', url, auth, headers) + resp.raise_for_status() + + output = resp.json() + + if len(output['projects']) == 1: + # If there is only one, get the first project + projects = output['projects'] + elif len(output['projects']) > 1: + # If there is more than one + if auth and "project" in auth: + project_found = None + for elem in output['projects']: + if elem['id'] == auth["project"] or elem['name'] == auth["project"]: + project_found = elem + if project_found: + projects = [project_found] + else: + projects = output['projects'] + occi.logger.warn("Keystone 3 project %s not found." 
% auth["project"]) + else: + projects = [occi.keystone_project] - if len(output['projects']) == 1: - # If there are only one get the first tenant - project = output['projects'].pop() - if len(output['projects']) >= 1: - # If there are more than one - if auth and "project" in auth: - project_found = None - for elem in output['projects']: - if elem['id'] == auth["project"] or elem['name'] == auth["project"]: - project_found = elem - if project_found: - project = project_found - else: - project = output['projects'].pop() - self.log_warn("Keystone 3 project %s not found. Using first one." % auth["project"]) - - # get scoped token for allowed project - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', - 'X-Auth-Token': token, 'Connection': 'close'} - body = {"auth": {"identity": {"methods": ["token"], "token": {"id": token}}, - "scope": {"project": {"id": project["id"]}}}} - url = "%s/v3/auth/tokens" % keystone_uri - resp = occi.create_request_static('POST', url, auth, headers, json.dumps(body)) - resp.raise_for_status() - token = resp.headers['X-Subject-Token'] - return token + scoped_token = None + for project in projects: + # get scoped token for allowed project + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', + 'X-Auth-Token': token, 'Connection': 'close'} + body = {"auth": {"identity": {"methods": ["token"], "token": {"id": token}}, + "scope": {"project": {"id": project["id"]}}}} + url = "%s/v3/auth/tokens" % keystone_uri + resp = occi.create_request_static('POST', url, auth, headers, json.dumps(body)) + if resp.status_code in [200, 201, 202]: + occi.logger.debug("Using project: %s" % project["name"]) + occi.keystone_project = project + scoped_token = resp.headers['X-Subject-Token'] + break + + if not scoped_token: + occi.logger.error("No project accessible for the user.") + + return scoped_token except Exception as ex: occi.logger.exception("Error obtaining Keystone v3 Token.") raise Exception("Error obtaining Keystone v3 Token: %s" % str(ex)) diff --git a/test/unit/connectors/OCCI.py b/test/unit/connectors/OCCI.py index 2b0a44f2a..db0216850 100755 --- a/test/unit/connectors/OCCI.py +++ b/test/unit/connectors/OCCI.py @@ -160,7 +160,7 @@ def get_response(self, method, url, verify, cert, headers, data): resp.json.return_value = {"tenants": [{"name": "tenantname"}]} elif url == "/v3/auth/projects": resp.status_code = 200 - resp.json.return_value = {"projects": [{"id": "projectid"}]} + resp.json.return_value = {"projects": [{"id": "projectid", "name": "prname"}]} elif url == "/v3/OS-FEDERATION/identity_providers/egi.eu/protocols/oidc/auth": resp.status_code = 200 resp.headers = {'X-Subject-Token': 'token1'} From 90d7680849e43b3c57658e3042e346ddeadde4cc Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 6 Nov 2017 12:59:42 +0100 Subject: [PATCH 05/30] Style change --- IM/connectors/OCCI.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index f60463495..bb59bd26e 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -1516,7 +1516,7 @@ def get_keystone_token_v3(occi, keystone_uri, auth): headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Auth-Token': token, 'Connection': 'close'} body = {"auth": {"identity": {"methods": ["token"], "token": {"id": token}}, - "scope": {"project": {"id": project["id"]}}}} + "scope": {"project": {"id": project["id"]}}}} url = "%s/v3/auth/tokens" % keystone_uri resp = occi.create_request_static('POST', url, 
auth, headers, json.dumps(body)) if resp.status_code in [200, 201, 202]: From 377f092aba85e92c81b6c9584be6fede068f2693 Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 6 Nov 2017 16:37:16 +0100 Subject: [PATCH 06/30] Set version num 1.6.4 --- IM/__init__.py | 2 +- changelog | 5 +++++ docker-devel/Dockerfile | 2 +- docker-py3/Dockerfile | 4 ++-- docker/Dockerfile | 4 ++-- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/IM/__init__.py b/IM/__init__.py index 6c5497868..b51df70cc 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,5 +19,5 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'uriparse', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.6.3' +__version__ = '1.6.4' __author__ = 'Miguel Caballer' diff --git a/changelog b/changelog index dd7f5ce1c..11fc3e926 100644 --- a/changelog +++ b/changelog @@ -351,3 +351,8 @@ IM 1.6.3 * Fix error setting Hostname in Docker, Kubernetes and AzureClassic conns. * Fix error connecting with Synefo OCCI sites. * Fix error deleting VM in OCCI OpenNebula sites. + +IM 1.6.4 + * Store tenant and project in OCCI connector. + * Fix error validating keystone token in OCCI conn. + * Decrease timeout getting ansible process results. diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index 673589bae..ffceb73b4 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -2,7 +2,7 @@ FROM grycap/jenkins:ubuntu16.04-im ARG BRANCH=devel MAINTAINER Miguel Caballer -LABEL version="1.6.3" +LABEL version="1.6.4" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 diff --git a/docker-py3/Dockerfile b/docker-py3/Dockerfile index ff0b04267..758576331 100644 --- a/docker-py3/Dockerfile +++ b/docker-py3/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile to create a container with the IM service FROM ubuntu:16.04 LABEL maintainer="Miguel Caballer " -LABEL version="1.6.3" +LABEL version="1.6.4" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -16,7 +16,7 @@ RUN pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-c # Install IM RUN apt-get update && apt-get install --no-install-recommends -y gcc libssl-dev libffi-dev libsqlite3-dev && \ - pip3 install IM==1.6.3 && \ + pip3 install IM==1.6.4 && \ apt-get remove -y gcc libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \ apt-get autoremove -y && \ rm -rf /var/lib/apt/lists/* diff --git a/docker/Dockerfile b/docker/Dockerfile index 5fed39ec2..fcd13b209 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile to create a container with the IM service FROM ubuntu:16.04 LABEL maintainer="Miguel Caballer " -LABEL version="1.6.3" +LABEL version="1.6.4" LABEL description="Container image to run the IM service. 
(http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -17,7 +17,7 @@ RUN pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-co # Install IM RUN apt-get update && apt-get install --no-install-recommends -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev && \ pip install MySQL-python && \ - pip install IM==1.6.3 && \ + pip install IM==1.6.4 && \ apt-get remove -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \ apt-get autoremove -y && \ rm -rf /var/lib/apt/lists/* From 6187fbf4bcec782fd3e207e89d5fe572f9cfa9b0 Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 7 Nov 2017 13:13:23 +0100 Subject: [PATCH 07/30] Implements: #481 --- IM/InfrastructureManager.py | 12 +++++++----- IM/REST.py | 12 +++++++++++- IM/ServiceRequests.py | 5 +++-- doc/source/REST.rst | 5 ++++- doc/source/xmlrpc.rst | 6 ++++-- im_service.py | 4 ++-- test/unit/REST.py | 8 ++++++++ test/unit/test_im_logic.py | 4 ++++ 8 files changed, 43 insertions(+), 13 deletions(-) diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index 4db9b3e1a..befc1bb19 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -821,7 +821,7 @@ def GetInfrastructureInfo(inf_id, auth): return res @staticmethod - def GetInfrastructureContMsg(inf_id, auth): + def GetInfrastructureContMsg(inf_id, auth, headeronly=False): """ Get cont msg of an infrastructure. - inf_id(str): infrastructure id. - auth(Authentication): parsed authentication tokens. + - headeronly(bool): Flag to return only the header part of the infra log. Return: a str with the cont msg """ sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) res = sel_inf.cont_out - for vm in sel_inf.get_vm_list(): - if vm.get_cont_msg(): - res += "VM " + str(vm.id) + ":\n" + vm.get_cont_msg() + "\n" - res += "***************************************************************************\n" + if not headeronly: + for vm in sel_inf.get_vm_list(): + if vm.get_cont_msg(): + res += "VM " + str(vm.id) + ":\n" + vm.get_cont_msg() + "\n" + res += "***************************************************************************\n" InfrastructureManager.logger.debug(res) return res diff --git a/IM/REST.py b/IM/REST.py index a2a979ec1..9b50f40e4 100644 --- a/IM/REST.py +++ b/IM/REST.py @@ -306,7 +306,17 @@ def RESTGetInfrastructureProperty(infid=None, prop=None): try: if prop == "contmsg": - res = InfrastructureManager.GetInfrastructureContMsg(infid, auth) + headeronly = False + if "headeronly" in bottle.request.params.keys(): + str_headeronly = bottle.request.params.get("headeronly").lower() + if str_headeronly in ['yes', 'true', '1']: + headeronly = True + elif str_headeronly in ['no', 'false', '0']: + headeronly = False + else: + return return_error(400, "Incorrect value in headeronly parameter") + + res = InfrastructureManager.GetInfrastructureContMsg(infid, auth, headeronly) elif prop == "radl": res = InfrastructureManager.GetInfrastructureRADL(infid, auth) elif prop == "state": diff --git a/IM/ServiceRequests.py b/IM/ServiceRequests.py index b7a10fefc..ac12aa71b 100644 --- a/IM/ServiceRequests.py +++ b/IM/ServiceRequests.py @@ -317,9 +317,10 @@ class Request_GetInfrastructureContMsg(IMBaseRequest): def _call_function(self): self._error_mesage = "Error gettinf the Inf. 
cont msg" - (inf_id, auth_data) = self.arguments + (inf_id, auth_data, headeronly) = self.arguments return IM.InfrastructureManager.InfrastructureManager.GetInfrastructureContMsg(inf_id, - Authentication(auth_data)) + Authentication(auth_data), + headeronly) class Request_StartVM(IMBaseRequest): diff --git a/doc/source/REST.rst b/doc/source/REST.rst index 4d6f8bf75..dfce18c1d 100644 --- a/doc/source/REST.rst +++ b/doc/source/REST.rst @@ -117,10 +117,13 @@ GET ``http://imserver.com/infrastructures/`` GET ``http://imserver.com/infrastructures//`` :Response Content-type: text/plain or application/json :ok response: 200 OK + :input fields: ``headeronly`` (optional) :fail response: 401, 404, 400, 403 Return property ``property_name`` associated to the infrastructure with ID ``infId``. It has three properties: - :``contmsg``: a string with the contextualization message. + :``contmsg``: a string with the contextualization message. If the ``headeronly`` flag is set to 'yes', + 'true' or '1', only the initial part of the infrastructure contextualization log will be + returned (without any VM contextualization log). :``radl``: a string with the original specified RADL of the infrastructure. :``state``: a JSON object with two elements: diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst index 04a534af7..e216c97c3 100644 --- a/doc/source/xmlrpc.rst +++ b/doc/source/xmlrpc.rst @@ -43,11 +43,13 @@ This is the list of method names: ``GetInfrastructureContMsg`` :parameter 0: ``infId``: integer :parameter 1: ``auth``: array of structs + :parameter 2: ``headeronly``: (optional, default value False) boolean :ok response: [true, ``cont_out``: string] :fail response: [false, ``error``: string] - Return the contextualization log associated to the - infrastructure with ID ``infId``. + Return the contextualization log associated to the infrastructure with ID ``infId``. + If the ``headeronly`` flag is set to True, only the initial part of the infrastructure + contextualization log will be returned (without any VM contextualization log). 
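As a minimal client sketch of the extended call (the server URL, port and the auth entry below are placeholder assumptions, not values defined by this patch), using the standard Python ``xmlrpc.client`` module::

    from xmlrpc.client import ServerProxy

    server = ServerProxy("http://imserver.com:8899")
    # Hypothetical auth data: an "array of structs" as described above
    auth = [{"id": "im", "type": "InfrastructureManager",
             "username": "user", "password": "pass"}]
    inf_id = 0  # id of an existing infrastructure

    # Passing True as the new optional third parameter returns only the
    # header part of the contextualization log (no per-VM sections)
    (success, cont_out) = server.GetInfrastructureContMsg(inf_id, auth, True)
    if success:
        print(cont_out)
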
``GetInfrastructureState`` :parameter 0: ``infId``: integer diff --git a/im_service.py b/im_service.py index 1dad58b14..e0db244a7 100755 --- a/im_service.py +++ b/im_service.py @@ -160,9 +160,9 @@ def GetVMContMsg(inf_id, vm_id, auth_data): return WaitRequest(request) -def GetInfrastructureContMsg(inf_id, auth_data): +def GetInfrastructureContMsg(inf_id, auth_data, headeronly=False): request = IMBaseRequest.create_request( - IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, (inf_id, auth_data)) + IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, (inf_id, auth_data, headeronly)) return WaitRequest(request) diff --git a/test/unit/REST.py b/test/unit/REST.py index f8c8a82ef..c0a7534c7 100755 --- a/test/unit/REST.py +++ b/test/unit/REST.py @@ -155,6 +155,14 @@ def test_GetInfrastructureProperty(self, bottle_request, GetInfrastructureState, res = RESTGetInfrastructureProperty("1", "contmsg") self.assertEqual(res, "contmsg") + bottle_request.params = {'headeronly': 'yes'} + res = RESTGetInfrastructureProperty("1", "contmsg") + self.assertEqual(res, "contmsg") + + bottle_request.params = {'headeronly': 'no'} + res = RESTGetInfrastructureProperty("1", "contmsg") + self.assertEqual(res, "contmsg") + res = RESTGetInfrastructureProperty("1", "radl") self.assertEqual(res, "radl") diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index 95e330f84..c4f8dcc19 100755 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -717,12 +717,16 @@ def test_get_vm_info(self): contmsg = IM.GetVMContMsg(infId, "0", auth0) self.assertEqual(contmsg, "") + InfrastructureList.infrastructure_list[infId].cont_out = "Header" InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector = MagicMock() InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector.error_messages = "TESTMSG" contmsg = IM.GetInfrastructureContMsg(infId, auth0) + header_contmsg = IM.GetInfrastructureContMsg(infId, auth0, True) InfrastructureList.infrastructure_list[infId].vm_list[0].cloud_connector = None self.assertIn("TESTMSG", contmsg) + self.assertNotIn("TESTMSG", header_contmsg) + self.assertIn("Header", header_contmsg) state = IM.GetInfrastructureState(infId, auth0) self.assertEqual(state["state"], "running") From 80729b265e40837366ff27a588f86a491418b5d6 Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 7 Nov 2017 13:15:23 +0100 Subject: [PATCH 08/30] Implements: #481 --- changelog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/changelog b/changelog index 11fc3e926..25a829bdc 100644 --- a/changelog +++ b/changelog @@ -356,3 +356,5 @@ IM 1.6.4 * Store tenant and project in OCCI connector. * Fix error validating keystone token in OCCI conn. * Decrease timeout getting ansible process results. + * Enable to get the initial infrastructure contextualization log. 
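For the REST counterpart, a usage sketch with the ``requests`` library (host, port, infrastructure id and the Authorization header value are placeholder assumptions; see the IM REST documentation for the exact header format):

    import requests

    # Placeholder endpoint and credentials
    url = "http://imserver.com:8800/infrastructures/INF_ID/contmsg"
    headers = {"Authorization": "type = InfrastructureManager; username = user; password = pass"}

    # headeronly accepts 'yes'/'true'/'1' (header only) or 'no'/'false'/'0' (full log)
    resp = requests.get(url, params={"headeronly": "true"}, headers=headers)
    print(resp.status_code, resp.text)
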
+ From 0748152de1e695af5906f54b7bda985b6c004d06 Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 7 Nov 2017 13:24:08 +0100 Subject: [PATCH 09/30] Implements: #481 --- test/integration/TestIM.py | 16 ++++++++++------ test/integration/TestREST.py | 14 ++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py index d1813e738..86c8cceba 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -164,14 +164,18 @@ def test_13_getcontmsg(self): """ Test the GetInfrastructureContMsg IM function """ - (success, cont_out) = self.server.GetInfrastructureContMsg( - self.inf_id, self.auth_data) - self.assertTrue( - success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out)) - self.assertGreater( - len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out) + (success, cont_out) = self.server.GetInfrastructureContMsg(self.inf_id, self.auth_data) + self.assertTrue(success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out)) + self.assertGreater(len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out) + self.assertIn("Select master VM", cont_out) self.assertIn("NODENAME = front", cont_out) + (success, cont_out) = self.server.GetInfrastructureContMsg(self.inf_id, self.auth_data, True) + self.assertTrue(success, msg="ERROR calling GetInfrastructureContMsg: " + str(cont_out)) + self.assertGreater(len(cont_out), 100, msg="Incorrect contextualization message: " + cont_out) + self.assertIn("Select master VM", cont_out) + self.assertNotIn("NODENAME = front", cont_out) + def test_14_getvmcontmsg(self): """ Test the GetVMContMsg IM function diff --git a/test/integration/TestREST.py b/test/integration/TestREST.py index d92af2caf..8c53aa58a 100755 --- a/test/integration/TestREST.py +++ b/test/integration/TestREST.py @@ -186,16 +186,18 @@ def test_30_get_vm_info(self): def test_32_get_vm_contmsg(self): resp = self.create_request("GET", "/infrastructures/" + self.inf_id) - self.assertEqual(resp.status_code, 200, - msg="ERROR getting the infrastructure info:" + resp.text) + self.assertEqual(resp.status_code, 200, msg="ERROR getting the infrastructure info:" + resp.text) vm_ids = resp.text.split("\n") vm_uri = uriparse(vm_ids[0]) resp = self.create_request("GET", vm_uri[2] + "/contmsg") - self.assertEqual(resp.status_code, 200, - msg="ERROR getting VM contmsg:" + resp.text) - self.assertEqual( - len(resp.text), 0, msg="Incorrect VM contextualization message: " + resp.text) + self.assertEqual(resp.status_code, 200, msg="ERROR getting VM contmsg:" + resp.text) + self.assertEqual(len(resp.text), 0, msg="Incorrect VM contextualization message: " + resp.text) + + resp2 = self.create_request("GET", vm_uri[2] + "/contmsg?headeronly=true") + self.assertEqual(resp2.status_code, 200, msg="ERROR getting VM contmsg:" + resp.text) + self.assertEqual(len(resp2.text), 0, msg="Incorrect VM contextualization message: " + resp.text) + self.assertGreater(len(resp.text), len(resp2.text)) def test_33_get_contmsg(self): resp = self.create_request("GET", "/infrastructures/" + self.inf_id + "/contmsg") From da99cb6dc20a6821c1f5d9690ac54d97a8cf551c Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 7 Nov 2017 13:32:09 +0100 Subject: [PATCH 10/30] Implements: #481 --- test/unit/ServiceRequests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/ServiceRequests.py b/test/unit/ServiceRequests.py index 2c9364df3..918a065ac 100755 --- a/test/unit/ServiceRequests.py +++ 
b/test/unit/ServiceRequests.py @@ -65,7 +65,7 @@ def test_cont_msg(self, inflist): import IM.ServiceRequests req = IM.ServiceRequests.IMBaseRequest.create_request(IM.ServiceRequests. IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG, - ("", "")) + ("", "", False)) req._call_function() @patch('IM.InfrastructureManager.InfrastructureManager') From a3671e1c55b510c57d08d331465a882853d329c5 Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 7 Nov 2017 16:20:43 +0100 Subject: [PATCH 11/30] Implements: #481 --- test/integration/TestREST.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/integration/TestREST.py b/test/integration/TestREST.py index 8c53aa58a..d95316773 100755 --- a/test/integration/TestREST.py +++ b/test/integration/TestREST.py @@ -196,8 +196,6 @@ def test_32_get_vm_contmsg(self): resp2 = self.create_request("GET", vm_uri[2] + "/contmsg?headeronly=true") self.assertEqual(resp2.status_code, 200, msg="ERROR getting VM contmsg:" + resp.text) - self.assertEqual(len(resp2.text), 0, msg="Incorrect VM contextualization message: " + resp.text) - self.assertGreater(len(resp.text), len(resp2.text)) def test_33_get_contmsg(self): resp = self.create_request("GET", "/infrastructures/" + self.inf_id + "/contmsg") From 9f7182d573300e59ba1048e5c28963aa46ac8789 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 15:59:12 +0100 Subject: [PATCH 12/30] remove python-xmltodict dependency --- packages/generate_deb.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/generate_deb.sh b/packages/generate_deb.sh index 0f85d9aba..28a360dc4 100755 --- a/packages/generate_deb.sh +++ b/packages/generate_deb.sh @@ -5,6 +5,6 @@ apt install -y python-stdeb # remove the ansible requirement as it makes to generate an incorrect dependency python-ansible # also remove the pysqlite requirement as it makes to generate an incorrect dependency python-pysqlite1.1 sed -i '/install_requires/c\ install_requires=["paramiko >= 1.14", "PyYAML", suds_pkg,' setup.py -python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests, python-xmltodict" bdist_deb +python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests" bdist_deb mkdir dist_pkg cp deb_dist/*.deb dist_pkg From a675bc620295cbff900a706800ef5d4fbd81e5eb Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 16:09:31 +0100 Subject: [PATCH 13/30] Decrease detach volume timeout --- IM/connectors/OCCI.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index bb59bd26e..00f60aaa0 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -680,7 +680,7 @@ def create_volume(self, size, name, auth_data): self.log_exception("Error creating volume") return False, str(ex) - def detach_volume(self, volume, auth_data, timeout=180, delay=5): + def detach_volume(self, volume, auth_data, timeout=90, delay=5): auth = self.get_auth_header(auth_data) headers = {'Accept': 'text/plain', 'Connection': 'close'} if auth: From 83331dc8ae4cf855dac79a372e1c6c527d9c4200 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 17:40:10 +0100 Subject: [PATCH 14/30] 
Set loglevel to INFO: #485 --- IM/ConfManager.py | 100 +++++++++++++++--------------- IM/InfrastructureInfo.py | 3 +- IM/InfrastructureManager.py | 12 ++-- IM/VirtualMachine.py | 13 ++-- IM/config.py | 2 +- IM/connectors/Azure.py | 44 ++++++------- IM/connectors/AzureClassic.py | 23 ++++--- IM/connectors/Docker.py | 16 ++--- IM/connectors/EC2.py | 112 ++++++++++++++-------------------- IM/connectors/GCE.py | 51 ++++++++-------- IM/connectors/Kubernetes.py | 8 +-- IM/connectors/OCCI.py | 54 ++++++++-------- IM/connectors/OpenNebula.py | 20 +++--- IM/connectors/OpenStack.py | 38 ++++++------ etc/im.cfg | 2 +- 15 files changed, 235 insertions(+), 263 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index c4b4e33d0..d3d82d309 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -78,15 +78,15 @@ def check_running_pids(self, vms_configuring, failed_step): if step not in res: res[step] = [] res[step].append(vm) - self.log_debug("Ansible process to configure " + str(vm.im_id) + - " with PID " + vm.ctxt_pid + " is still running.") + self.log_info("Ansible process to configure " + str(vm.im_id) + + " with PID " + vm.ctxt_pid + " is still running.") else: - self.log_debug("Configuration process in VM: " + str(vm.im_id) + " finished.") + self.log_info("Configuration process in VM: " + str(vm.im_id) + " finished.") if vm.configured: - self.log_debug("Configuration process of VM %s success." % vm.im_id) + self.log_info("Configuration process of VM %s success." % vm.im_id) elif vm.configured is False: failed_step.append(step) - self.log_debug("Configuration process of VM %s failed." % vm.im_id) + self.log_info("Configuration process of VM %s failed." % vm.im_id) else: self.log_warn("Configuration process of VM %s in unfinished state." % vm.im_id) # Force to save the data to store the log data () @@ -97,14 +97,14 @@ def check_running_pids(self, vms_configuring, failed_step): if step not in res: res[step] = [] res[step].append(vm) - self.log_debug("Configuration process of master node: " + - str(vm.get_ctxt_process_names()) + " is still running.") + self.log_info("Configuration process of master node: " + + str(vm.get_ctxt_process_names()) + " is still running.") else: if vm.configured: - self.log_debug("Configuration process of master node successfully finished.") + self.log_info("Configuration process of master node successfully finished.") elif vm.configured is False: failed_step.append(step) - self.log_debug("Configuration process of master node failed.") + self.log_info("Configuration process of master node failed.") else: self.log_warn("Configuration process of master node in unfinished state.") # Force to save the data to store the log data @@ -116,9 +116,9 @@ def stop(self): self._stop_thread = True # put a task to assure to wake up the thread self.inf.add_ctxt_tasks([(-10, 0, None, None)]) - self.log_debug("Stop Configuration thread.") + self.log_info("Stop Configuration thread.") if self.ansible_process and self.ansible_process.is_alive(): - self.log_debug("Stopping pending Ansible process.") + self.log_info("Stopping pending Ansible process.") self.ansible_process.terminate() def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT): @@ -170,7 +170,7 @@ def check_vm_ips(self, timeout=Config.WAIT_RUNNING_VM_TIMEOUT): self.log_error("Error waiting all the VMs to have a correct IP") self.inf.set_configured(False) else: - self.log_debug("All the VMs have a correct IP") + self.log_info("All the VMs have a correct IP") self.inf.set_configured(True) return success @@ -180,7 
+180,7 @@ def kill_ctxt_processes(self): Kill all the ctxt processes """ for vm in self.inf.get_vm_list(): - self.log_debug("Killing ctxt processes in VM: %s" % vm.id) + self.log_info("Killing ctxt processes in VM: %s" % vm.id) try: vm.kill_check_ctxt_process() except: @@ -188,7 +188,7 @@ def kill_ctxt_processes(self): vm.configured = None def run(self): - self.log_debug("Starting the ConfManager Thread") + self.log_info("Starting the ConfManager Thread") failed_step = [] last_step = None @@ -196,14 +196,14 @@ def run(self): while not self._stop_thread: if self.init_time + self.max_ctxt_time < time.time(): - self.log_debug("Max contextualization time passed. Exit thread.") + self.log_info("Max contextualization time passed. Exit thread.") self.inf.add_cont_msg("ERROR: Max contextualization time passed.") # Remove tasks from queue self.inf.reset_ctxt_tasks() # Kill the ansible processes self.kill_ctxt_processes() if self.ansible_process and self.ansible_process.is_alive(): - self.log_debug("Stopping pending Ansible process.") + self.log_info("Stopping pending Ansible process.") self.ansible_process.terminate() return @@ -219,14 +219,14 @@ def run(self): # stop the thread if the stop method has been called if self._stop_thread: - self.log_debug("Exit Configuration thread.") + self.log_info("Exit Configuration thread.") return # if this task is from a next step if last_step is not None and last_step < step: if failed_step and sorted(failed_step)[-1] < step: - self.log_debug("Configuration of process of step %s failed, " - "ignoring tasks of step %s." % (sorted(failed_step)[-1], step)) + self.log_info("Configuration of process of step %s failed, " + "ignoring tasks of step %s." % (sorted(failed_step)[-1], step)) else: # Add the task again to the queue only if the last step was # OK @@ -234,12 +234,12 @@ def run(self): # If there are any process running of last step, wait if last_step in vms_configuring and len(vms_configuring[last_step]) > 0: - self.log_debug("Waiting processes of step " + str(last_step) + " to finish.") + self.log_info("Waiting processes of step " + str(last_step) + " to finish.") time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: # if not, update the step, to go ahead with the new # step - self.log_debug("Step " + str(last_step) + " finished. Go to step: " + str(step)) + self.log_info("Step " + str(last_step) + " finished. Go to step: " + str(step)) last_step = step else: if isinstance(vm, VirtualMachine): @@ -247,12 +247,12 @@ def run(self): self.log_warn("VM ID " + str(vm.im_id) + " has been destroyed. Not launching new tasks for it.") elif vm.is_configured() is False: - self.log_debug("Configuration process of step %s failed, " - "ignoring tasks of step %s." % (last_step, step)) + self.log_info("Configuration process of step %s failed, " + "ignoring tasks of step %s." % (last_step, step)) # Check that the VM has no other ansible process # running elif vm.ctxt_pid: - self.log_debug("VM ID " + str(vm.im_id) + " has running processes, wait.") + self.log_info("VM ID " + str(vm.im_id) + " has running processes, wait.") # If there are, add the tasks again to the queue # Set the priority to a higher number to decrease the # priority enabling to select other items of the queue @@ -262,7 +262,7 @@ def run(self): time.sleep(Config.CONFMAMAGER_CHECK_STATE_INTERVAL) else: if not tasks: - self.log_debug("No tasks to execute. Ignore this step.") + self.log_info("No tasks to execute. 
Ignore this step.") else: # If not, launch it # Mark this VM as configuring @@ -318,11 +318,11 @@ def launch_ctxt_agent(self, vm, tasks): str(self.inf.id) + "/" + ip + "_" + str(vm.im_id) tmp_dir = tempfile.mkdtemp() - self.log_debug("Create the configuration file for the contextualization agent") + self.log_info("Create the configuration file for the contextualization agent") conf_file = tmp_dir + "/config.cfg" self.create_vm_conf_file(conf_file, vm, tasks, remote_dir) - self.log_debug("Copy the contextualization agent config file") + self.log_info("Copy the contextualization agent config file") # Copy the contextualization agent config file ssh = vm.get_ssh_ansible_master() @@ -332,10 +332,10 @@ def launch_ctxt_agent(self, vm, tasks): if vm.configured is None: if len(self.inf.get_vm_list()) > Config.VM_NUM_USE_CTXT_DIST: - self.log_debug("Using ctxt_agent_dist") + self.log_info("Using ctxt_agent_dist") ctxt_agent_command = "/ctxt_agent_dist.py " else: - self.log_debug("Using ctxt_agent") + self.log_info("Using ctxt_agent") ctxt_agent_command = "/ctxt_agent.py " vault_export = "" vault_password = vm.info.systems[0].getValue("vault.password") @@ -348,7 +348,7 @@ def launch_ctxt_agent(self, vm, tasks): " > " + remote_dir + "/stdout" + " 2> " + remote_dir + "/stderr < /dev/null & echo -n $!") - self.log_debug("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) + self.log_info("Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) vm.ctxt_pid = pid vm.launch_check_ctxt_process() @@ -374,7 +374,7 @@ def generate_inventory(self, tmp_dir): """ Generate the ansible inventory file """ - self.log_debug("Create the ansible configuration file") + self.log_info("Create the ansible configuration file") res_filename = "hosts" ansible_file = tmp_dir + "/" + res_filename out = open(ansible_file, 'w') @@ -738,7 +738,7 @@ def configure_master(self): success = False cont = 0 while not self._stop_thread and not success and cont < Config.PLAYBOOK_RETRIES: - self.log_debug("Sleeping %s secs." % (cont ** 2 * 5)) + self.log_info("Sleeping %s secs." 
% (cont ** 2 * 5)) time.sleep(cont ** 2 * 5) cont += 1 try: @@ -768,7 +768,7 @@ def configure_master(self): if configured_ok: remote_dir = Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" - self.log_debug("Copy the contextualization agent files") + self.log_info("Copy the contextualization agent files") files = [] files.append((Config.IM_PATH + "/SSH.py", remote_dir + "/IM/SSH.py")) files.append((Config.IM_PATH + "/SSHRetry.py", remote_dir + "/IM/SSHRetry.py")) @@ -837,7 +837,7 @@ def wait_master(self): - Wait it to boot and has the SSH port open """ if self.inf.radl.ansible_hosts: - self.log_debug("Usign ansible host: " + self.inf.radl.ansible_hosts[0].getHost()) + self.log_info("Using ansible host: " + self.inf.radl.ansible_hosts[0].getHost()) self.inf.set_configured(True) return True @@ -919,7 +919,7 @@ def generate_playbooks_and_hosts(self): # Get the groups for the different VM types vm_group = self.inf.get_vm_list_by_system_name() - self.log_debug("Generating YAML, hosts and inventory files.") + self.log_info("Generating YAML, hosts and inventory files.") # Create the other configure sections (it may be included in other # configure) filenames = [] @@ -971,7 +971,7 @@ def generate_playbooks_and_hosts(self): recipe_files.append((tmp_dir + "/" + f, remote_dir + "/" + f)) self.inf.add_cont_msg("Copying YAML, hosts and inventory files.") - self.log_debug("Copying YAML files.") + self.log_info("Copying YAML files.") if self.inf.radl.ansible_hosts: for ansible_host in self.inf.radl.ansible_hosts: (user, passwd, private_key) = ansible_host.getCredentialValues() @@ -1056,7 +1056,7 @@ def wait_vm_running(self, vm, timeout, relaunch=False): self.log_warn("VM deleted by the user, Exit") return False - self.log_debug("VM " + str(vm.id) + " is not running yet.") + self.log_info("VM " + str(vm.id) + " is not running yet.") time.sleep(delay) wait += delay @@ -1109,13 +1109,13 @@ def wait_vm_ssh_acccess(self, vm, timeout): else: vm.update_status(self.auth) if vm.state == VirtualMachine.FAILED: - self.log_debug('VM: ' + str(vm.id) + " is in state Failed. Does not wait for SSH.") + self.log_warn('VM: ' + str(vm.id) + " is in state Failed. Does not wait for SSH.") return False, "VM Failure." ip = vm.getPublicIP() if ip is not None: ssh = vm.get_ssh() - self.log_debug('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id)) + self.log_info('SSH Connecting with: ' + ip + ' to the VM: ' + str(vm.id)) try: connected = ssh.test_connectivity(5) @@ -1128,14 +1128,14 @@ def wait_vm_ssh_acccess(self, vm, timeout): return False, "Error connecting with ip: " + ip + " incorrect credentials." if connected: - self.log_debug('Works!') + self.log_info('Works!') return True, "" else: - self.log_debug('do not connect, wait ...') + self.log_info('do not connect, wait ...') wait += delay time.sleep(delay) else: - self.log_debug('VM ' + str(vm.id) + ' with no IP') + self.log_warn('VM ' + str(vm.id) + ' with no IP') # Update the VM info and wait to have a valid public IP wait += delay time.sleep(delay) @@ -1232,7 +1232,7 @@ def call_ansible(self, tmp_dir, inventory, playbook, ssh): os.symlink(os.path.abspath( Config.RECIPES_DIR + "/utils"), tmp_dir + "/utils") - self.log_debug('Launching Ansible process.') + self.log_info('Launching Ansible process.') result = Queue() extra_vars = {'IM_HOST': 'all'} # store the process to terminate it later is Ansible does not finish correctly self.ansible_process = None return (False, "Timeout. 
Ansible process terminated.") else: - self.log_debug('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT)) + self.log_info('Waiting Ansible process to finish (%d/%d).' % (wait, Config.ANSIBLE_INSTALL_TIMEOUT)) time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL) wait += Config.CHECK_CTXT_PROCESS_INTERVAL - self.log_debug('Ansible process finished.') + self.log_info('Ansible process finished.') try: - self.log_debug('Get the results of the Ansible process.') + self.log_info('Get the results of the Ansible process.') _, (return_code, _), output = result.get(timeout=10) msg = output.getvalue() except: @@ -1366,18 +1366,18 @@ def configure_ansible(self, ssh, tmp_dir): self.inf.add_cont_msg("Performing preliminary steps to configure Ansible.") - self.log_debug("Remove requiretty in sshd config") + self.log_info("Remove requiretty in sshd config") try: cmd = "sudo -S sed -i 's/.*requiretty$/#Defaults requiretty/' /etc/sudoers" if ssh.password: cmd = "echo '" + ssh.password + "' | " + cmd (stdout, stderr, _) = ssh.execute(cmd, 120) - self.log_debug(stdout + "\n" + stderr) + self.log_info(stdout + "\n" + stderr) except: self.log_exception("Error removing requiretty. Ignoring.") self.inf.add_cont_msg("Configure Ansible in the master VM.") - self.log_debug("Call Ansible to (re)configure in the master node") + self.log_info("Call Ansible to (re)configure in the master node") (success, msg) = self.call_ansible( tmp_dir, "inventory.cfg", ConfManager.MASTER_YAML, ssh) @@ -1385,7 +1385,7 @@ def configure_ansible(self, ssh, tmp_dir): self.log_error("Error configuring master node: " + msg + "\n\n") self.inf.add_cont_msg("Error configuring the master VM: " + msg + " " + tmp_dir) else: - self.log_debug("Ansible successfully configured in the master VM:\n" + msg + "\n\n") + self.log_info("Ansible successfully configured in the master VM:\n" + msg + "\n\n") self.inf.add_cont_msg("Ansible successfully configured in the master VM.") except Exception as ex: self.log_exception("Error configuring master node.") diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py index 6899104cd..50036c1cc 100644 --- a/IM/InfrastructureInfo.py +++ b/IM/InfrastructureInfo.py @@ -468,8 +468,7 @@ def Contextualize(self, auth, vm_list=None): break if not ctxt: - InfrastructureInfo.logger.debug( - "Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.") + InfrastructureInfo.logger.info("Inf ID: " + str(self.id) + ": Contextualization disabled by the RADL.") self.cont_out = "Contextualization disabled by the RADL." 
self.configured = True for vm in self.get_vm_list(): diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index befc1bb19..4f75883fa 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -185,7 +185,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c requested_radl = radl.clone() requested_radl.systems = [radl.get_system_by_name(concrete_system.name)] try: - InfrastructureManager.logger.debug( + InfrastructureManager.logger.info( "Launching %d VMs of type %s" % (remain_vm, concrete_system.name)) launched_vms = cloud.cloud.getCloudConnector(sel_inf).launch( sel_inf, launch_radl, requested_radl, remain_vm, auth) @@ -198,7 +198,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c launched_vms = [] for success, launched_vm in launched_vms: if success: - InfrastructureManager.logger.debug("VM successfully launched: " + str(launched_vm.id)) + InfrastructureManager.logger.info("VM successfully launched: " + str(launched_vm.id)) deployed_vm.setdefault(deploy, []).append(launched_vm) deploy.cloud_id = cloud_id remain_vm -= 1 @@ -907,7 +907,7 @@ def GetInfrastructureState(inf_id, auth): if state is None: state = VirtualMachine.UNKNOWN - InfrastructureManager.logger.debug( + InfrastructureManager.logger.info( "inf: " + str(inf_id) + " is in state: " + state) return {'state': state, 'vm_states': vm_states} @@ -915,7 +915,7 @@ def GetInfrastructureState(inf_id, auth): def _stop_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Stopping the VM id: " + vm.id) + InfrastructureManager.logger.info("Stopping the VM id: " + vm.id) (success, msg) = vm.stop(auth) except Exception as e: msg = str(e) @@ -968,7 +968,7 @@ def StopInfrastructure(inf_id, auth): def _start_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.debug("Starting the VM id: " + vm.id) + InfrastructureManager.logger.info("Starting the VM id: " + vm.id) (success, msg) = vm.start(auth) except Exception as e: msg = str(e) @@ -1113,7 +1113,7 @@ def _delete_vm(vm, delete_list, auth, exceptions): last = InfrastructureManager.is_last_in_cloud(vm, delete_list, remain_vms) success = False try: - InfrastructureManager.logger.debug("Finalizing the VM id: " + str(vm.id)) + InfrastructureManager.logger.info("Finalizing the VM id: " + str(vm.id)) (success, msg) = vm.finalize(last, auth) except Exception as e: msg = str(e) diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 3aab7d246..e4444992e 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -465,7 +465,7 @@ def update_status(self, auth, force=False): updated = True self.last_update = now elif self.creating: - self.log_debug("VM is in creation process, set pending state") + self.log_info("VM is in creation process, set pending state") state = VirtualMachine.PENDING else: self.log_error("Error updating VM status: %s" % new_vm) @@ -633,8 +633,7 @@ def kill_check_ctxt_process(self): if self.ctxt_pid != self.WAIT_TO_PID: ssh = self.get_ssh_ansible_master() try: - self.log_debug( - "Killing ctxt process with pid: " + str(self.ctxt_pid)) + self.log_info("Killing ctxt process with pid: " + str(self.ctxt_pid)) # Try to get PGID to kill all child processes pgkill_success = False @@ -691,7 +690,7 @@ def check_ctxt_process(self): ssh = self.get_ssh_ansible_master() try: - self.log_debug("Getting status of ctxt process with pid: " + str(ctxt_pid)) + self.log_info("Getting status of ctxt process with pid: " + str(ctxt_pid)) 
(_, _, exit_status) = ssh.execute("ps " + str(ctxt_pid)) except: self.log_warn("Error getting status of ctxt process with pid: " + str(ctxt_pid)) @@ -710,7 +709,7 @@ def check_ctxt_process(self): if exit_status != 0: # The process has finished, get the outputs - self.log_debug("The process %s has finished, get the outputs" % ctxt_pid) + self.log_info("The process %s has finished, get the outputs" % ctxt_pid) ctxt_log = self.get_ctxt_log(remote_dir, True) msg = self.get_ctxt_output(remote_dir, True) if ctxt_log: @@ -724,11 +723,11 @@ def check_ctxt_process(self): # dynamically if Config.UPDATE_CTXT_LOG_INTERVAL > 0 and wait > Config.UPDATE_CTXT_LOG_INTERVAL: wait = 0 - self.log_debug("Get the log of the ctxt process with pid: " + str(ctxt_pid)) + self.log_info("Get the log of the ctxt process with pid: " + str(ctxt_pid)) ctxt_log = self.get_ctxt_log(remote_dir) self.cont_out = initial_count_out + ctxt_log # The process is still running, wait - self.log_debug("The process %s is still running. wait." % ctxt_pid) + self.log_info("The process %s is still running. wait." % ctxt_pid) time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL) wait += Config.CHECK_CTXT_PROCESS_INTERVAL else: diff --git a/IM/config.py b/IM/config.py index b3a5d2afe..5921607a6 100644 --- a/IM/config.py +++ b/IM/config.py @@ -62,7 +62,7 @@ class Config: IM_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_FILE = '/var/log/im/inf.log' LOG_FILE_MAX_SIZE = 10485760 - LOG_LEVEL = "DEBUG" + LOG_LEVEL = "INFO" CONTEXTUALIZATION_DIR = '/usr/share/im/contextualization' RECIPES_DIR = CONTEXTUALIZATION_DIR + '/AnsibleRecipes' RECIPES_DB_FILE = CONTEXTUALIZATION_DIR + '/recipes_ansible.db' diff --git a/IM/connectors/Azure.py b/IM/connectors/Azure.py index 54c1ed3f2..32be3f6a9 100644 --- a/IM/connectors/Azure.py +++ b/IM/connectors/Azure.py @@ -449,7 +449,7 @@ def get_azure_vm_create_json(self, storage_account, vm_name, nics, radl, instanc data_disks = [] while system.getValue("disk." + str(cont) + ".size"): disk_size = system.getFeature("disk." + str(cont) + ".size").getValue('G') - self.log_debug("Adding a %s GB disk." % disk_size) + self.log_info("Adding a %s GB disk." % disk_size) data_disks.append({ 'name': '%s_disk_%d' % (vm_name, cont), 'disk_size_gb': disk_size, @@ -548,7 +548,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun vm_name, vm_parameters) - self.log_debug("VM ID: %s created." % vm.id) + self.log_info("VM ID: %s created." % vm.id) inf.add_vm(vm) vms.append((True, (vm, async_vm_creation))) except Exception as ex: @@ -557,7 +557,7 @@ def create_vms(self, inf, radl, requested_radl, num_vm, location, storage_accoun # Delete Resource group and everything in it if group_name: - self.log_debug("Delete Resource group %s and everything in it." % group_name) + self.log_info("Delete Resource group %s and everything in it." 
% group_name) try: resource_client.resource_groups.delete(group_name).wait() except: @@ -587,7 +587,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): with inf._lock: # Create resource group for the Infrastructure if it does not exists if not self.get_rg("rg-%s" % inf.id, credentials, subscription_id): - self.log_debug("Creating Inf RG: %s" % "rg-%s" % inf.id) + self.log_info("Creating Inf RG: %s" % "rg-%s" % inf.id) resource_client.resource_groups.create_or_update("rg-%s" % inf.id, {'location': location}) # Create an storage_account per Infrastructure @@ -595,7 +595,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): credentials, subscription_id) if not storage_account: - self.log_debug("Creating storage account: %s" % storage_account_name) + self.log_info("Creating storage account: %s" % storage_account_name) try: storage_client = StorageManagementClient(credentials, subscription_id) storage_client.storage_accounts.create("rg-%s" % inf.id, @@ -606,7 +606,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): ).wait() except: self.log_exception("Error creating storage account: %s" % storage_account) - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: @@ -626,29 +626,29 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if success: vm, async_vm_creation = data try: - self.log_debug("Waiting VM ID %s to be created." % vm.id) + self.log_info("Waiting VM ID %s to be created." % vm.id) async_vm_creation.wait() res.append((True, vm)) remaining_vms -= 1 except: self.log_exception("Error waiting the VM %s." % vm.id) - self.log_debug("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) + self.log_info("End of retry %d of %d" % (retries, Config.MAX_VM_FAILS)) if remaining_vms > 0: # Remove the general group - self.log_debug("Delete Inf RG group %s" % "rg-%s" % inf.id) + self.log_info("Delete Inf RG group %s" % "rg-%s" % inf.id) try: resource_client.resource_groups.delete("rg-%s" % inf.id) except: pass else: - self.log_debug("All VMs created successfully.") + self.log_info("All VMs created successfully.") return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) group_name = vm.id.split('/')[0] vm_name = vm.id.split('/')[1] @@ -661,9 +661,9 @@ def updateVMInfo(self, vm, auth_data): self.log_exception("Error getting the VM info: " + vm.id) return (False, "Error getting the VM info: " + vm.id + ". " + str(ex)) - self.log_debug("VM info: " + vm.id + " obtained.") + self.log_info("VM info: " + vm.id + " obtained.") vm.state = self.PROVISION_STATE_MAP.get(virtual_machine.provisioning_state, VirtualMachine.UNKNOWN) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name(virtual_machine.hardware_profile.vm_size, virtual_machine.location, credentials, subscription_id) @@ -699,11 +699,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = dns_client.zones.create_or_update(group_name, domain, {'location': 'global'}) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." 
% domain) if zone: record = None @@ -712,11 +712,11 @@ def add_dns_entries(self, vm, credentials, subscription_id): except Exception: pass if not record: - self.log_debug("Creating DNS record %s." % hostname) + self.log_info("Creating DNS record %s." % hostname) record_data = {"ttl": 300, "arecords": [{"ipv4_address": ip}]} dns_client.record_sets.create_or_update(group_name, domain, hostname, 'A', record_data) else: - self.log_debug("DNS record %s exists. Do not create." % hostname) + self.log_info("DNS record %s exists. Do not create." % hostname) return True except Exception: @@ -752,25 +752,25 @@ def setIPs(self, vm, network_profile, credentials, subscription_id): def finalize(self, vm, last, auth_data): try: - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) group_name = vm.id.split('/')[0] credentials, subscription_id = self.get_credentials(auth_data) resource_client = ResourceManagementClient(credentials, subscription_id) # Delete Resource group and everything in it if self.get_rg(group_name, credentials, subscription_id): - self.log_debug("Removing RG: %s" % group_name) + self.log_info("Removing RG: %s" % group_name) resource_client.resource_groups.delete(group_name).wait() else: - self.log_debug("RG: %s does not exist. Do not remove." % group_name) + self.log_info("RG: %s does not exist. Do not remove." % group_name) # if it is the last VM delete the RG of the Inf if last: if self.get_rg("rg-%s" % vm.inf.id, credentials, subscription_id): - self.log_debug("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) + self.log_info("Removing Inf. RG: %s" % "rg-%s" % vm.inf.id) resource_client.resource_groups.delete("rg-%s" % vm.inf.id) else: - self.log_debug("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) + self.log_info("RG: %s does not exist. Do not remove." % "rg-%s" % vm.inf.id) except Exception as ex: self.log_exception("Error terminating the VM") diff --git a/IM/connectors/AzureClassic.py b/IM/connectors/AzureClassic.py index be0c5e07f..036c82687 100644 --- a/IM/connectors/AzureClassic.py +++ b/IM/connectors/AzureClassic.py @@ -517,7 +517,7 @@ def wait_operation_status(self, request_id, auth_data, delay=2, timeout=90): output = Operation(resp.text) status_str = output.Status # InProgress|Succeeded|Failed - self.log_debug("Operation string state: " + status_str) + self.log_info("Operation string state: " + status_str) else: self.log_error( "Error waiting operation to finish: Code %d. Msg: %s." 
% (resp.status_code, resp.text)) @@ -629,8 +629,7 @@ def get_storage_account(self, storage_account, auth_data): storage_info = StorageService(resp.text) return storage_info.StorageServiceProperties elif resp.status_code == 404: - self.log_debug( - "Storage " + storage_account + " does not exist") + self.log_info("Storage " + storage_account + " does not exist") return None else: self.log_warn( @@ -682,7 +681,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, error_msg)) break - self.log_debug("Creating the VM with id: " + service_name) + self.log_info("Creating the VM with id: " + service_name) # Create the VM to get the nodename vm = VirtualMachine(inf, service_name, self.cloud, radl, requested_radl, self) @@ -784,7 +783,7 @@ def get_instance_type(self, system, auth_data): return res def updateVMInfo(self, vm, auth_data): - self.log_debug("Get the VM info with the id: " + vm.id) + self.log_info("Get the VM info with the id: " + vm.id) service_name = vm.id try: @@ -801,13 +800,13 @@ def updateVMInfo(self, vm, auth_data): return (False, "Error getting the VM info: " + vm.id + ". Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) else: - self.log_debug("VM info: " + vm.id + " obtained.") - self.log_debug(resp.text) + self.log_info("VM info: " + vm.id + " obtained.") + self.log_info(resp.text) vm_info = Deployment(resp.text) vm.state = self.get_vm_state(vm_info) - self.log_debug("The VM state is: " + vm.state) + self.log_info("The VM state is: " + vm.state) instance_type = self.get_instance_type_by_name( vm_info.RoleInstanceList.RoleInstance[0].InstanceSize, auth_data) @@ -857,7 +856,7 @@ def setIPs(self, vm, vm_info): vm.setIps(public_ips, private_ips) def finalize(self, vm, last, auth_data): - self.log_debug("Terminate VM: " + vm.id) + self.log_info("Terminate VM: " + vm.id) service_name = vm.id # Delete the service @@ -900,7 +899,7 @@ def call_role_operation(self, op, vm, auth_data): return (True, "") def stop(self, vm, auth_data): - self.log_debug("Stop VM: " + vm.id) + self.log_info("Stop VM: " + vm.id) op = """ @@ -910,7 +909,7 @@ def stop(self, vm, auth_data): return self.call_role_operation(op, vm, auth_data) def start(self, vm, auth_data): - self.log_debug("Start VM: " + vm.id) + self.log_info("Start VM: " + vm.id) op = """ @@ -935,7 +934,7 @@ def get_all_instance_types(self, auth_data): "Error getting Role Sizes. Error Code: " + str(resp.status_code) + ". Msg: " + resp.text) return [] else: - self.log_debug("Role List obtained.") + self.log_info("Role List obtained.") role_sizes = RoleSizes(resp.text) res = [] for role_size in role_sizes.RoleSize: diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index de90f62db..8a35befe8 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -352,7 +352,7 @@ def _generate_mounts(self, system): disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path") if not disk_mount_path.startswith('/'): disk_mount_path = '/' + disk_mount_path - self.log_debug("Attaching a volume in %s" % disk_mount_path) + self.log_info("Attaching a volume in %s" % disk_mount_path) mount = {"Source": source, "Target": disk_mount_path} mount["Type"] = "volume" mount["ReadOnly"] = False @@ -443,10 +443,10 @@ def _delete_volumes(self, vm, auth_data): self.log_warn("Error deleting volume %s: %s." % (source, resp.text)) time.sleep(delay) else: - self.log_debug("Volume %s successfully deleted." % source) + self.log_info("Volume %s successfully deleted." 
% source) break else: - self.log_debug("Volume %s not created by the IM, not deleting it." % source) + self.log_info("Volume %s not created by the IM, not deleting it." % source) def _delete_networks(self, vm, auth_data): for net in vm.info.networks: @@ -465,7 +465,7 @@ def _delete_networks(self, vm, auth_data): if resp.status_code not in [204, 404]: self.log_error("Error deleting network %s: %s" % (net.id, resp.text)) else: - self.log_debug("Network %s deleted successfully" % net.id) + self.log_info("Network %s deleted successfully" % net.id) def _attach_cont_to_networks(self, vm, auth_data): system = vm.info.systems[0] @@ -493,7 +493,7 @@ def _attach_cont_to_networks(self, vm, auth_data): self.log_error("Error attaching cont %s to network %s: %s" % (vm.id, net_name, resp.text)) all_ok = False else: - self.log_debug("Cont %s attached to network %s" % (vm.id, net_name)) + self.log_info("Cont %s attached to network %s" % (vm.id, net_name)) return all_ok def _create_volumes(self, system, auth_data): @@ -515,7 +515,7 @@ def _create_volumes(self, system, auth_data): resp = self.create_request('GET', "/volumes/%s" % source, auth_data, headers) if resp.status_code == 200: # the volume already exists - self.log_debug("Volume named %s already exists." % source) + self.log_info("Volume named %s already exists." % source) else: body = json.dumps({"Name": source, "Driver": driver}) resp = self.create_request('POST', "/volumes/create", auth_data, headers, body) @@ -524,7 +524,7 @@ def _create_volumes(self, system, auth_data): self.log_error("Error creating volume %s: %s." % (source, resp.text)) else: system.setValue("disk." + str(cont) + ".created", "yes") - self.log_debug("Volume %s successfully created." % source) + self.log_info("Volume %s successfully created." % source) cont += 1 @@ -678,7 +678,7 @@ def _get_svc_state(self, svc_name, auth_data): if task["Status"]["State"] == "running": return VirtualMachine.RUNNING elif task["Status"]["State"] == "rejected": - self.log_debug("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) + self.log_info("Task %s rejected: %s." % (task["ID"], task["Status"]["Err"])) return VirtualMachine.PENDING else: return VirtualMachine.PENDING diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 0023e3045..d1d633565 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -123,8 +123,7 @@ def concreteSystem(self, radl_system, auth_data): instance_type = self.get_instance_type(res_system) if not instance_type: - self.log_error( - "Error launching the VM, no instance type available for the requirements.") + self.log_error("Error launching the VM, no instance type available for the requirements.") self.log_debug(res_system) return [] else: @@ -295,7 +294,7 @@ def get_instance_type(self, radl, vpc=None): performance = float(cpu_perf.value) performance_op = cpu_perf.getLogOperator() else: - self.log_debug("Performance unit unknown: " + cpu_perf.unit + ". Ignore it") + self.log_warn("Performance unit unknown: " + cpu_perf.unit + ". 
Ignore it") instace_types = self.get_all_instance_types() @@ -390,7 +389,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): with inf._lock: sg = self._get_security_group(conn, sg_name) if not sg: - self.log_debug("Creating security group: " + sg_name) + self.log_info("Creating security group: " + sg_name) try: sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) except Exception as crex: @@ -400,7 +399,7 @@ def create_security_groups(self, conn, inf, radl, vpc=None): # if not raise the exception raise crex else: - self.log_debug("Security group: " + sg_name + " already created.") + self.log_info("Security group: " + sg_name + " already created.") if vpc: res.append(sg.id) @@ -455,8 +454,7 @@ def create_keypair(self, system, conn): public = system.getValue('disk.0.os.credentials.public_key') if private and public: if public.find('-----BEGIN CERTIFICATE-----') != -1: - self.log_debug( - "The RADL specifies the PK, upload it to EC2") + self.log_info("The RADL specifies the PK, upload it to EC2") public_key = base64.b64encode(public) conn.import_key_pair(keypair_name, public_key) else: @@ -466,7 +464,7 @@ def create_keypair(self, system, conn): system.setUserKeyCredentials( system.getCredentials().username, public, private) else: - self.log_debug("Creating the Keypair name: %s" % keypair_name) + self.log_info("Creating the Keypair name: %s" % keypair_name) keypair_file = self.KEYPAIR_DIR + '/' + keypair_name + '.pem' keypair = conn.create_key_pair(keypair_name) created = True @@ -535,7 +533,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData( system.getValue("disk.0.image.url")) - self.log_debug("Connecting with the region: " + region_name) + self.log_info("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) res = [] @@ -614,11 +612,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if spot: err_msg += " a spot instance " - self.log_debug("Launching a spot instance") + self.log_info("Launching a spot instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." 
% err_msg)) @@ -652,14 +649,12 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): product_description=operative_system, availability_zone=zone.name, max_results=1) - self.log_debug( - "Spot price history for the region " + zone.name) + self.log_debug("Spot price history for the region " + zone.name) self.log_debug(history) if history and history[0].price < historical_price: historical_price = history[0].price availability_zone = zone.name - self.log_debug( - "Launching the spot request in the zone " + availability_zone) + self.log_info("Launching the spot request in the zone " + availability_zone) # Force to use magnetic volumes bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping( @@ -685,8 +680,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -694,11 +688,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, "Error %s." % err_msg)) else: err_msg += " an ondemand instance " - self.log_debug("Launching ondemand instance") + self.log_info("Launching ondemand instance") instance_type = self.get_instance_type(system, vpc is not None) if not instance_type: - self.log_error( - "Error %s, no instance type available for the requirements." % err_msg) + self.log_error("Error %s, no instance type available for the requirements." % err_msg) self.log_debug(system) res.append( (False, "Error %s, no instance type available for the requirements." % err_msg)) @@ -729,8 +722,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'instance_id', str(vm.id)) # Add the keypair name to remove it later vm.keypair_name = keypair_name - self.log_debug( - "Instance successfully launched.") + self.log_info("Instance successfully launched.") inf.add_vm(vm) res.append((True, vm)) all_failed = False @@ -753,14 +745,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if sg_ids: try: for sgid in sg_ids: - self.log_debug("Remove the SG: %s" % sgid) + self.log_info("Remove the SG: %s" % sgid) conn.delete_security_group(group_id=sgid) except: self.log_exception("Error deleting SG.") if sg_names and sg_names[0] != 'default': try: for sgname in sg_names: - self.log_debug("Remove the SG: %s" % sgname) + self.log_info("Remove the SG: %s" % sgname) conn.delete_security_group(sgname) except: self.log_exception("Error deleting SG.") @@ -782,7 +774,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): cont = 0 err_states = ["error"] while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: - self.log_debug("State: " + str(volume.status)) + self.log_info("State: " + str(volume.status)) cont += 2 time.sleep(2) volume = conn.get_all_volumes([volume.id])[0] @@ -790,8 +782,7 @@ def create_volume(self, conn, disk_size, placement, timeout=60): if str(volume.status) == 'available': return volume else: - self.log_error( - "Error creating the volume %s, deleting it" % (volume.id)) + self.log_error("Error creating the volume %s, deleting it" % (volume.id)) conn.delete_volume(volume.id) return None @@ -816,13 +807,11 @@ def attach_volumes(self, instance, vm): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." 
+ str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume = self.create_volume( conn, int(disk_size), instance.placement) if volume: - self.log_debug( - "Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) conn.attach_volume( volume.id, instance.id, "/dev/" + disk_device) cont += 1 @@ -854,16 +843,14 @@ def delete_volumes(self, conn, volumes, instance_id, timeout=240): try: curr_vol = conn.get_all_volumes([volume_id])[0] if str(curr_vol.attachment_state()) == "attached": - self.log_debug( - "Detaching the volume " + volume_id + " from the instance " + instance_id) + self.log_info("Detaching the volume " + volume_id + " from the instance " + instance_id) conn.detach_volume(volume_id, instance_id, force=True) elif curr_vol.attachment_state() is None: - self.log_debug("Removing the volume " + volume_id) + self.log_info("Removing the volume " + volume_id) conn.delete_volume(volume_id) deleted = True else: - self.log_debug( - "State: " + str(curr_vol.attachment_state())) + self.log_info("State: " + str(curr_vol.attachment_state())) except Exception as ex: self.log_warn("Error removing the volume: " + str(ex)) @@ -912,18 +899,16 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): vm.elastic_ip = True try: pub_address = None - self.log_debug("Add an Elastic IP") + self.log_info("Add an Elastic IP") if fixed_ip: for address in instance.connection.get_all_addresses(): if str(address.public_ip) == fixed_ip: pub_address = address if pub_address: - self.log_debug( - "Setting a fixed allocated IP: " + fixed_ip) + self.log_info("Setting a fixed allocated IP: " + fixed_ip) else: - self.log_warn( - "Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). Ignore it.") + self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). 
Ignore it.") return None else: provider_id = self.get_net_provider_id(vm.info) @@ -948,8 +933,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address.release() return None else: - self.log_debug( - "The VM is not running, not adding an Elastic IP.") + self.log_info("The VM is not running, not adding an Elastic IP.") return None def delete_elastic_ips(self, conn, vm): @@ -965,8 +949,7 @@ def delete_elastic_ips(self, conn, vm): # Get the elastic IPs for address in conn.get_all_addresses(): if address.instance_id == instance_id: - self.log_debug( - "This VM has a Elastic IP, disassociate it") + self.log_info("This VM has a Elastic IP, disassociate it") address.disassociate() n = 0 @@ -982,11 +965,10 @@ def delete_elastic_ips(self, conn, vm): n += 1 if not found: - self.log_debug("Now release it") + self.log_info("Now release it") address.release() else: - self.log_debug( - "This is a fixed IP, it is not released") + self.log_info("This is a fixed IP, it is not released") except Exception: self.log_exception( "Error deleting the Elastic IPs to VM ID: " + str(vm.id)) @@ -1077,7 +1059,7 @@ def updateVMInfo(self, vm, auth_data): # deployed job_instance_id = None - self.log_debug("Check if the request has been fulfilled and the instance has been deployed") + self.log_info("Check if the request has been fulfilled and the instance has been deployed") job_sir_id = instance_id request_list = conn.get_all_spot_instance_requests() for sir in request_list: @@ -1090,7 +1072,7 @@ def updateVMInfo(self, vm, auth_data): break if job_instance_id: - self.log_debug("Request fulfilled, instance_id: " + str(job_instance_id)) + self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) instance_id = job_instance_id vm.id = region + ";" + instance_id vm.info.systems[0].setValue('instance_id', str(vm.id)) @@ -1161,22 +1143,22 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = conn.create_zone(domain) else: - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("CREATE", fqdn, "A") change.add_value(ip) result = changes.commit() else: - self.log_debug("DNS record %s exists. Do not create." % fqdn) + self.log_info("DNS record %s exists. Do not create." % fqdn) return True except Exception: @@ -1205,14 +1187,14 @@ def del_dns_entries(self, vm, auth_data): domain += "." zone = conn.get_zone(domain) if not zone: - self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain) + self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." + domain record = zone.get_a(fqdn) if not record: - self.log_debug("DNS record %s does not exists. Do not delete." % fqdn) + self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: - self.log_debug("Deleting DNS record %s." % fqdn) + self.log_info("Deleting DNS record %s." 
% fqdn) changes = boto.route53.record.ResourceRecordSets(conn, zone.id) change = changes.add_change("DELETE", fqdn, "A") change.add_value(ip) @@ -1237,8 +1219,7 @@ def cancel_spot_requests(self, conn, vm): for sir in request_list: if sir.instance_id == instance_id: conn.cancel_spot_instance_requests(sir.id) - self.log_debug( - "Spot instance request " + str(sir.id) + " deleted") + self.log_info("Spot instance request " + str(sir.id) + " deleted") break except Exception: self.log_exception("Error deleting the spot instance request") @@ -1348,7 +1329,7 @@ def delete_security_groups(self, conn, vm, timeout=90): all_vms_terminated = False if all_vms_terminated: - self.log_debug("Remove the SG: " + sg.name) + self.log_info("Remove the SG: " + sg.name) try: sg.revoke('tcp', 0, 65535, src_group=sg) sg.revoke('udp', 0, 65535, src_group=sg) @@ -1367,13 +1348,13 @@ def delete_security_groups(self, conn, vm, timeout=90): # Check if it has been deleted yet sg = self._get_security_group(conn, sg.name) if not sg: - self.log_debug("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) + self.log_info("Error deleting the SG. But it does not exist. Ignore. " + str(ex)) deleted = True else: self.log_exception("Error deleting the SG.") else: # If there are more than 1, we skip this step - self.log_debug("There are active instances. Not removing the SG") + self.log_info("There are active instances. Not removing the SG") def stop(self, vm, auth_data): region_name = vm.id.split(";")[0] @@ -1661,7 +1642,6 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): snapshot_id = "" # Obtain the connection object to connect with EC2 - self.logger.debug("Connecting with the region: " + region_name) conn = self.get_connection(region_name, auth_data) if not conn: @@ -1670,7 +1650,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): # Create the instance snapshot instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: - self.logger.debug("Creating snapshot: " + image_name) + self.log_info("Creating snapshot: " + image_name) snapshot_id = instance.create_image(image_name, description="AMI automatically generated by IM", no_reboot=True) @@ -1689,7 +1669,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) - self.logger.debug("Connecting with the region: " + region_name) + self.log_info("Deleting image: %s." 
% image_url) conn = self.get_connection(region_name, auth_data) success = conn.deregister_image(ami, delete_snapshot=True) # https://github.com/boto/boto/issues/3019 diff --git a/IM/connectors/GCE.py b/IM/connectors/GCE.py index 5f050a9b1..e84e9979e 100644 --- a/IM/connectors/GCE.py +++ b/IM/connectors/GCE.py @@ -91,8 +91,7 @@ def get_driver(self, auth_data, datacenter=None): self.driver = driver return driver else: - self.log_error( - "No correct auth data has been specified to GCE: username, password and project") + self.log_error("No correct auth data has been specified to GCE: username, password and project") self.log_debug(auth) raise Exception( "No correct auth data has been specified to GCE: username, password and project") @@ -131,8 +130,7 @@ def get_dns_driver(self, auth_data): self.dns_driver = driver return driver else: - self.log_error( - "No correct auth data has been specified to GCE: username, password and project") + self.log_error("No correct auth data has been specified to GCE: username, password and project") self.log_debug(auth) raise Exception( "No correct auth data has been specified to GCE: username, password and project") @@ -302,7 +300,7 @@ def request_external_ip(self, radl): n += 1 if requested_ips: - self.log_debug("The user requested for a fixed IP") + self.log_info("The user requested for a fixed IP") if len(requested_ips) > 1: self.log_warn( "The user has requested more than one fixed IP. Using only the first one") @@ -394,7 +392,7 @@ def create_firewall(self, inf, net_name, radl, driver): try: firewall = driver.ex_get_firewall(firewall_name) except ResourceNotFoundError: - self.log_debug("The firewall %s does not exist." % firewall_name) + self.log_info("The firewall %s does not exist." % firewall_name) except: self.log_exception("Error trying to get FW %s." % firewall_name) @@ -402,14 +400,14 @@ def create_firewall(self, inf, net_name, radl, driver): try: firewall.allowed = allowed firewall.update() - self.log_debug("Firewall %s existing. Rules updated." % firewall_name) + self.log_info("Firewall %s existing. Rules updated." % firewall_name) except: self.log_exception("Error updating the firewall %s." % firewall_name) return try: driver.ex_create_firewall(firewall_name, allowed, network=net_name) - self.log_debug("Firewall %s successfully created." % firewall_name) + self.log_info("Firewall %s successfully created." % firewall_name) except Exception as addex: self.log_warn("Exception creating FW: " + str(addex)) @@ -458,14 +456,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if not public or not private: # We must generate them - self.log_debug("No keys. Generating key pair.") + self.log_info("No keys. 
Generating key pair.") (public, private) = self.keygen() system.setValue('disk.0.os.credentials.private_key', private) metadata = {} if private and public: metadata = {"sshKeys": username + ":" + public} - self.log_debug("Setting ssh for user: " + username) + self.log_info("Setting ssh for user: " + username) self.log_debug(metadata) startup_script = self.get_cloud_init_data(radl) @@ -503,7 +501,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): vm.info.systems[0].setValue('instance_id', str(vm.id)) vm.info.systems[0].setValue('instance_name', str(vm.id)) inf.add_vm(vm) - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") res.append((True, vm)) @@ -530,7 +528,7 @@ def finalize(self, vm, last, auth_data): if not success: return (False, "Error destroying node: " + vm.id) - self.log_debug("VM " + str(vm.id) + " successfully destroyed") + self.log_info("VM " + str(vm.id) + " successfully destroyed") else: self.log_warn("VM " + str(vm.id) + " not found.") return (True, "") @@ -546,14 +544,14 @@ def delete_firewall(self, vm, driver): try: firewall = driver.ex_get_firewall(firewall_name) except ResourceNotFoundError: - self.log_debug("Firewall %s does not exist. Do not delete." % firewall_name) + self.log_info("Firewall %s does not exist. Do not delete." % firewall_name) except: self.log_exception("Error trying to get FW %s." % firewall_name) if firewall: try: firewall.destroy() - self.log_debug("Firewall %s successfully deleted." % firewall_name) + self.log_info("Firewall %s successfully deleted." % firewall_name) except: self.log_exception("Error trying to delete FW %s." % firewall_name) @@ -583,7 +581,7 @@ def delete_disks(self, node): self.log_error( "Error destroying the volume: " + vol_name) except ResourceNotFoundError: - self.log_debug("The volume: " + vol_name + " does not exists. Ignore it.") + self.log_info("The volume: " + vol_name + " does not exists. Ignore it.") success = True except: self.log_exception( @@ -666,8 +664,7 @@ def attach_volumes(self, vm, node): "disk." + str(cont) + ".size").getValue('G') disk_device = vm.info.systems[0].getValue( "disk." + str(cont) + ".device") - self.log_debug( - "Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) volume_name = "im-%s" % str(uuid.uuid1()) location = self.get_node_location(node) @@ -675,7 +672,7 @@ def attach_volumes(self, vm, node): int(disk_size), volume_name, location=location) success = self.wait_volume(volume) if success: - self.log_debug("Attach the volume ID " + str(volume.id)) + self.log_info("Attach the volume ID " + str(volume.id)) try: volume.attach(node, disk_device) except: @@ -758,20 +755,20 @@ def add_dns_entries(self, vm, auth_data): domain += "." zone = [z for z in driver.iterate_zones() if z.domain == domain] if not zone: - self.log_debug("Creating DNS zone %s" % domain) + self.log_info("Creating DNS zone %s" % domain) zone = driver.create_zone(domain) else: zone = zone[0] - self.log_debug("DNS zone %s exists. Do not create." % domain) + self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain record = [r for r in driver.iterate_records(zone) if r.name == fqdn] if not record: - self.log_debug("Creating DNS record %s." % fqdn) + self.log_info("Creating DNS record %s." % fqdn) driver.create_record(fqdn, zone, RecordType.A, dict(ttl=300, rrdatas=[ip])) else: - self.log_debug("DNS record %s exists. Do not create." 
+                        self.log_info("DNS record %s exists. Do not create." % fqdn)
 
             return True
         except Exception:
@@ -800,20 +797,20 @@ def del_dns_entries(self, vm, auth_data):
                     domain += "."
                 zone = [z for z in driver.iterate_zones() if z.domain == domain]
                 if not zone:
-                    self.log_debug("The DNS zone %s does not exists. Do not delete records." % domain)
+                    self.log_info("The DNS zone %s does not exist. Do not delete records." % domain)
                 else:
                     zone = zone[0]
                     fqdn = hostname + "." + domain
                     record = [r for r in driver.iterate_records(zone) if r.name == fqdn]
                     if not record:
-                        self.log_debug("DNS record %s does not exists. Do not delete." % fqdn)
+                        self.log_info("DNS record %s does not exist. Do not delete." % fqdn)
                     else:
                         record = record[0]
                         if record.data['rrdatas'] != [ip]:
-                            self.log_debug("DNS record %s mapped to unexpected IP: %s != %s."
-                                           "Do not delete." % (fqdn, record.data['rrdatas'], ip))
+                            self.log_info("DNS record %s mapped to unexpected IP: %s != %s. "
+                                          "Do not delete." % (fqdn, record.data['rrdatas'], ip))
                         else:
-                            self.log_debug("Deleting DNS record %s." % fqdn)
+                            self.log_info("Deleting DNS record %s." % fqdn)
                             if not driver.delete_record(record):
                                 self.log_error("Error deleting DNS record %s." % fqdn)
diff --git a/IM/connectors/Kubernetes.py b/IM/connectors/Kubernetes.py
index 90d92a98a..7cadb4779 100644
--- a/IM/connectors/Kubernetes.py
+++ b/IM/connectors/Kubernetes.py
@@ -105,8 +105,7 @@ def get_api_version(self, auth_data):
             self.log_exception(
                 "Error connecting with Kubernetes API server")
 
-        self.log_warn(
-            "Error getting a compatible API version. Setting the default one.")
+        self.log_warn("Error getting a compatible API version. Setting the default one.")
         self.log_debug("Using %s API version." % version)
         return version
 
@@ -221,7 +220,7 @@ def _create_volumes(self, apiVersion, namespace, system, pod_name, auth_data, pe
                 disk_mount_path = '/' + disk_mount_path
             if not disk_device.startswith('/'):
                 disk_device = '/' + disk_device
-            self.log_debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
+            self.log_info("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
             name = "%s-%d" % (pod_name, cont)
 
             if persistent:
@@ -536,8 +535,7 @@ def alterVM(self, vm, radl, auth_data):
                 changed = True
 
         if not changed:
-            self.log_debug(
-                "Nothing changes in the kubernetes pod: " + str(vm.id))
+            self.log_info("Nothing changed in the Kubernetes pod: " + str(vm.id))
             return (True, vm)
 
         # Create the container
diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py
index 00f60aaa0..3c6e89dc8 100644
--- a/IM/connectors/OCCI.py
+++ b/IM/connectors/OCCI.py
@@ -255,11 +255,11 @@ def manage_public_ips(self, vm, auth_data):
         """
         Manage public IPs in the VM
         """
-        self.log_debug("The VM does not have public IP trying to add one.")
+        self.log_info("The VM does not have a public IP. Trying to add one.")
         if self.add_public_ip_count < self.MAX_ADD_IP_COUNT:
             success, msgs = self.add_public_ip(vm, auth_data)
             if success:
-                self.log_debug("Public IP successfully added.")
+                self.log_info("Public IP successfully added.")
             else:
                 self.add_public_ip_count += 1
                 self.log_warn("Error adding public IP the VM: %s (%d/%d)\n" % (msgs,
@@ -401,7 +401,7 @@ def add_public_ip(self, vm, auth_data):
             if resp.status_code != 201 and resp.status_code != 200:
                 return (False, output)
             else:
-                self.log_debug("Public IP added from pool %s" % network_name)
+                self.log_info("Public IP added from pool %s" % network_name)
                 return (True, vm.id)
         except Exception:
             self.log_exception("Error connecting with OCCI server")
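A side note on the retry policy visible in the manage_public_ips hunk above: the
connector counts failed attempts in add_public_ip_count and, once MAX_ADD_IP_COUNT
is reached, stops asking for a public IP and marks the VM (and the infrastructure)
as not configured, so contextualization fails fast instead of retrying forever. A
minimal, self-contained sketch of that accounting follows; the class and method
names here are hypothetical, not part of the IM code:

    class CappedRetryPolicy:
        """Allow an action to fail only a bounded number of times."""

        MAX_ATTEMPTS = 3  # plays the role of MAX_ADD_IP_COUNT

        def __init__(self):
            self.failed_attempts = 0  # plays the role of add_public_ip_count

        def record_failure(self):
            """Count one failure; return True while another retry is allowed."""
            self.failed_attempts += 1
            return self.failed_attempts < self.MAX_ATTEMPTS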
@@ -586,11 +586,11 @@ def create_volumes(self, system, auth_data):
                     # get the last letter and use vd
                     disk_device = "vd" + disk_device[-1]
                     system.setValue("disk." + str(cont) + ".device", disk_device)
-                self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont))
+                self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont))
                 storage_name = "im-disk-%s" % str(uuid.uuid1())
                 success, volume_id = self.create_volume(int(disk_size), storage_name, auth_data)
                 if success:
-                    self.log_debug("Volume id %s sucessfully created." % volume_id)
+                    self.log_info("Volume id %s successfully created." % volume_id)
                     volumes.append((disk_device, volume_id))
                     system.setValue("disk." + str(cont) + ".provider_id", volume_id)
                     # TODO: get the actual device_id from OCCI
@@ -621,7 +621,7 @@ def wait_volume_state(self, volume_id, auth_data, wait_state="online", timeout=1
             wait += delay
             success, storage_info = self.get_volume_info(volume_id, auth_data)
             state = self.get_occi_attribute_value(storage_info, 'occi.storage.state')
-            self.log_debug("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state))
+            self.log_info("Waiting volume %s to be %s. Current state: %s" % (volume_id, wait_state, state))
             if success and state == wait_state:
                 online = True
             elif not success:
@@ -693,18 +693,18 @@ def detach_volume(self, volume, auth_data, timeout=90, delay=5):
         wait = 0
         while wait < timeout:
             try:
-                self.log_debug("Detaching volume: %s" % storage_id)
+                self.log_info("Detaching volume: %s" % storage_id)
                 resp = self.create_request('GET', link, auth_data, headers)
                 if resp.status_code == 200:
-                    self.log_debug("Volume link %s exists. Try to delete it." % link)
+                    self.log_info("Volume link %s exists. Try to delete it." % link)
                     resp = self.create_request('DELETE', link, auth_data, headers)
                     if resp.status_code in [204, 200]:
-                        self.log_debug("Successfully detached. Wait it to be deleted.")
+                        self.log_info("Successfully detached. Wait for it to be deleted.")
                     else:
                         self.log_error("Error detaching volume: %s" + resp.reason + "\n" + resp.text)
                 elif resp.status_code == 404:
                     # wait until the resource does not exist
-                    self.log_debug("Successfully detached")
+                    self.log_info("Successfully detached")
                     return (True, "")
                 else:
                     self.log_warn("Error detaching volume: %s" + resp.reason + "\n" + resp.text)
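Both detach_volume above and delete_volume below share the same polling shape:
issue the destructive request, then keep re-checking with GET until the server
answers 404 (the resource is gone) or the timeout expires, sleeping delay seconds
between rounds. A generic sketch of that loop; the helper and its callable
parameters are hypothetical, while the real methods call create_request against
the OCCI endpoint:

    import time

    def poll_until_gone(check_status, do_delete, timeout=60, delay=5):
        """Retry a delete until a status check reports 404 or time runs out."""
        wait = 0
        while wait < timeout:
            status = check_status()  # e.g. the HTTP code of a GET on the resource
            if status == 404:
                return True          # the resource no longer exists: success
            if status == 200:
                do_delete()          # still there: ask again (409 = still attached)
            time.sleep(delay)
            wait += delay
        return False                 # gave up after `timeout` seconds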
@@ -734,26 +734,26 @@ def delete_volume(self, storage_id, auth_data, timeout=180, delay=5):
         wait = 0
         while wait < timeout:
-            self.log_debug("Delete storage: %s" % storage_id)
+            self.log_info("Delete storage: %s" % storage_id)
             try:
                 resp = self.create_request('GET', storage_id, auth_data, headers)
                 if resp.status_code == 200:
-                    self.log_debug("Storage %s exists. Try to delete it." % storage_id)
+                    self.log_info("Storage %s exists. Try to delete it." % storage_id)
                     resp = self.create_request('DELETE', storage_id, auth_data, headers)
                     if resp.status_code == 404:
-                        self.log_debug("It does not exist.")
+                        self.log_info("It does not exist.")
                         return (True, "")
                     elif resp.status_code == 409:
-                        self.log_debug("Error deleting the Volume. It seems that it is still "
-                                       "attached to a VM: %s" % resp.text)
+                        self.log_info("Error deleting the Volume. It seems that it is still "
+                                      "attached to a VM: %s" % resp.text)
                     elif resp.status_code != 200 and resp.status_code != 204:
                         self.log_warn("Error deleting the Volume: " + resp.reason + "\n" + resp.text)
                     else:
-                        self.log_debug("Successfully deleted")
+                        self.log_info("Successfully deleted")
                         return (True, "")
                 elif resp.status_code == 404:
-                    self.log_debug("It does not exist.")
+                    self.log_info("It does not exist.")
                     return (True, "")
                 else:
                     self.log_warn("Error deleting storage: %s" + resp.reason + "\n" + resp.text)
@@ -1065,19 +1065,19 @@ def add_new_disks(self, vm, radl, auth_data):
                     # get the last letter and use vd
                     disk_device = "vd" + disk_device[-1]
                     system.setValue("disk." + str(cont) + ".device", disk_device)
-                self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont))
+                self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont))
                 success, volume_id = self.create_volume(int(disk_size), "im-disk-%d" % cont, auth_data)
                 if success:
-                    self.log_debug("Volume id %s successfuly created." % volume_id)
+                    self.log_info("Volume id %s successfully created." % volume_id)
                     # let's wait the storage to be ready "online"
                     wait_ok = self.wait_volume_state(volume_id, auth_data)
                     if not wait_ok:
-                        self.log_debug("Error waiting volume %s. Deleting it." % volume_id)
+                        self.log_info("Error waiting volume %s. Deleting it." % volume_id)
                         self.delete_volume(volume_id, auth_data)
                         return (False, "Error waiting volume %s. Deleting it." % volume_id)
                     else:
-                        self.log_debug("Attaching to the instance")
+                        self.log_info("Attaching to the instance")
                         attached = self.attach_volume(vm, volume_id, disk_device, mount_path, auth_data)
                         if attached:
                             orig_system.setValue("disk." + str(cont) + ".size", disk_size, "G")
@@ -1117,7 +1117,7 @@ def remove_public_ip(self, vm, auth_data):
         """
         Remove/Detach public IP from VM
         """
-        self.log_debug("Removing Public IP from VM %s" % vm.id)
+        self.log_info("Removing Public IP from VM %s" % vm.id)
         auth = self.get_auth_header(auth_data)
         headers = {'Accept': 'text/plain', 'Connection': 'close'}
@@ -1137,7 +1137,7 @@
             return (True, "No public IP to delete.")
         resp = self.create_request('DELETE', link, auth_data, headers)
         if resp.status_code in [404, 204, 200]:
-            self.log_debug("Successfully removed")
+            self.log_info("Successfully removed")
             return (True, "")
         else:
             self.log_error("Error removing public IP: " + resp.reason + "\n" + resp.text)
@@ -1336,11 +1336,11 @@ def get_keystone_token(occi, keystone_uri, auth):
                 return token
 
         if version == 2:
-            occi.logger.debug("Getting Keystone v2 token")
+            occi.logger.info("Getting Keystone v2 token")
             occi.keystone_token = KeyStoneAuth.get_keystone_token_v2(occi, keystone_uri, auth)
             return occi.keystone_token
         elif version == 3:
-            occi.logger.debug("Getting Keystone v3 token")
+            occi.logger.info("Getting Keystone v3 token")
             occi.keystone_token = KeyStoneAuth.get_keystone_token_v3(occi, keystone_uri, auth)
             return occi.keystone_token
         else:
@@ -1451,7 +1451,7 @@ def get_keystone_token_v2(occi, keystone_uri, auth):
             # \"metadata\": {\"is_admin\": 0, \"roles\": []}}}"
             output = resp.json()
             if 'access' in output:
-                occi.logger.debug("Using tenant: %s" % tenant["name"])
+                occi.logger.info("Using tenant: %s" % tenant["name"])
                 occi.keystone_tenant = tenant
                 tenant_token_id = str(output['access']['token']['id'])
                 break
@@ -1520,7 +1520,7 @@ def get_keystone_token_v3(occi, keystone_uri, auth):
                 url = "%s/v3/auth/tokens" % keystone_uri
                 resp = occi.create_request_static('POST', url, auth, headers,
json.dumps(body)) if resp.status_code in [200, 201, 202]: - occi.logger.debug("Using project: %s" % project["name"]) + occi.logger.info("Using project: %s" % project["name"]) occi.keystone_project = project scoped_token = resp.headers['X-Subject-Token'] break diff --git a/IM/connectors/OpenNebula.py b/IM/connectors/OpenNebula.py index 88038123e..8bc98157a 100644 --- a/IM/connectors/OpenNebula.py +++ b/IM/connectors/OpenNebula.py @@ -410,7 +410,7 @@ def create_security_groups(self, inf, radl, auth_data): outport.get_remote_port())) if sg_template: - self.log_debug("Creating security group: %s" % sg_name) + self.log_info("Creating security group: %s" % sg_name) sg_template = ("NAME = %s\n" % sg_name) + sg_template success, sg_id, _ = server.one.secgroup.allocate(session_id, sg_template) if not success: @@ -462,14 +462,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 if all_failed: - self.log_debug("All VMs failed, delete Security Groups.") + self.log_info("All VMs failed, delete Security Groups.") for sg in sgs.values(): - self.log_debug("Delete Security Group: %d." % sg) + self.log_info("Delete Security Group: %d." % sg) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." % sg_id) return res def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): @@ -489,17 +489,17 @@ def delete_security_groups(self, inf, auth_data, timeout=90, delay=10): # Get the SG to delete sg = self._get_security_group(sg_name, auth_data) if not sg: - self.log_debug("The SG %s does not exist. Do not delete it." % sg_name) + self.log_info("The SG %s does not exist. Do not delete it." % sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) success, sg_id, _ = server.one.secgroup.delete(session_id, sg) if success: - self.log_debug("Deleted.") + self.log_info("Deleted.") deleted = True else: - self.log_debug("Error deleting SG: %s." % sg_id) + self.log_info("Error deleting SG: %s." % sg_id) except Exception as ex: self.log_warn("Error deleting the SG: %s" % str(ex)) @@ -1095,7 +1095,7 @@ def attach_new_disks(self, vm, system, session_id): # get the last letter and use vd disk_device = "vd" + disk_device[-1] system.setValue("disk." + str(cont) + ".device", disk_device) - self.log_debug("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) + self.log_info("Creating a %d GB volume for the disk %d" % (int(disk_size), cont)) success, volume_id = self.attach_volume(vm, int(disk_size), disk_device, disk_fstype, session_id) if success: orig_system.setValue("disk." 
+ str(cont) + ".size", disk_size, "M") diff --git a/IM/connectors/OpenStack.py b/IM/connectors/OpenStack.py index 6caed7184..634eeb341 100644 --- a/IM/connectors/OpenStack.py +++ b/IM/connectors/OpenStack.py @@ -352,7 +352,7 @@ def setIPsFromInstance(self, vm, node): self.log_error("Error adding a floating IP: Max number of retries reached.") self.error_messages += "Error adding a floating IP: Max number of retries reached.\n" else: - self.log_debug("The VM is not running, not adding Elastic/Floating IPs.") + self.log_info("The VM is not running, not adding Elastic/Floating IPs.") def update_system_info_from_instance(self, system, instance_type): """ @@ -386,7 +386,7 @@ def get_networks(self, driver, radl): # site has IP pools, we do not need to assign a network to this interface # it will be assigned with a floating IP if network.isPublic() and num_nets > 1 and pool_names: - self.log_debug("Public IP to be assigned with a floating IP. Do not set a net.") + self.log_info("Public IP to be assigned with a floating IP. Do not set a net.") else: # First check if the user has specified a provider ID if net_provider_id: @@ -469,7 +469,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): elif not system.getValue("disk.0.os.credentials.password"): keypair_name = "im-%d" % int(time.time() * 100.0) - self.log_debug("Create keypair: %s" % keypair_name) + self.log_info("Create keypair: %s" % keypair_name) keypair = driver.create_key_pair(keypair_name) keypair_created = True public_key = keypair.public_key @@ -500,7 +500,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i = 0 all_failed = True while i < num_vm: - self.log_debug("Creating node") + self.log_info("Creating node") node = None retries = 0 @@ -520,7 +520,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): # Add the keypair name to remove it later if keypair_name: vm.keypair = keypair_name - self.log_debug("Node successfully created.") + self.log_info("Node successfully created.") all_failed = False inf.add_vm(vm) res.append((True, vm)) @@ -532,10 +532,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if all_failed: if keypair_created: # only delete in case of the user do not specify the keypair name - self.log_debug("Deleting keypair: %s." % keypair_name) + self.log_info("Deleting keypair: %s." % keypair_name) driver.delete_key_pair(keypair) for sg in sgs: - self.log_debug("Deleting security group: %s." % sg.id) + self.log_info("Deleting security group: %s." % sg.id) driver.ex_delete_security_group(sg) return res @@ -584,11 +584,11 @@ def manage_elastic_ips(self, vm, node, public_ips): # It is a fixed IP if ip not in public_ips: # It has not been created yet, do it - self.log_debug("Asking for a fixed ip: %s." % ip) + self.log_info("Asking for a fixed ip: %s." % ip) success, msg = self.add_elastic_ip(vm, node, ip, pool_name) else: if num >= len(public_ips): - self.log_debug("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) + self.log_info("Asking for public IP %d and there are %d" % (num + 1, len(public_ips))) success, msg = self.add_elastic_ip(vm, node, None, pool_name) if not success: @@ -608,7 +608,7 @@ def get_floating_ip(self, pool): if not ip.node_id: is_private = any([IPAddress(ip.ip_address) in IPNetwork(mask) for mask in Config.PRIVATE_NET_MASKS]) if is_private: - self.log_debug("Floating IP found %s, but it is private. Ignore." % ip.ip_address) + self.log_info("Floating IP found %s, but it is private. Ignore." 
% ip.ip_address)
                 else:
                     return True, ip
 
@@ -625,7 +625,7 @@ def add_elastic_ip(self, vm, node, fixed_ip=None, pool_name=None):
         Returns: a :py:class:`OpenStack_1_1_FloatingIpAddress` added or None if some problem occur.
         """
         try:
-            self.log_debug("Add an Floating IP")
+            self.log_info("Add a Floating IP")
             pool = self.get_ip_pool(node.driver, pool_name)
 
             if not pool:
@@ -633,7 +633,7 @@
                     msg = "Incorrect pool name: %s." % pool_name
                 else:
                     msg = "No pools available."
-                self.log_debug("No Floating IP assigned: %s" % msg)
+                self.log_info("No Floating IP assigned: %s" % msg)
                 return False, msg
 
             if node.driver.ex_list_floating_ip_pools():
@@ -659,7 +659,7 @@
                     if is_private:
                         self.log_error("Error getting a Floating IP from pool %s. The IP is private." % pool_name)
-                        self.log_debug("We have created it, so release it.")
+                        self.log_info("We have created it, so release it.")
                         floating_ip.delete()
                         return False, "Error attaching a Floating IP to the node. Private IP returned."
@@ -681,7 +681,7 @@
             if not attached:
                 self.log_error("Error attaching a Floating IP to the node.")
-                self.log_debug("We have created it, so release it.")
+                self.log_info("We have created it, so release it.")
                 floating_ip.delete()
                 return False, "Error attaching a Floating IP to the node."
             return True, floating_ip
@@ -720,7 +720,7 @@ def create_security_groups(self, driver, inf, radl):
             with inf._lock:
                 sg = self._get_security_group(driver, sg_name)
                 if not sg:
-                    self.log_debug("Creating security group: %s" % sg_name)
+                    self.log_info("Creating security group: %s" % sg_name)
                     sg = driver.ex_create_security_group(sg_name, "Security group created by the IM")
                 res.append(sg)
@@ -792,14 +792,14 @@ def finalize(self, vm, last, auth_data):
                         self.delete_security_groups(node, vm.inf)
                     else:
                         # If this is not the last vm, we skip this step
-                        self.log_debug("There are active instances. Not removing the SG")
+                        self.log_info("There are active instances. Not removing the SG")
                 except:
                     self.log_exception("Error deleting security groups.")
 
             if not success:
                 return (False, "Error destroying node: " + vm.id)
 
-            self.log_debug("VM " + str(vm.id) + " successfully destroyed")
+            self.log_info("VM " + str(vm.id) + " successfully destroyed")
         else:
             self.log_warn("VM " + str(vm.id) + " not found.")
 
@@ -819,11 +819,11 @@ def delete_security_groups(self, node, inf, timeout=90, delay=10):
                 # Get the SG to delete
                 sg = self._get_security_group(node.driver, sg_name)
                 if not sg:
-                    self.log_debug("The SG %s does not exist. Do not delete it." % sg_name)
+                    self.log_info("The SG %s does not exist. Do not delete it."
% sg_name) deleted = True else: try: - self.log_debug("Deleting SG: %s" % sg_name) + self.log_info("Deleting SG: %s" % sg_name) node.driver.ex_delete_security_group(sg) deleted = True except Exception as ex: diff --git a/etc/im.cfg b/etc/im.cfg index 1861aae95..a41f45f62 100644 --- a/etc/im.cfg +++ b/etc/im.cfg @@ -49,7 +49,7 @@ VM_INFO_UPDATE_FREQUENCY = 10 VM_INFO_UPDATE_ERROR_GRACE_PERIOD = 120 # Log File -LOG_LEVEL = DEBUG +LOG_LEVEL = INFO LOG_FILE = /var/log/im/im.log LOG_FILE_MAX_SIZE = 10485760 From b60ca2278d5f86f19b9f51fef3a327103e717054 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 8 Nov 2017 17:43:56 +0100 Subject: [PATCH 15/30] Set loglevel to INFO: #485 --- etc/logging.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/etc/logging.conf b/etc/logging.conf index fd97a89eb..854b54070 100644 --- a/etc/logging.conf +++ b/etc/logging.conf @@ -12,26 +12,26 @@ level=ERROR handlers=fileHandler [logger_ConfManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=ConfManager propagate=0 [logger_CloudConnector] -level=DEBUG +level=INFO handlers=fileHandler qualname=CloudConnector propagate=0 [logger_InfrastructureManager] -level=DEBUG +level=INFO handlers=fileHandler qualname=InfrastructureManager propagate=0 [handler_fileHandler] class=logging.handlers.RotatingFileHandler -level=DEBUG +level=INFO formatter=simpleFormatter args=('/var/log/im/im.log', 'w', 10485760, 3) From 6a1955268241ca8ee243e3347591f1f8a453c639 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 9 Nov 2017 09:00:02 +0100 Subject: [PATCH 16/30] Reduce timeout --- IM/connectors/OCCI.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index 3c6e89dc8..c6891edec 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -680,7 +680,7 @@ def create_volume(self, size, name, auth_data): self.log_exception("Error creating volume") return False, str(ex) - def detach_volume(self, volume, auth_data, timeout=90, delay=5): + def detach_volume(self, volume, auth_data, timeout=60, delay=5): auth = self.get_auth_header(auth_data) headers = {'Accept': 'text/plain', 'Connection': 'close'} if auth: From 03e8cf5ed07ac497a32c9b61a48e07782f267e89 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 9 Nov 2017 16:32:21 +0100 Subject: [PATCH 17/30] Implements: #485 --- IM/InfrastructureManager.py | 76 ++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index 4f75883fa..7591ed60f 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -153,7 +153,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c """Launch a group of deploys together.""" if not deploy_group: - InfrastructureManager.logger.warning("No VMs to deploy!") + InfrastructureManager.logger.warning("Inf ID: %s: No VMs to deploy!" % sel_inf.id) return if not deploys_group_cloud_list: cancel_deployment.append(Exception("No cloud provider available")) @@ -169,6 +169,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c concrete_system = concrete_systems[cloud_id][deploy.id][0] if not concrete_system: InfrastructureManager.logger.error( + "Inf ID: " + sel_inf.id + ": " + "Error, no concrete system to deploy: " + deploy.id + " in cloud: " + cloud_id + ". 
Check if a correct image is being used") exceptions.append("Error, no concrete system to deploy: " + deploy.id + @@ -177,8 +178,7 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c (username, _, _, _) = concrete_system.getCredentialValues() if not username: - raise IncorrectVMCrecentialsException( - "No username for deploy: " + deploy.id) + raise IncorrectVMCrecentialsException("No username for deploy: " + deploy.id) launch_radl = radl.clone() launch_radl.systems = [concrete_system.clone()] @@ -186,11 +186,13 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c requested_radl.systems = [radl.get_system_by_name(concrete_system.name)] try: InfrastructureManager.logger.info( + "Inf ID: " + sel_inf.id + ": " + "Launching %d VMs of type %s" % (remain_vm, concrete_system.name)) launched_vms = cloud.cloud.getCloudConnector(sel_inf).launch( sel_inf, launch_radl, requested_radl, remain_vm, auth) except Exception as e: - InfrastructureManager.logger.exception("Error launching some of the VMs: %s" % e) + InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + ": " + + "Error launching some of the VMs: %s" % e) exceptions.append("Error launching the VMs of type %s to cloud ID %s" " of type %s. Cloud Provider Error: %s" % (concrete_system.name, cloud.cloud.id, @@ -198,12 +200,14 @@ def _launch_group(sel_inf, deploy_group, deploys_group_cloud_list, cloud_list, c launched_vms = [] for success, launched_vm in launched_vms: if success: - InfrastructureManager.logger.info("VM successfully launched: " + str(launched_vm.id)) + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": " + + "VM successfully launched: " + str(launched_vm.id)) deployed_vm.setdefault(deploy, []).append(launched_vm) deploy.cloud_id = cloud_id remain_vm -= 1 else: InfrastructureManager.logger.warn( + "Inf ID: " + sel_inf.id + ": " + "Error launching some of the VMs: " + str(launched_vm)) exceptions.append("Error launching the VMs of type %s to cloud ID %s of type %s. 
%s" % ( concrete_system.name, cloud.cloud.id, cloud.cloud.type, str(launched_vm))) @@ -266,8 +270,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Reconfiguring the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Reconfiguring the inf: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data else: @@ -281,6 +284,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): for s in radl.configures: sel_inf.radl.add(s.clone(), "replace") InfrastructureManager.logger.info( + "Inf ID: " + sel_inf.id + ": " + "(Re)definition of %s %s" % (type(s), s.getId())) # and update contextualize @@ -301,7 +305,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): password=password, public_key=public_key, private_key=private_key, new=True) # Stick all virtual machines to be reconfigured - InfrastructureManager.logger.info("Contextualize the inf.") + InfrastructureManager.logger.info("Contextualize the inf: " + sel_inf.id) # reset ansible_configured to force the re-installation of galaxy roles sel_inf.ansible_configured = None sel_inf.Contextualize(auth, vm_list) @@ -373,8 +377,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): failed_clouds = [] auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Adding resources to inf: " + str(inf_id)) + InfrastructureManager.logger.info("Adding resources to inf: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data @@ -392,8 +395,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # If any deploy is defined, only update definitions. if not radl.deploys: sel_inf.update_radl(radl, []) - InfrastructureManager.logger.warn( - "Infrastructure without any deploy. Exiting.") + InfrastructureManager.logger.warn("Inf ID: " + sel_inf.id + ": without any deploy. Exiting.") return [] for system in radl.systems: @@ -411,6 +413,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): requirements_radl, conflict="other", missing="other") except Exception: InfrastructureManager.logger.exception( + "Inf ID: " + sel_inf.id + ": " + "Error in the requirements of the app: " + app_to_install.getValue("name") + ". 
Ignore them.") @@ -476,7 +479,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Group virtual machines to deploy by network dependencies deploy_groups = InfrastructureManager._compute_deploy_groups(radl) - InfrastructureManager.logger.debug("Groups of VMs with dependencies") + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Groups of VMs with dependencies") InfrastructureManager.logger.debug(deploy_groups) # Sort by score the cloud providers @@ -494,7 +497,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): "are asked to be deployed in different cloud providers: %s" % deploy_group) elif len(suggested_cloud_ids) == 1: if suggested_cloud_ids[0] not in cloud_list: - InfrastructureManager.logger.debug("Cloud Provider list:") + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Cloud Provider list:") InfrastructureManager.logger.debug(cloud_list) raise Exception("No auth data for cloud with ID: %s" % suggested_cloud_ids[0]) else: @@ -581,8 +584,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Add the new virtual machines to the infrastructure sel_inf.update_radl(radl, [(d, deployed_vm[d], concrete_systems[d.cloud_id][d.id][0]) for d in deployed_vm]) - InfrastructureManager.logger.info( - "VMs %s successfully added to Inf id %s" % (new_vms, sel_inf.id)) + InfrastructureManager.logger.info("VMs %s successfully added to Inf id %s" % (new_vms, sel_inf.id)) # Let's contextualize! if context and new_vms: @@ -608,8 +610,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Removing the VMs: " + str(vm_list) + " from inf ID: '" + str(inf_id) + "'") + InfrastructureManager.logger.info("Removing the VMs: " + str(vm_list) + " from inf ID: '" + str(inf_id) + "'") sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -630,7 +631,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True): if InfrastructureManager._delete_vm(vm, delete_list, auth, exceptions): cont += 1 - InfrastructureManager.logger.info("%d VMs successfully removed" % cont) + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": %d VMs successfully removed" % cont) if context and cont > 0: # Now test again if the infrastructure is contextualizing @@ -639,7 +640,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True): IM.InfrastructureList.InfrastructureList.save_data(inf_id) if exceptions: - InfrastructureManager.logger.exception("Error removing resources") + InfrastructureManager.logger.exception("Inf ID: " + sel_inf.id + ": Error removing resources") raise Exception("Error removing resources: %s" % exceptions) return cont @@ -690,7 +691,8 @@ def GetVMInfo(inf_id, vm_id, auth, json_res=False): success = vm.update_status(auth) if not success: - InfrastructureManager.logger.warn( + InfrastructureManager.logger.debug( + "Inf ID: " + str(inf_id) + ": " + "Information not updated. 
Using last information retrieved") if json_res: @@ -744,6 +746,7 @@ def AlterVM(inf_id, vm_id, radl_data, auth): vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) if not vm: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "VM does not exist or Access Error") raise Exception("VM does not exist or Access Error") @@ -762,9 +765,8 @@ def AlterVM(inf_id, vm_id, radl_data, auth): raise exception if not success: InfrastructureManager.logger.warn( - "Error getting the information about the VM " + str(vm_id) + ": " + str(alter_res)) - InfrastructureManager.logger.warn( - "Using last information retrieved") + "Inf ID: " + str(inf_id) + ": " + + "Error modifying the information about the VM " + str(vm_id) + ": " + str(alter_res)) vm.update_status(auth) IM.InfrastructureList.InfrastructureList.save_data(inf_id) @@ -785,8 +787,7 @@ def GetInfrastructureRADL(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Getting RADL of the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting RADL of the inf: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -808,15 +809,11 @@ def GetInfrastructureInfo(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Getting information about the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting information about the inf: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) - # : .. todo:: - # : Return int instead res = [str(vm.im_id) for vm in sel_inf.get_vm_list()] - InfrastructureManager.logger.info("Information obtained successfully") InfrastructureManager.logger.debug(res) return res @@ -960,8 +957,7 @@ def StopInfrastructure(inf_id, auth): msg += str(e) + "\n" raise Exception("Error stopping the infrastructure: %s" % msg) - InfrastructureManager.logger.info( - "Infrastructure successfully stopped") + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully stopped") return "" @staticmethod @@ -1013,8 +1009,7 @@ def StartInfrastructure(inf_id, auth): msg += str(e) + "\n" raise Exception("Error starting the infrastructure: %s" % msg) - InfrastructureManager.logger.info( - "Infrastructure successfully restarted") + InfrastructureManager.logger.info("Inf ID: " + sel_inf.id + ": Successfully restarted") return "" @staticmethod @@ -1044,10 +1039,12 @@ def StartVM(inf_id, vm_id, auth): if not success: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s cannot be restarted: %s" % (vm_id, msg)) raise Exception("Error starting the VM: %s" % msg) else: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s successfully restarted" % vm_id) return "" @@ -1079,10 +1076,12 @@ def StopVM(inf_id, vm_id, auth): if not success: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s cannot be stopped: %s" % (vm_id, msg)) raise Exception("Error stopping the VM: %s" % msg) else: InfrastructureManager.logger.info( + "Inf ID: " + str(inf_id) + ": " + "The VM %s successfully stopped" % vm_id) return "" @@ -1137,8 +1136,7 @@ def DestroyInfrastructure(inf_id, auth): # First check the auth data auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Destroying the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Destroying the infrastructure id: " + str(inf_id)) sel_inf = 
InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -1167,8 +1165,7 @@ def DestroyInfrastructure(inf_id, auth): sel_inf.delete() IM.InfrastructureList.InfrastructureList.save_data(inf_id) IM.InfrastructureList.InfrastructureList.remove_inf(sel_inf) - InfrastructureManager.logger.info( - "Infrastructure %s successfully destroyed" % inf_id) + InfrastructureManager.logger.info("Infrastructure %s successfully destroyed" % inf_id) return "" @staticmethod @@ -1335,6 +1332,7 @@ def CreateDiskSnapshot(inf_id, vm_id, disk_num, image_name, auto_delete, auth): Return: a str with url of the saved snapshot. """ auth = InfrastructureManager.check_auth_data(auth) + InfrastructureManager.logger.info("Creating a snapshot of VM id: %s Inf id: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) From 6c2c73f5775fb65a7a82de545687da70a5462213 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 10 Nov 2017 13:29:52 +0100 Subject: [PATCH 18/30] Update docs --- README | 1 + README.md | 1 + doc/source/manual.rst | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/README b/README index e556628e7..d27b55c4d 100644 --- a/README +++ b/README @@ -175,6 +175,7 @@ You can download it from their corresponding PPAs. But here you have some links: * python-backports.ssl-match-hostname: http://archive.ubuntu.com/ubuntu/pool/universe/b/backports.ssl-match-hostname/python-backports.ssl-match-hostname_3.4.0.2-1_all.deb * python-scp: http://archive.ubuntu.com/ubuntu/pool/universe/p/python-scp/python-scp_0.10.2-1_all.deb * python-libcloud: http://archive.ubuntu.com/ubuntu/pool/universe/libc/libcloud/python-libcloud_0.20.0-1_all.deb + * python-xmltodict: http://archive.ubuntu.com/ubuntu/pool/universe/p/python-xmltodict/python-xmltodict_0.9.2-3_all.deb Also Azure python SDK is not available in Ubuntu 16.04. So if you need the Azure plugin you have to manually install them. You can download it from their corresponding PPAs. But here you have some links: diff --git a/README.md b/README.md index 2cc8c2ef4..c68df44ca 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,7 @@ You can download it from their corresponding PPAs. But here you have some links: * python-backports.ssl-match-hostname: [download](http://archive.ubuntu.com/ubuntu/pool/universe/b/backports.ssl-match-hostname/python-backports.ssl-match-hostname_3.4.0.2-1_all.deb) * python-scp: [download](http://archive.ubuntu.com/ubuntu/pool/universe/p/python-scp/python-scp_0.10.2-1_all.deb) * python-libcloud: [download](http://archive.ubuntu.com/ubuntu/pool/universe/libc/libcloud/python-libcloud_0.20.0-1_all.deb) + * python-xmltodict: [download](http://archive.ubuntu.com/ubuntu/pool/universe/p/python-xmltodict/python-xmltodict_0.9.2-3_all.deb) Also Azure python SDK is not available in Ubuntu 16.04. So if you need the Azure plugin you have to manually install them. You can download it from their corresponding PPAs. But here you have some links: diff --git a/doc/source/manual.rst b/doc/source/manual.rst index 5c30b0348..4e472bb00 100644 --- a/doc/source/manual.rst +++ b/doc/source/manual.rst @@ -144,7 +144,7 @@ You can download it from their corresponding PPAs. But here you have some links: * python-backports.ssl-match-hostname: `download `_ * python-scp: `download `_ * python-libcloud: `download `_ - * python-xmltodict: `download `_ + * python-xmltodict: `download `_ Also Azure python SDK is not available in Ubuntu 16.04. So if you need the Azure plugin you have to manually install them. 
 You can download it from their corresponding PPAs. But here you have some links:

From 7a293de5240164ab711cf9c0e8b18fafcf765afe Mon Sep 17 00:00:00 2001
From: micafer
Date: Mon, 13 Nov 2017 08:22:02 +0100
Subject: [PATCH 19/30] Fix #490

---
 IM/connectors/OpenStack.py | 45 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/IM/connectors/OpenStack.py b/IM/connectors/OpenStack.py
index 634eeb341..305ca7785 100644
--- a/IM/connectors/OpenStack.py
+++ b/IM/connectors/OpenStack.py
@@ -147,6 +147,51 @@ def get_driver(self, auth_data):
             self.driver = driver
             return driver
 
+    def get_instance_type(self, sizes, radl):
+        """
+        Get the instance type (NodeSize) to launch with LibCloud
+
+        Arguments:
+           - sizes(list of :py:class:`libcloud.compute.base.NodeSize`): List of sizes offered by the provider
+           - radl(str): RADL document with the requirements of the VM to get the instance type
+        Returns: a :py:class:`libcloud.compute.base.NodeSize` with the instance type to launch
+        """
+        instance_type_name = radl.getValue('instance_type')
+
+        cpu = 1
+        cpu_op = ">="
+        if radl.getFeature('cpu.count'):
+            cpu = radl.getValue('cpu.count')
+            cpu_op = radl.getFeature('cpu.count').getLogOperator()
+
+        memory = 1
+        memory_op = ">="
+        if radl.getFeature('memory.size'):
+            memory = radl.getFeature('memory.size').getValue('M')
+            memory_op = radl.getFeature('memory.size').getLogOperator()
+        disk_free = 0
+        disk_free_op = ">="
+        if radl.getValue('disk.0.free_size'):
+            disk_free = radl.getFeature('disk.0.free_size').getValue('G')
+            disk_free_op = radl.getFeature('disk.0.free_size').getLogOperator()
+
+        res = None
+        for size in sizes:
+            # keep the node size with the lowest price (and lowest memory as a
+            # tie-breaker for providers where the price is not set)
+            if res is None or (size.price <= res.price and size.ram <= res.ram):
+                str_compare = "size.ram " + memory_op + " memory"
+                str_compare += " and size.vcpus " + cpu_op + " cpu "
+                str_compare += " and size.disk " + disk_free_op + " disk_free"
+                if eval(str_compare):
+                    if not instance_type_name or size.name == instance_type_name:
+                        res = size
+
+        if res is None:
+            self.log_error("No compatible size found")
+
+        return res
+
     def concreteSystem(self, radl_system, auth_data):
         image_urls = radl_system.getValue("disk.0.image.url")
         if not image_urls:
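Note on the check above: the comparison is assembled as a Python expression string ("size.ram >= memory and ...") and evaluated with eval(). The same test can be written without eval() by mapping the operator tokens that getLogOperator() returns to functions from the operator module — a minimal illustrative sketch (assuming the usual RADL comparators; this is not code from the patch series):

import operator

# Assumed set of comparator tokens returned by Feature.getLogOperator();
# the exact set supported by RADL may differ.
OPS = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
       "<": operator.lt, "=": operator.eq}

def satisfies(value, op_token, required):
    # e.g. satisfies(2048, ">=", 1024) -> True
    return OPS[op_token](value, required)

The next patch keeps the eval() approach but simplifies how the list of sizes is searched.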
size found") - - return res + str_compare = "size.ram " + memory_op + " memory" + str_compare += " and size.vcpus " + cpu_op + " cpu " + str_compare += " and size.disk " + disk_free_op + " disk_free" + if eval(str_compare): + if not instance_type_name or size.name == instance_type_name: + return size + + self.log_error("No compatible size found") + return None def concreteSystem(self, radl_system, auth_data): image_urls = radl_system.getValue("disk.0.image.url") From da6262b09b60c7d663bceeb716af851246436bff Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 14 Nov 2017 08:21:42 +0100 Subject: [PATCH 21/30] Remove noisy error message --- IM/connectors/OCCI.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index c6891edec..de23f76fd 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -270,7 +270,7 @@ def manage_public_ips(self, vm, auth_data): self.MAX_ADD_IP_COUNT) else: self.log_error("Error adding public IP the VM: Max number of retries reached.") - self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n" + # self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n" # this is a total fail, stop contextualization vm.configured = False vm.inf.set_configured(False) From 9dc5b252f7566828671c84969164e4694940ee7d Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 14 Nov 2017 08:22:43 +0100 Subject: [PATCH 22/30] Minor change --- IM/connectors/Azure.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/IM/connectors/Azure.py b/IM/connectors/Azure.py index 32be3f6a9..2aaa87e90 100644 --- a/IM/connectors/Azure.py +++ b/IM/connectors/Azure.py @@ -148,20 +148,18 @@ def get_instance_type(self, system, credentials, subscription_id): instace_types = list(compute_client.virtual_machine_sizes.list(location)) instace_types.sort(key=lambda x: (x.number_of_cores, x.memory_in_mb, x.resource_disk_size_in_mb)) - res = None default = None for instace_type in instace_types: if instace_type.name == self.INSTANCE_TYPE: default = instace_type - # get the instance type with the lowest Memory - if res is None: - str_compare = "instace_type.number_of_cores " + cpu_op + " cpu " - str_compare += " and instace_type.memory_in_mb " + memory_op + " memory " - str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free" - - if eval(str_compare): - if not instance_type_name or instace_type.name == instance_type_name: - return instace_type + + str_compare = "instace_type.number_of_cores " + cpu_op + " cpu " + str_compare += " and instace_type.memory_in_mb " + memory_op + " memory " + str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free" + + if eval(str_compare): + if not instance_type_name or instace_type.name == instance_type_name: + return instace_type return default From 9afd4ed3c47cfb909fbd040b7a0c71878205e8fa Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 16 Nov 2017 09:45:01 +0100 Subject: [PATCH 23/30] Update docs --- doc/source/client.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/client.rst b/doc/source/client.rst index c62a37cea..209aea49c 100644 --- a/doc/source/client.rst +++ b/doc/source/client.rst @@ -102,9 +102,10 @@ The :program:`im_client` is called like this:: Stop (but not remove) the specified virtual machine ``vmId`` associated to the infrastructure with ID infrastructure with ID ``infId``. 
From da6262b09b60c7d663bceeb716af851246436bff Mon Sep 17 00:00:00 2001
From: micafer
Date: Tue, 14 Nov 2017 08:21:42 +0100
Subject: [PATCH 21/30] Remove noisy error message

---
 IM/connectors/OCCI.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py
index c6891edec..de23f76fd 100644
--- a/IM/connectors/OCCI.py
+++ b/IM/connectors/OCCI.py
@@ -270,7 +270,7 @@ def manage_public_ips(self, vm, auth_data):
                                    self.MAX_ADD_IP_COUNT)
             else:
                 self.log_error("Error adding public IP the VM: Max number of retries reached.")
-                self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n"
+                # self.error_messages += "Error adding public IP the VM: Max number of retries reached.\n"
                 # this is a total fail, stop contextualization
                 vm.configured = False
                 vm.inf.set_configured(False)

From 9dc5b252f7566828671c84969164e4694940ee7d Mon Sep 17 00:00:00 2001
From: micafer
Date: Tue, 14 Nov 2017 08:22:43 +0100
Subject: [PATCH 22/30] Minor change

---
 IM/connectors/Azure.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/IM/connectors/Azure.py b/IM/connectors/Azure.py
index 32be3f6a9..2aaa87e90 100644
--- a/IM/connectors/Azure.py
+++ b/IM/connectors/Azure.py
@@ -148,20 +148,18 @@ def get_instance_type(self, system, credentials, subscription_id):
         instace_types = list(compute_client.virtual_machine_sizes.list(location))
         instace_types.sort(key=lambda x: (x.number_of_cores, x.memory_in_mb, x.resource_disk_size_in_mb))
 
-        res = None
         default = None
         for instace_type in instace_types:
             if instace_type.name == self.INSTANCE_TYPE:
                 default = instace_type
-            # get the instance type with the lowest Memory
-            if res is None:
-                str_compare = "instace_type.number_of_cores " + cpu_op + " cpu "
-                str_compare += " and instace_type.memory_in_mb " + memory_op + " memory "
-                str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free"
-
-                if eval(str_compare):
-                    if not instance_type_name or instace_type.name == instance_type_name:
-                        return instace_type
+
+            str_compare = "instace_type.number_of_cores " + cpu_op + " cpu "
+            str_compare += " and instace_type.memory_in_mb " + memory_op + " memory "
+            str_compare += " and instace_type.resource_disk_size_in_mb " + disk_free_op + " disk_free"
+
+            if eval(str_compare):
+                if not instance_type_name or instace_type.name == instance_type_name:
+                    return instace_type
 
         return default

From 9afd4ed3c47cfb909fbd040b7a0c71878205e8fa Mon Sep 17 00:00:00 2001
From: micafer
Date: Thu, 16 Nov 2017 09:45:01 +0100
Subject: [PATCH 23/30] Update docs

---
 doc/source/client.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/doc/source/client.rst b/doc/source/client.rst
index c62a37cea..209aea49c 100644
--- a/doc/source/client.rst
+++ b/doc/source/client.rst
@@ -102,9 +102,10 @@ The :program:`im_client` is called like this::
    Stop (but not remove) the specified virtual machine ``vmId`` associated to the
    infrastructure with ID ``infId``.
 
-  ``sshvm infId vmId``
+  ``sshvm infId vmId [show_only]``
 
    Connect via SSH to the specified virtual machine ``vmId`` associated to the
-   infrastructure with ID ``infId``.
+   infrastructure with ID ``infId``. The ``show_only`` parameter is optional; if set, the SSH
+   command is only shown on stdout instead of being executed.
 
   ``export infId delete``
 
   Export the data of the infrastructure with ID ``infId``. The ``delete`` parameter is optional

From a4cf5a20664a62626f69f7458f98b353a636e50f Mon Sep 17 00:00:00 2001
From: micafer
Date: Thu, 16 Nov 2017 10:21:54 +0100
Subject: [PATCH 24/30] Implements: #494

---
 IM/InfrastructureManager.py | 107 ++++++++++++++++--------------
 1 file changed, 48 insertions(+), 59 deletions(-)

diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py
index 7591ed60f..ff804c2ad 100644
--- a/IM/InfrastructureManager.py
+++ b/IM/InfrastructureManager.py
@@ -236,14 +236,14 @@ def get_infrastructure(inf_id, auth):
         """Return infrastructure info with some id if valid authorization provided."""
         if inf_id not in IM.InfrastructureList.InfrastructureList.get_inf_ids():
-            InfrastructureManager.logger.error("Error, incorrect infrastructure ID: %s" % inf_id)
+            InfrastructureManager.logger.error("Error, incorrect Inf ID: %s" % inf_id)
             raise IncorrectInfrastructureException()
         sel_inf = IM.InfrastructureList.InfrastructureList.get_infrastructure(inf_id)
         if not sel_inf.is_authorized(auth):
-            InfrastructureManager.logger.error("Access Error to infrastructure ID: %s" % inf_id)
+            InfrastructureManager.logger.error("Access Error to Inf ID: %s" % inf_id)
             raise UnauthorizedUserException()
         if sel_inf.deleted:
-            InfrastructureManager.logger.error("Infrastructure ID: %s is deleted." % inf_id)
+            InfrastructureManager.logger.error("Inf ID: %s is deleted."
% inf_id) raise DeletedInfrastructureException() return sel_inf @@ -270,12 +270,12 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Reconfiguring the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Reconfiguring the Inf ID: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data else: radl = radl_parse.parse_radl(radl_data) - InfrastructureManager.logger.debug(radl) + InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -305,7 +305,7 @@ def Reconfigure(inf_id, radl_data, auth, vm_list=None): password=password, public_key=public_key, private_key=private_key, new=True) # Stick all virtual machines to be reconfigured - InfrastructureManager.logger.info("Contextualize the inf: " + sel_inf.id) + InfrastructureManager.logger.info("Contextualize the Inf ID: " + sel_inf.id) # reset ansible_configured to force the re-installation of galaxy roles sel_inf.ansible_configured = None sel_inf.Contextualize(auth, vm_list) @@ -377,14 +377,14 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): failed_clouds = [] auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Adding resources to inf: " + str(inf_id)) + InfrastructureManager.logger.info("Adding resources to Inf ID: " + str(inf_id)) if isinstance(radl_data, RADL): radl = radl_data else: radl = radl_parse.parse_radl(radl_data) - InfrastructureManager.logger.debug(radl) + InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": \n" + str(radl)) radl.check() sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -417,7 +417,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): "Error in the requirements of the app: " + app_to_install.getValue("name") + ". 
Ignore them.") - InfrastructureManager.logger.debug(requirements) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(requirements)) break # Get VMRC credentials @@ -480,7 +480,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Group virtual machines to deploy by network dependencies deploy_groups = InfrastructureManager._compute_deploy_groups(radl) InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Groups of VMs with dependencies") - InfrastructureManager.logger.debug(deploy_groups) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + "\n" + str(deploy_groups)) # Sort by score the cloud providers # NOTE: consider fake deploys (vm_number == 0) @@ -498,7 +498,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): elif len(suggested_cloud_ids) == 1: if suggested_cloud_ids[0] not in cloud_list: InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": Cloud Provider list:") - InfrastructureManager.logger.debug(cloud_list) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + " - " + str(cloud_list)) raise Exception("No auth data for cloud with ID: %s" % suggested_cloud_ids[0]) else: cloud_list0 = [ @@ -584,7 +584,7 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None): # Add the new virtual machines to the infrastructure sel_inf.update_radl(radl, [(d, deployed_vm[d], concrete_systems[d.cloud_id][d.id][0]) for d in deployed_vm]) - InfrastructureManager.logger.info("VMs %s successfully added to Inf id %s" % (new_vms, sel_inf.id)) + InfrastructureManager.logger.info("VMs %s successfully added to Inf ID: %s" % (new_vms, sel_inf.id)) # Let's contextualize! if context and new_vms: @@ -610,7 +610,7 @@ def RemoveResource(inf_id, vm_list, auth, context=True): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Removing the VMs: " + str(vm_list) + " from inf ID: '" + str(inf_id) + "'") + InfrastructureManager.logger.info("Removing the VMs: " + str(vm_list) + " from Inf ID: '" + str(inf_id) + "'") sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -685,7 +685,7 @@ def GetVMInfo(inf_id, vm_id, auth, json_res=False): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Get information about the vm: '" + str(vm_id) + "' from inf: " + str(inf_id)) + "Get information about the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) @@ -716,12 +716,12 @@ def GetVMContMsg(inf_id, vm_id, auth): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Get contextualization log of the vm: '" + str(vm_id) + "' from inf: " + str(inf_id)) + "Get contextualization log of the vm: '" + str(vm_id) + "' from Inf ID: " + str(inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) cont_msg = vm.get_cont_msg() - InfrastructureManager.logger.debug(cont_msg) + InfrastructureManager.logger.debug("Inf ID: " + str(inf_id) + ": " + cont_msg) return cont_msg @@ -742,7 +742,7 @@ def AlterVM(inf_id, vm_id, radl_data, auth): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Modifying the VM: '" + str(vm_id) + "' from inf: " + str(inf_id)) + "Modifying the VM: '" + str(vm_id) + "' from Inf ID: " + str(inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) if not vm: InfrastructureManager.logger.info( @@ -787,12 +787,12 @@ def 
GetInfrastructureRADL(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Getting RADL of the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting RADL of the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) radl = str(sel_inf.get_radl()) - InfrastructureManager.logger.debug(radl) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + radl) return radl @staticmethod @@ -809,12 +809,12 @@ def GetInfrastructureInfo(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Getting information about the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting information about the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) res = [str(vm.im_id) for vm in sel_inf.get_vm_list()] - InfrastructureManager.logger.debug(res) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + str(res)) return res @staticmethod @@ -833,7 +833,7 @@ def GetInfrastructureContMsg(inf_id, auth, headeronly=False): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Getting cont msg of the inf: " + str(inf_id)) + "Getting cont msg of the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) res = sel_inf.cont_out @@ -844,7 +844,7 @@ def GetInfrastructureContMsg(inf_id, auth, headeronly=False): res += "VM " + str(vm.id) + ":\n" + vm.get_cont_msg() + "\n" res += "***************************************************************************\n" - InfrastructureManager.logger.debug(res) + InfrastructureManager.logger.debug("Inf ID: " + sel_inf.id + ": " + res) return res @staticmethod @@ -863,8 +863,7 @@ def GetInfrastructureState(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Getting state of the inf: " + str(inf_id)) + InfrastructureManager.logger.info("Getting state of the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) @@ -904,20 +903,19 @@ def GetInfrastructureState(inf_id, auth): if state is None: state = VirtualMachine.UNKNOWN - InfrastructureManager.logger.info( - "inf: " + str(inf_id) + " is in state: " + state) + InfrastructureManager.logger.info("Inf ID: " + str(inf_id) + " is in state: " + state) return {'state': state, 'vm_states': vm_states} @staticmethod def _stop_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.info("Stopping the VM id: " + vm.id) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Stopping the VM id: " + vm.id) (success, msg) = vm.stop(auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be stopped") + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be stopped") exceptions.append(msg) @staticmethod @@ -934,8 +932,7 @@ def StopInfrastructure(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Stopping the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Stopping the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -964,12 +961,12 @@ def StopInfrastructure(inf_id, auth): def _start_vm(vm, auth, exceptions): try: success = False - InfrastructureManager.logger.info("Starting the VM id: " + vm.id) + 
InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Starting the VM id: " + vm.id) (success, msg) = vm.start(auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be restarted") + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be restarted") exceptions.append(msg) @staticmethod @@ -986,8 +983,7 @@ def StartInfrastructure(inf_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Starting the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Starting the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -1027,8 +1023,7 @@ def StartVM(inf_id, vm_id, auth): """ auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info( - "Starting the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + InfrastructureManager.logger.info("Starting the VM id %s from the Inf ID: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) success = False @@ -1065,7 +1060,7 @@ def StopVM(inf_id, vm_id, auth): auth = InfrastructureManager.check_auth_data(auth) InfrastructureManager.logger.info( - "Stopping the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + "Stopping the VM id %s from the Inf ID: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) success = False @@ -1112,12 +1107,12 @@ def _delete_vm(vm, delete_list, auth, exceptions): last = InfrastructureManager.is_last_in_cloud(vm, delete_list, remain_vms) success = False try: - InfrastructureManager.logger.info("Finalizing the VM id: " + str(vm.id)) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": Finalizing the VM id: " + str(vm.id)) (success, msg) = vm.finalize(last, auth) except Exception as e: msg = str(e) if not success: - InfrastructureManager.logger.info("The VM cannot be finalized: %s" % msg) + InfrastructureManager.logger.info("Inf ID: " + vm.inf.id + ": The VM cannot be finalized: %s" % msg) exceptions.append(msg) return success @@ -1136,7 +1131,7 @@ def DestroyInfrastructure(inf_id, auth): # First check the auth data auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Destroying the infrastructure id: " + str(inf_id)) + InfrastructureManager.logger.info("Destroying the Inf ID: " + str(inf_id)) sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) exceptions = [] @@ -1165,7 +1160,7 @@ def DestroyInfrastructure(inf_id, auth): sel_inf.delete() IM.InfrastructureList.InfrastructureList.save_data(inf_id) IM.InfrastructureList.InfrastructureList.remove_inf(sel_inf) - InfrastructureManager.logger.info("Infrastructure %s successfully destroyed" % inf_id) + InfrastructureManager.logger.info("Inf ID: %s: Successfully destroyed" % inf_id) return "" @staticmethod @@ -1189,12 +1184,10 @@ def check_im_user(auth): break return found except Exception: - InfrastructureManager.logger.exception( - "Incorrect format in the User DB file %s" % Config.USER_DB) + InfrastructureManager.logger.exception("Incorrect format in the User DB file %s" % Config.USER_DB) return False else: - InfrastructureManager.logger.error( - "User DB file %s not found" % Config.USER_DB) + InfrastructureManager.logger.error("User DB file %s not found" % Config.USER_DB) return False else: return True @@ -1238,21 +1231,18 @@ def CreateInfrastructure(radl, auth): inf.auth = 
Authentication(auth.getAuthInfo("InfrastructureManager")) IM.InfrastructureList.InfrastructureList.add_infrastructure(inf) IM.InfrastructureList.InfrastructureList.save_data(inf.id) - InfrastructureManager.logger.info( - "Creating new infrastructure with id: " + str(inf.id)) + InfrastructureManager.logger.info("Creating new Inf ID: " + str(inf.id)) # Add the resources in radl_data try: InfrastructureManager.AddResource(inf.id, radl, auth) except Exception as e: - InfrastructureManager.logger.exception( - "Error Creating Inf id " + str(inf.id)) + InfrastructureManager.logger.exception("Error Creating Inf ID " + str(inf.id)) inf.delete() IM.InfrastructureList.InfrastructureList.save_data(inf.id) IM.InfrastructureList.InfrastructureList.remove_inf(inf) raise e - InfrastructureManager.logger.info( - "Infrastructure id " + str(inf.id) + " successfully created") + InfrastructureManager.logger.info("Inf ID:" + str(inf.id) + ": Successfully created") return inf.id @@ -1273,8 +1263,7 @@ def GetInfrastructureList(auth): auths = auth.getAuthInfo('InfrastructureManager') if not auths: - InfrastructureManager.logger.error( - "No correct auth data has been specified.") + InfrastructureManager.logger.error("No correct auth data has been specified.") raise InvaliddUserException() return IM.InfrastructureList.InfrastructureList.get_inf_ids(auth) @@ -1286,7 +1275,7 @@ def ExportInfrastructure(inf_id, delete, auth_data): sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) str_inf = sel_inf.serialize() - InfrastructureManager.logger.info("Exporting infrastructure id: " + str(sel_inf.id)) + InfrastructureManager.logger.info("Exporting Inf ID: " + str(sel_inf.id)) if delete: sel_inf.delete() IM.InfrastructureList.InfrastructureList.save_data(sel_inf.id) @@ -1307,8 +1296,7 @@ def ImportInfrastructure(str_inf, auth_data): new_inf.auth = Authentication(auth.getAuthInfo("InfrastructureManager")) IM.InfrastructureList.InfrastructureList.add_infrastructure(new_inf) - InfrastructureManager.logger.info( - "Importing new infrastructure with id: " + str(new_inf.id)) + InfrastructureManager.logger.info("Importing new infrastructure with Inf ID: " + str(new_inf.id)) # Save the state IM.InfrastructureList.InfrastructureList.save_data(new_inf.id) return new_inf.id @@ -1332,13 +1320,14 @@ def CreateDiskSnapshot(inf_id, vm_id, disk_num, image_name, auto_delete, auth): Return: a str with url of the saved snapshot. 
""" auth = InfrastructureManager.check_auth_data(auth) - InfrastructureManager.logger.info("Creating a snapshot of VM id: %s Inf id: %s" % (vm_id, inf_id)) + InfrastructureManager.logger.info("Creating a snapshot of VM id: %s Inf ID: %s" % (vm_id, inf_id)) vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) success, image_url = vm.create_snapshot(disk_num, image_name, auto_delete, auth) if not success: - InfrastructureManager.logger.error("Error creating snapshot: %s" % image_url) + InfrastructureManager.logger.error("Error creating a snapshot: %s of VM id: %s " + "Inf ID: %s" % (image_url, vm_id, inf_id)) raise Exception("Error creating snapshot: %s" % image_url) else: return image_url From d245fac0f6d88869671b9b4259a17002607c370e Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 16 Nov 2017 10:22:50 +0100 Subject: [PATCH 25/30] Update docs --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index c68df44ca..b9ecba1f9 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,6 @@ ============================ * Version ![PyPI](https://img.shields.io/pypi/v/im.svg) -* PyPI ![PypI](https://img.shields.io/pypi/dm/IM.svg) * Build Status [![Build Status](http://jenkins.i3m.upv.es/buildStatus/icon?job=grycap/im-unit)](http://jenkins.i3m.upv.es/job/grycap/job/im-unit/) IM is a tool that deploys complex and customized virtual infrastructures on IaaS From 18533ada4c03a4cd3f80708821fde372ef88e328 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 16 Nov 2017 11:55:32 +0100 Subject: [PATCH 26/30] Set version num 1.6.5 --- IM/__init__.py | 2 +- changelog | 3 +++ docker-devel/Dockerfile | 2 +- docker/Dockerfile | 4 ++-- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/IM/__init__.py b/IM/__init__.py index b51df70cc..e27452c8d 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,5 +19,5 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'uriparse', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.6.4' +__version__ = '1.6.5' __author__ = 'Miguel Caballer' diff --git a/changelog b/changelog index 25a829bdc..7a336f494 100644 --- a/changelog +++ b/changelog @@ -358,3 +358,6 @@ IM 1.6.4 * Decrease timeout getting ansible process results. * Enable to get the initial infrastructure contextualization log. +IM 1.6.5 + * Homogenize Inf ID log message + * Fix error cpu.count parameter is ignored in OpenStack conn. diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index ffceb73b4..7bdc2ce0f 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -2,7 +2,7 @@ FROM grycap/jenkins:ubuntu16.04-im ARG BRANCH=devel MAINTAINER Miguel Caballer -LABEL version="1.6.4" +LABEL version="1.6.5" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 diff --git a/docker/Dockerfile b/docker/Dockerfile index fcd13b209..905a60b48 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile to create a container with the IM service FROM ubuntu:16.04 LABEL maintainer="Miguel Caballer " -LABEL version="1.6.4" +LABEL version="1.6.5" LABEL description="Container image to run the IM service. 
(http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -17,7 +17,7 @@ RUN pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-co # Install IM RUN apt-get update && apt-get install --no-install-recommends -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev && \ pip install MySQL-python && \ - pip install IM==1.6.4 && \ + pip install IM==1.6.5 && \ apt-get remove -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \ apt-get autoremove -y && \ rm -rf /var/lib/apt/lists/* From 3dcddd2dd4441dfb74702cbc593f571bcce5946c Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 17 Nov 2017 09:19:43 +0100 Subject: [PATCH 27/30] Fix #497 --- IM/ansible_utils/ansible_launcher.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/IM/ansible_utils/ansible_launcher.py b/IM/ansible_utils/ansible_launcher.py index 8ed588141..f89b1f9c4 100755 --- a/IM/ansible_utils/ansible_launcher.py +++ b/IM/ansible_utils/ansible_launcher.py @@ -48,6 +48,7 @@ from ansible.vars import VariableManager from ansible.inventory import Inventory import ansible.inventory + from ansible.utils.vars import load_options_vars from .ansible_executor_v2 import IMPlaybookExecutor @@ -157,6 +158,7 @@ def get_play_prereqs_2(self, options): variable_manager = VariableManager() variable_manager.extra_vars = self.extra_vars + variable_manager.options_vars = load_options_vars(options, self.version_info(ansible_version)) # Add this to avoid the Ansible bug: no host vars as host is not in inventory # In version 2.0.1 it must be fixed @@ -186,9 +188,30 @@ def get_play_prereqs_2_4(self, options): # the code, ensuring a consistent view of global variables variable_manager = VariableManager(loader=loader, inventory=inventory) variable_manager.extra_vars = self.extra_vars + variable_manager.options_vars = load_options_vars(options, self.version_info(ansible_version)) return loader, inventory, variable_manager + def version_info(self, ansible_version_string): + ''' return full ansible version info ''' + ansible_ver = ansible_version_string.split()[0] + ansible_versions = ansible_ver.split('.') + for counter in range(len(ansible_versions)): + if ansible_versions[counter] == "": + ansible_versions[counter] = 0 + try: + ansible_versions[counter] = int(ansible_versions[counter]) + except: + pass + if len(ansible_versions) < 3: + for counter in range(len(ansible_versions), 3): + ansible_versions.append(0) + return {'string': ansible_version_string.strip(), + 'full': ansible_ver, + 'major': ansible_versions[0], + 'minor': ansible_versions[1], + 'revision': ansible_versions[2]} + def launch_playbook_v2(self): ''' run ansible-playbook operations v2.X''' # create parser for CLI options From 0d74d7f6b619128d7563a51965fd8be0dd48a6c5 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 17 Nov 2017 09:26:26 +0100 Subject: [PATCH 28/30] Add test to #497 --- test/files/test.radl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/files/test.radl b/test/files/test.radl index fcdc88d5f..c192076fb 100644 --- a/test/files/test.radl +++ b/test/files/test.radl @@ -69,6 +69,7 @@ configure test ( - easy_install: name=jmespath tasks: - debug: msg="NODENAME = {{NODENAME}}" + - debug: msg="VERSION = {{ansible_version.major}}" - shell: test -d "/mnt/disk/lost+found" @end From e7fe8095bc94287b765bd39da9ce4e639f2bf373 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 17 Nov 2017 10:16:31 +0100 Subject: [PATCH 29/30] Fix #497 --- IM/ansible_utils/ansible_launcher.py | 5 ++--- 1 file changed, 2 insertions(+), 3 
deletions(-) diff --git a/IM/ansible_utils/ansible_launcher.py b/IM/ansible_utils/ansible_launcher.py index f89b1f9c4..6981a4a90 100755 --- a/IM/ansible_utils/ansible_launcher.py +++ b/IM/ansible_utils/ansible_launcher.py @@ -48,7 +48,6 @@ from ansible.vars import VariableManager from ansible.inventory import Inventory import ansible.inventory - from ansible.utils.vars import load_options_vars from .ansible_executor_v2 import IMPlaybookExecutor @@ -158,7 +157,7 @@ def get_play_prereqs_2(self, options): variable_manager = VariableManager() variable_manager.extra_vars = self.extra_vars - variable_manager.options_vars = load_options_vars(options, self.version_info(ansible_version)) + variable_manager.options_vars = {'ansible_version': self.version_info(ansible_version)} # Add this to avoid the Ansible bug: no host vars as host is not in inventory # In version 2.0.1 it must be fixed @@ -188,7 +187,7 @@ def get_play_prereqs_2_4(self, options): # the code, ensuring a consistent view of global variables variable_manager = VariableManager(loader=loader, inventory=inventory) variable_manager.extra_vars = self.extra_vars - variable_manager.options_vars = load_options_vars(options, self.version_info(ansible_version)) + variable_manager.options_vars = {'ansible_version': self.version_info(ansible_version)} return loader, inventory, variable_manager From 32403d0a512c65d2286e6b9e7151aec07ee6565b Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 17 Nov 2017 10:26:08 +0100 Subject: [PATCH 30/30] Update changelog --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 7a336f494..9efe44bad 100644 --- a/changelog +++ b/changelog @@ -361,3 +361,4 @@ IM 1.6.4 IM 1.6.5 * Homogenize Inf ID log message * Fix error cpu.count parameter is ignored in OpenStack conn. + * Fix ansible_version is not available in ctxt process.
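A closing note on patches 27-30: Ansible normally injects a magic ansible_version variable into every play, but when IM drives the playbook executor itself, options_vars starts out empty, so a template such as {{ansible_version.major}} (the test added in patch 28) would be undefined. The new version_info() helper rebuilds that dict from the version string. A rough standalone re-implementation of the parsing logic (slightly simplified: non-numeric components become 0 here, whereas the patch leaves them unconverted):

def version_info(ansible_version_string):
    # Split e.g. "2.4.2.0" into numeric parts, padding to three components.
    ver = ansible_version_string.split()[0]
    nums = []
    for part in ver.split('.'):
        try:
            nums.append(int(part))
        except ValueError:
            nums.append(0)
    while len(nums) < 3:
        nums.append(0)
    return {'string': ansible_version_string.strip(), 'full': ver,
            'major': nums[0], 'minor': nums[1], 'revision': nums[2]}

assert version_info("2.4.2.0")['major'] == 2
assert version_info("1.9")['revision'] == 0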