diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index dfb7eade..d1e1c950 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -16,7 +16,7 @@ jobs: - name: Set up Python 3. uses: actions/setup-python@v5 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: python -m pip install tox @@ -24,6 +24,9 @@ jobs: - name: Check code style run: tox -e style + - name: Check security + run: tox -e bandit + - name: Unit tests run: tox -e coverage diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 6ce5d057..d82f7b9f 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -56,6 +56,7 @@ from IM.recipe import Recipe from IM.config import Config from radl.radl import system, contextualize_item +from IM.CtxtAgentBase import CtxtAgentBase class ConfManager(LoggerMixin, threading.Thread): @@ -397,7 +398,8 @@ def launch_ctxt_agent(self, vm, tasks): vault_password = vm.info.systems[0].getValue("vault.password") if vault_password: vault_export = "export VAULT_PASS='%s' && " % vault_password - (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + "python3 " + Config.REMOTE_CONF_DIR + + (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + CtxtAgentBase.VENV_DIR + + "/bin/python3 " + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + ctxt_agent_command + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + "/general_info.cfg " + remote_dir + "/" + os.path.basename(conf_file) + @@ -1382,7 +1384,7 @@ def configure_ansible(self, ssh, tmp_dir, ansible_version=None): if ssh.proxy_host.private_key: priv_key_filename = "/var/tmp/%s_%s_%s.pem" % (ssh.proxy_host.username, ssh.username, - ssh.host) + ssh.host) # nosec # copy it to the proxy host to enable im_client to use it # ssh.proxy_host.sftp_put_content(ssh.proxy_host.private_key, priv_key_filename) # ssh.proxy_host.sftp_chmod(priv_key_filename, 0o600) diff --git a/IM/CtxtAgentBase.py b/IM/CtxtAgentBase.py index 15457ccb..3a1a5226 
100644 --- a/IM/CtxtAgentBase.py +++ b/IM/CtxtAgentBase.py @@ -37,6 +37,7 @@ class CtxtAgentBase: # the ConfManager PLAYBOOK_RETRIES = 1 INTERNAL_PLAYBOOK_RETRIES = 1 + VENV_DIR = "/var/tmp/.ansible" # nosec def __init__(self, conf_data_filename): self.logger = None @@ -288,7 +289,7 @@ def add_proxy_host_line(self, vm_data): # we must create it in the localhost to use it later with ansible priv_key_filename = "/var/tmp/%s_%s_%s.pem" % (proxy['user'], vm_data['user'], - vm_data['ip']) + vm_data['ip']) # nosec with open(priv_key_filename, 'w') as f: f.write(proxy['private_key']) os.chmod(priv_key_filename, 0o600) @@ -501,14 +502,14 @@ def install_ansible_roles(self, general_conf_data, playbook): if galaxy_collections: now = str(int(time.time() * 100)) - filename = "/tmp/galaxy_collections_%s.yml" % now + filename = "/tmp/galaxy_collections_%s.yml" % now # nosec yaml_deps = yaml.safe_dump({"collections": galaxy_collections}, default_flow_style=True) self.logger.debug("Galaxy collections file: %s" % yaml_deps) task = {"copy": 'dest=%s content="%s"' % (filename, yaml_deps)} task["name"] = "Create YAML file to install the collections with ansible-galaxy" yaml_data[0]['tasks'].append(task) - task = {"command": "ansible-galaxy collection install -c -r %s" % filename} + task = {"command": self.VENV_DIR + "/bin/ansible-galaxy collection install -c -r %s" % filename} task["name"] = "Install galaxy collections" task["become"] = "yes" task["register"] = "collections_install" @@ -555,14 +556,14 @@ def install_ansible_roles(self, general_conf_data, playbook): if galaxy_dependencies: now = str(int(time.time() * 100)) - filename = "/tmp/galaxy_roles_%s.yml" % now + filename = "/tmp/galaxy_roles_%s.yml" % now # nosec yaml_deps = yaml.safe_dump(galaxy_dependencies, default_flow_style=True) self.logger.debug("Galaxy depencies file: %s" % yaml_deps) task = {"copy": 'dest=%s content="%s"' % (filename, yaml_deps)} task["name"] = "Create YAML file to install the roles with ansible-galaxy" 
yaml_data[0]['tasks'].append(task) - task = {"command": "ansible-galaxy install -c -r %s" % filename} + task = {"command": self.VENV_DIR + "/bin/ansible-galaxy install -c -r %s" % filename} task["name"] = "Install galaxy roles" task["become"] = "yes" task["register"] = "roles_install" @@ -597,7 +598,7 @@ def LaunchAnsiblePlaybook(self, output, remote_dir, playbook_file, vm, threads, gen_pk_file = pk_file else: if vm['private_key'] and not vm['passwd']: - gen_pk_file = "/tmp/pk_" + vm['ip'] + ".pem" + gen_pk_file = "/tmp/pk_" + vm['ip'] + ".pem" # nosec pk_out = open(gen_pk_file, 'w') pk_out.write(vm['private_key']) pk_out.close() diff --git a/IM/InfrastructureList.py b/IM/InfrastructureList.py index c9b1d99c..b743fce2 100644 --- a/IM/InfrastructureList.py +++ b/IM/InfrastructureList.py @@ -182,12 +182,13 @@ def _get_data_from_db(db_url, inf_id=None, auth=None): if db.db_type == DataBase.MONGO: res = db.find("inf_list", {"id": inf_id}, {data_field: True, "deleted": True}) else: - res = db.select("select " + data_field + ",deleted from inf_list where id = %s", (inf_id,)) + res = db.select("select " + data_field + ",deleted from inf_list where id = %s", # nosec + (inf_id,)) else: if db.db_type == DataBase.MONGO: res = db.find("inf_list", {"deleted": 0}, {data_field: True, "deleted": True}, [('_id', -1)]) else: - res = db.select("select " + data_field + ",deleted from inf_list where deleted = 0" + res = db.select("select " + data_field + ",deleted from inf_list where deleted = 0" # nosec " order by rowid desc") if len(res) > 0: for elem in res: @@ -296,7 +297,7 @@ def _get_inf_ids_from_db(auth=None): where = "where deleted = 0 and (%s)" % like else: where = "where deleted = 0" - res = db.select("select id from inf_list %s order by rowid desc" % where) + res = db.select("select id from inf_list %s order by rowid desc" % where) # nosec for elem in res: if db.db_type == DataBase.MONGO: inf_list.append(elem['id']) diff --git a/IM/InfrastructureManager.py 
b/IM/InfrastructureManager.py index ff3213a4..eab08d99 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -2056,6 +2056,8 @@ def GetStats(init_date, end_date, auth): """ # First check the auth data auth = InfrastructureManager.check_auth_data(auth) + if not init_date: + init_date = "1970-01-01" stats = Stats.get_stats(init_date, end_date, auth) if stats is None: raise Exception("ERROR connecting with the database!.") diff --git a/IM/SSH.py b/IM/SSH.py index 966793fc..1fda8227 100644 --- a/IM/SSH.py +++ b/IM/SSH.py @@ -77,7 +77,7 @@ def run(self): channel = self.client.get_transport().open_session() if self.ssh.tty: channel.get_pty() - channel.exec_command(self.command + "\n") + channel.exec_command(self.command + "\n") # nosec stdout = channel.makefile() stderr = channel.makefile_stderr() exit_status = channel.recv_exit_status() @@ -118,8 +118,10 @@ def __init__(self, host, user, passwd=None, private_key=None, port=22, proxy_hos private_key_obj = StringIO() if os.path.isfile(private_key): pkfile = open(private_key) + self.private_key = "" for line in pkfile.readlines(): private_key_obj.write(line) + self.private_key += line pkfile.close() else: # Avoid windows line endings @@ -128,18 +130,20 @@ def __init__(self, host, user, passwd=None, private_key=None, port=22, proxy_hos if not private_key.endswith("\n"): private_key += "\n" private_key_obj.write(private_key) + self.private_key = private_key - self.private_key = private_key private_key_obj.seek(0) + self.private_key_obj = self._load_private_key(private_key_obj) - if "BEGIN RSA PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.RSAKey.from_private_key(private_key_obj) - elif "BEGIN DSA PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.DSSKey.from_private_key(private_key_obj) - elif "BEGIN EC PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.ECDSAKey.from_private_key(private_key_obj) - elif "BEGIN OPENSSH PRIVATE KEY" in private_key: - 
self.private_key_obj = paramiko.Ed25519Key.from_private_key(private_key_obj) + @staticmethod + def _load_private_key(private_key_obj): + """ Load a private key from a file-like object""" + for kype in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey, paramiko.Ed25519Key]: + try: + return kype.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) + raise Exception("Invalid private key") def __del__(self): self.close() @@ -178,13 +182,13 @@ def connect(self, time_out=None): return self.client, self.proxy client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # nosec proxy = None proxy_channel = None if self.proxy_host: proxy = paramiko.SSHClient() - proxy.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + proxy.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # nosec proxy.connect(self.proxy_host.host, self.proxy_host.port, username=self.proxy_host.username, password=self.proxy_host.password, pkey=self.proxy_host.private_key_obj) proxy_transport = proxy.get_transport() @@ -260,7 +264,7 @@ def execute(self, command, timeout=None): if self.tty: channel.get_pty() - channel.exec_command(command + "\n") + channel.exec_command(command + "\n") # nosec stdout = channel.makefile() stderr = channel.makefile_stderr() exit_status = channel.recv_exit_status() diff --git a/IM/Stats.py b/IM/Stats.py index f5a53b05..b0f13f62 100644 --- a/IM/Stats.py +++ b/IM/Stats.py @@ -35,7 +35,7 @@ class Stats(): @staticmethod def _get_data(str_data, init_date=None, end_date=None): dic = json.loads(str_data) - resp = {'creation_date': None} + resp = {'creation_date': ''} if 'creation_date' in dic and dic['creation_date']: creation_date = datetime.datetime.fromtimestamp(float(dic['creation_date'])) resp['creation_date'] = str(creation_date) @@ -44,7 +44,7 @@ def _get_data(str_data, init_date=None, end_date=None): if end_date and creation_date > end_date: 
return None - resp['tosca_name'] = None + resp['tosca_name'] = '' if 'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']: try: tosca = yaml.safe_load(dic['extra_info']['TOSCA']) @@ -56,8 +56,8 @@ def _get_data(str_data, init_date=None, end_date=None): resp['vm_count'] = 0 resp['cpu_count'] = 0 resp['memory_size'] = 0 - resp['cloud_type'] = None - resp['cloud_host'] = None + resp['cloud_type'] = '' + resp['cloud_host'] = '' resp['hybrid'] = False resp['deleted'] = True if 'deleted' in dic and dic['deleted'] else False for str_vm_data in dic['vm_list']: @@ -125,7 +125,7 @@ def get_stats(init_date="1970-01-01", end_date=None, auth=None): if like: where += " and" where += " date <= '%s'" % end_date - res = db.select("select data, date, id from inf_list %s order by rowid desc" % where) + res = db.select("select data, date, id from inf_list %s order by rowid desc" % where) # nosec for elem in res: if db.db_type == DataBase.MONGO: diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 3210365f..9b6ebc34 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -1135,7 +1135,7 @@ def get_ssh_command(self): reverse_opt = "-R %d:localhost:22" % (self.SSH_REVERSE_BASE_PORT + self.creation_im_id) if ssh.private_key: - filename = "/tmp/%s_%s.pem" % (self.inf.id, self.im_id) + filename = "/tmp/%s_%s.pem" % (self.inf.id, self.im_id) # nosec command = 'echo "%s" > %s && chmod 400 %s ' % (ssh.private_key, filename, filename) command += ('&& ssh -N %s -p %s -i %s -o "UserKnownHostsFile=/dev/null"' ' -o "StrictHostKeyChecking=no" %s@%s &' % (reverse_opt, diff --git a/IM/__init__.py b/IM/__init__.py index ab315621..d03e90b7 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,7 +19,7 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.17.1' +__version__ = '1.18.0' __author__ = 'Miguel 
Caballer' diff --git a/IM/config.py b/IM/config.py index 80478fa2..b71bc179 100644 --- a/IM/config.py +++ b/IM/config.py @@ -58,10 +58,10 @@ class Config: WAIT_SSH_ACCCESS_TIMEOUT = 300 WAIT_PUBLIC_IP_TIMEOUT = 90 XMLRCP_PORT = 8899 - XMLRCP_ADDRESS = "0.0.0.0" + XMLRCP_ADDRESS = "0.0.0.0" # nosec ACTIVATE_REST = True REST_PORT = 8800 - REST_ADDRESS = "0.0.0.0" + REST_ADDRESS = "0.0.0.0" # nosec USER_DB = "" IM_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_FILE = '/var/log/im/inf.log' @@ -85,7 +85,7 @@ class Config: VM_INFO_UPDATE_FREQUENCY = 10 # This value must be always higher than VM_INFO_UPDATE_FREQUENCY VM_INFO_UPDATE_ERROR_GRACE_PERIOD = 120 - REMOTE_CONF_DIR = "/var/tmp/.im" + REMOTE_CONF_DIR = "/var/tmp/.im" # nosec MAX_SSH_ERRORS = 5 PRIVATE_NET_MASKS = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "198.18.0.0/15"] diff --git a/IM/connectors/CloudConnector.py b/IM/connectors/CloudConnector.py index 01d7a426..ad275b56 100644 --- a/IM/connectors/CloudConnector.py +++ b/IM/connectors/CloudConnector.py @@ -738,10 +738,6 @@ def manage_dns_entries(self, op, vm, auth_data, extra_args=None): vm.dns_entries = [] if op == "add": dns_entries = [entry for entry in self.get_dns_entries(vm) if entry not in vm.dns_entries] - dns_entries = [] - for entry in self.get_dns_entries(vm): - if entry not in vm.dns_entries: - dns_entries.append(entry) else: dns_entries = list(vm.dns_entries) if dns_entries: diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index a3855550..d93a708c 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -182,7 +182,7 @@ def _generate_create_svc_request_data(self, image_name, outports, vm, ssh_port, command += " ; " command += "mkdir /var/run/sshd" command += " ; " - command += "sed -i '/PermitRootLogin/c\PermitRootLogin yes' /etc/ssh/sshd_config" + command += "sed -i '/PermitRootLogin/c\\PermitRootLogin yes' /etc/ssh/sshd_config" command += " ; " command += 
"rm -f /etc/ssh/ssh_host_rsa_key*" command += " ; " @@ -190,7 +190,8 @@ def _generate_create_svc_request_data(self, image_name, outports, vm, ssh_port, command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += ("sed 's@session\\s*required\\s*pam_loginuid.so@session " + + "optional pam_loginuid.so@g' -i /etc/pam.d/sshd") command += " ; " command += " /usr/sbin/sshd -D" @@ -264,7 +265,8 @@ def _generate_create_cont_request_data(self, image_name, outports, vm, ssh_port) command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += ("sed 's@session\\s*required\\s*pam_loginuid.so@session" + + " optional pam_loginuid.so@g' -i /etc/pam.d/sshd") command += " ; " command += " /usr/sbin/sshd -D" diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 1b6fd17b..0daedc7a 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1,5 +1,5 @@ # IM - Infrastructure Manager -# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia +# Copyright (C) 2024 - GRyCAP - Universitat Politecnica de Valencia # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,11 +20,9 @@ from netaddr import IPNetwork, IPAddress, spanning_cidr try: - import boto.ec2 - import boto.vpc - import boto.route53 + import boto3 except Exception as ex: - print("WARN: Boto library not correctly installed. EC2CloudConnector will not work!.") + print("WARN: Boto3 library not correctly installed. 
EC2CloudConnector will not work!.") print(ex) try: @@ -104,7 +102,6 @@ class EC2CloudConnector(CloudConnector): def __init__(self, cloud_info, inf): self.connection = None - self.route53_connection = None self.auth = None CloudConnector.__init__(self, cloud_info, inf) @@ -163,14 +160,14 @@ def update_system_info_from_instance(system, instance_type): system.addFeature(Feature("gpu.model", "=", instance_type.gpu_model), conflict="other", missing="other") # Get the EC2 connection object - def get_connection(self, region_name, auth_data): + def get_connection(self, region_name, auth_data, service_name, object_type='client'): """ Get a :py:class:`boto.ec2.connection` to interact with. Arguments: - region_name(str): EC2 region to connect. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - Returns: a :py:class:`boto.ec2.connection` or None in case of error + Returns: a :py:class:`boto3.EC2.Client` or None in case of error """ auths = auth_data.getAuthInfo(self.type) if not auths: @@ -179,63 +176,28 @@ def get_connection(self, region_name, auth_data): auth = auths[0] if self.connection and self.auth.compare(auth_data, self.type): - return self.connection + if object_type == 'resource': + return self.connection.resource(service_name) + else: + return self.connection.client(service_name) else: self.auth = auth_data - conn = None try: if 'username' in auth and 'password' in auth: - region = boto.ec2.get_region(region_name) - if region: - token = auth.get('token') - conn = boto.vpc.VPCConnection(aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - security_token=token, - region=region) + if region_name != 'universal': + region_names = boto3.session.Session().get_available_regions('ec2') + if region_name not in region_names: + raise Exception("Incorrect region name: " + region_name) + + session = boto3.session.Session(region_name=region_name, + aws_access_key_id=auth['username'], + 
aws_secret_access_key=auth['password'], + aws_session_token=auth.get('token')) + self.connection = session + if object_type == 'resource': + return session.resource(service_name) else: - raise Exception( - "Incorrect region name: " + region_name) - else: - self.log_error("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - raise Exception("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - - except Exception as ex: - self.log_exception( - "Error getting the region " + region_name) - raise Exception("Error getting the region " + - region_name + ": " + str(ex)) - - self.connection = conn - return conn - - # Get the Route53 connection object - def get_route53_connection(self, region_name, auth_data): - """ - Get a :py:class:`boto.route53.connection` to interact with. - - Arguments: - - region_name(str): AWS region to connect. - - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. 
- Returns: a :py:class:`boto.route53.connection` or None in case of error - """ - auths = auth_data.getAuthInfo(self.type) - if not auths: - raise Exception("No auth data has been specified to EC2.") - else: - auth = auths[0] - - if self.route53_connection and self.auth.compare(auth_data, self.type): - return self.route53_connection - else: - self.auth = auth_data - conn = None - try: - if 'username' in auth and 'password' in auth: - conn = boto.route53.connect_to_region(region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) + return session.client(service_name) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -243,11 +205,8 @@ def get_route53_connection(self, region_name, auth_data): "username (Access Key) and password (Secret Key)") except Exception as ex: - self.log_exception("Error conneting Route53 in region " + region_name) - raise Exception("Error conneting Route53 in region" + region_name + ": " + str(ex)) - - self.route53_connection = conn - return conn + self.log_exception("Error getting the region " + region_name) + raise Exception("Error getting the region " + region_name + ": " + str(ex)) # path format: aws://eu-west-1/ami-00685b74 @staticmethod @@ -342,10 +301,28 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.get_all_security_groups(filters={'group-name': sg_name})[0] + return conn.describe_security_groups(Filters=[{'Name': 'group-name', + 'Values': [sg_name]}])['SecurityGroups'][0] except Exception: return None + @staticmethod + def _get_default_security_rules(sg): + return [ + { + "IpProtocol": "tcp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + { + "IpProtocol": "udp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + ] + def create_security_groups(self, conn, inf, radl, 
vpc): res = [] try: @@ -360,10 +337,12 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: %s" % sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpcId=vpc) # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + IpPermissions=self._get_default_security_rules(sg)) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -373,7 +352,7 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) while system.getValue("net_interface." + str(i) + ".connection"): network_name = system.getValue("net_interface." 
+ str(i) + ".connection") @@ -389,7 +368,9 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: " + sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpcId=vpc) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -399,12 +380,12 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) try: # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + IpPermissions=self._get_default_security_rules(sg)) except Exception as addex: self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) @@ -415,17 +396,23 @@ def create_security_groups(self, conn, inf, radl, vpc): for outport in outports: if outport.is_range(): - try: - sg.authorize(outport.get_protocol(), outport.get_port_init(), - outport.get_port_end(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) + from_port = outport.get_port_init() + to_port = outport.get_port_end() else: - try: - sg.authorize(outport.get_protocol(), outport.get_remote_port(), - outport.get_remote_port(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. 
Probably the rules exists:" + str(addex)) + from_port = outport.get_remote_port() + to_port = outport.get_remote_port() + + try: + conn.authorize_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': outport.get_protocol(), + 'FromPort': from_port, + 'ToPort': to_port, + 'IpRanges': [{'CidrIp': outport.get_remote_cidr()}]} + ]) + except Exception as addex: + self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) i += 1 except Exception as ex: @@ -444,13 +431,21 @@ def get_default_subnet(conn): vpc_id = None subnet_id = None - for vpc in conn.get_all_vpcs(): - if vpc.is_default or ('Name' in vpc.tags and vpc.tags['Name'] == "default"): - vpc_id = vpc.id - for subnet in conn.get_all_subnets(filters={"vpcId": vpc_id}): - subnet_id = subnet.id - break - break + vpcs = conn.describe_vpcs(Filters=[{'Name': 'is-default', 'Values': ['true']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + # Just in case there is no default VPC, in some old accounts + # get the VPC named default + if not vpc_id: + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': ['default']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + if vpc_id: + subnets = conn.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['Subnets'] + if subnets: + subnet_id = subnets[0]['SubnetId'] return vpc_id, subnet_id @@ -478,11 +473,12 @@ def get_vpc_cidr(self, radl, conn, inf): Get a common CIDR in all the RADL nets """ nets = [] - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()] + nets, + subnets + nets, inf, 127) nets.append(net_cidr) @@ -505,73 +501,85 @@ def create_networks(self, conn, 
radl, inf): else: vpc_cird = str(common_cird) vpc_id = None - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()], + subnets, inf, 127) net.delValue('cidr') # First create the VPC if vpc_id is None: # Check if it already exists - vpcs = conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}) + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf.id]}])['Vpcs'] if vpcs: - vpc_id = vpcs[0].id + vpc_id = vpcs[0]['VpcId'] self.log_debug("VPC %s exists. Do not create." % vpc_id) else: # if not create it self.log_info("Creating VPC with cidr: %s." % vpc_cird) - vpc = conn.create_vpc(vpc_cird) - time.sleep(1) - vpc.add_tag("IM-INFRA-ID", inf.id) - vpc_id = vpc.id + vpc = conn.create_vpc(CidrBlock=vpc_cird, + TagSpecifications=[{'ResourceType': 'vpc', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) + vpc_id = vpc['Vpc']['VpcId'] self.log_info("VPC %s created." % vpc_id) self.log_info("Creating Internet Gateway.") - ig = conn.create_internet_gateway() - time.sleep(1) - ig.add_tag("IM-INFRA-ID", inf.id) - self.log_info("Internet Gateway %s created." % ig.id) - conn.attach_internet_gateway(ig.id, vpc_id) + ig = conn.create_internet_gateway(TagSpecifications=[{'ResourceType': 'internet-gateway', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) + ig_id = ig['InternetGateway']['InternetGatewayId'] + self.log_info("Internet Gateway %s created." 
% ig_id) + conn.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) self.log_info("Adding route to the IG.") - for route_table in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - conn.create_route(route_table.id, "0.0.0.0/0", ig.id) + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: + conn.create_route(RouteTableId=rt['RouteTableId'], + DestinationCidrBlock="0.0.0.0/0", + GatewayId=ig_id) # Now create the subnet # Check if it already exists - subnets = conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id, - "tag:IM-SUBNET-ID": net.id}) + subnets = conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [inf.id]}, + {'Name': 'tag:IM-SUBNET-ID', + 'Values': [net.id]}])['Subnets'] if subnets: subnet = subnets[0] self.log_debug("Subnet %s exists. Do not create." % net.id) net.setValue('cidr', subnet.cidr_block) else: self.log_info("Create subnet for net %s." % net.id) - subnet = conn.create_subnet(vpc_id, net_cidr) - self.log_info("Subnet %s created." % subnet.id) - time.sleep(1) - subnet.add_tag("IM-INFRA-ID", inf.id) - subnet.add_tag("IM-SUBNET-ID", net.id) + subnet = conn.create_subnet(VpcId=vpc_id, CidrBlock=net_cidr, + TagSpecifications=[{'ResourceType': 'subnet', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}, + {'Key': 'IM-SUBNET-ID', + 'Value': net.id}]}]) + self.log_info("Subnet %s created." 
% subnet['Subnet']['SubnetId']) net.setValue('cidr', net_cidr) # Set also the cidr in the inf RADL inf.radl.get_network_by_id(net.id).setValue('cidr', net_cidr) - net.setValue('provider_id', "%s.%s" % (vpc_id, subnet.id)) + net.setValue('provider_id', "%s.%s" % (vpc_id, subnet['Subnet']['SubnetId'])) except Exception as ex: self.log_exception("Error creating subnets or vpc.") try: - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting subnet: %s" % subnet.id) - conn.delete_subnet(subnet.id) - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting vpc: %s" % vpc.id) - conn.delete_vpc(vpc.id) - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting Internet Gateway: %s" % ig.id) - conn.delete_internet_gateways(ig.id) + for subnet in conn.describe_subnets(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['Subnets']: + self.log_info("Deleting subnet: %s" % subnet['Subnets']['SubnetId']) + conn.delete_subnet(SubnetId=subnet['Subnets']['SubnetId']) + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['Vpcs']: + self.log_info("Deleting vpc: %s" % vpc_id) + conn.delete_vpc(VpcId=vpc_id) + for ig in conn.describe_internet_gateways(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['InternetGateways']: + self.log_info("Deleting Internet Gateway: %s" % ig_id) + conn.delete_internet_gateways(InternetGatewayId=ig_id) except Exception: self.log_exception("Error deleting subnets or vpc.") raise ex @@ -584,10 +592,10 @@ def get_networks(self, conn, radl): if provider_id: parts = provider_id.split(".") if len(parts) == 2 and parts[0].startswith("vpc-") and parts[1].startswith("subnet-"): - vpc = conn.get_all_vpcs([parts[0]]) - subnet = conn.get_all_subnets([parts[1]]) + vpc = conn.describe_vpcs(Filters=[{'Name': 'vpc-id', 'Values': [parts[0]]}])['Vpcs'] + subnet = 
conn.describe_subnets(Filters=[{'Name': 'subnet-id', 'Values': [parts[1]]}])['Subnets'] if vpc and subnet: - return vpc[0].id, subnet[0].id + return vpc[0]['VpcId'], subnet[0]['SubnetId'] elif vpc: raise Exception("Incorrect subnet value in provider_id value: %s" % provider_id) else: @@ -617,7 +625,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData(system.getValue("disk.0.image.url")) self.log_info("Connecting with the region: " + region_name) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') res = [] spot = False @@ -651,16 +659,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): for i in range(num_vm): res.append((False, "Error no instance type available for the requirements.")) - image = conn.get_image(ami) + image = conn.describe_images(ImageIds=[ami])['Images'] if not image: for i in range(num_vm): res.append((False, "Incorrect AMI selected")) return res + image = image[0] block_device_name = None - for name, device in image.block_device_mapping.items(): - if device.snapshot_id or device.volume_id: - block_device_name = name + for device in image['BlockDeviceMappings']: + if device.get('Ebs', {}).get('SnapshotId'): + block_device_name = device.get('DeviceName') if not block_device_name: self.log_error("Error getting correct block_device name from AMI: " + str(ami)) @@ -714,7 +723,6 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): inf.add_vm(vm) user_data = self.get_cloud_init_data(radl, vm, public_key, user) - bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping(conn) # Get data for the root disk size = None disk_type = "standard" @@ -722,22 +730,41 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): disk_type = system.getValue("disk.0.type") if system.getValue("disk.0.size"): size = system.getFeature("disk.0.size").getValue('G') - bdm[block_device_name] = 
boto.ec2.blockdevicemapping.BlockDeviceType(volume_type=disk_type, - size=size, - delete_on_termination=True) + bdm = [ + { + 'DeviceName': block_device_name, + 'Ebs': { + 'DeleteOnTermination': True, + 'VolumeType': disk_type + } + } + ] + if size: + bdm[0]['Ebs']['VolumeSize'] = size volumes = self.get_volumes(conn, vm) - for device, (size, snapshot_id, volume_id, disk_type) in volumes.items(): - bdm[device] = boto.ec2.blockdevicemapping.BlockDeviceType(snapshot_id=snapshot_id, - volume_id=volume_id, - volume_type=disk_type, - size=size, delete_on_termination=True) + for device, (size, snapshot_id, _, disk_type) in volumes.items(): + bd = { + 'DeviceName': device, + 'Ebs': { + 'DeleteOnTermination': True, + } + } + if size: + bd['Ebs']['VolumeSize'] = size + if snapshot_id: + bd['Ebs']['SnapshotId'] = snapshot_id + if disk_type: + bd['Ebs']['VolumeType'] = disk_type + bdm.append(bd) if spot: self.log_info("Launching a spot instance") err_msg += " a spot instance " err_msg += " of type: %s " % instance_type.name price = system.getValue("price") + if price: + price = str(price) # Realizamos el request de spot instances if system.getValue('availability_zone'): @@ -745,27 +772,42 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): else: availability_zone = 'us-east-1c' historical_price = 1000.0 - availability_zone_list = conn.get_all_zones() + availability_zone_list = conn.describe_availability_zones()['AvailabilityZones'] for zone in availability_zone_list: - history = conn.get_spot_price_history(instance_type=instance_type.name, - product_description=operative_system, - availability_zone=zone.name, - max_results=1) - self.log_debug("Spot price history for the region " + zone.name) + history = conn.describe_spot_price_history(InstanceTypes=[instance_type.name], + ProductDescriptions=[operative_system], + Filters=[{'Name': 'availability-zone', + 'Values': [zone['ZoneName']]}], + MaxResults=1)['SpotPriceHistory'] + + self.log_debug("Spot price history 
for the region " + zone['ZoneName']) self.log_debug(history) - if history and history[0].price < historical_price: - historical_price = history[0].price - availability_zone = zone.name + if history and float(history[0]['SpotPrice']) < historical_price: + historical_price = float(history[0]['SpotPrice']) + availability_zone = zone['ZoneName'] self.log_info("Launching the spot request in the zone " + availability_zone) - request = conn.request_spot_instances(price=price, image_id=image.id, count=1, - type='one-time', instance_type=instance_type.name, - placement=availability_zone, key_name=keypair_name, - security_group_ids=sg_ids, block_device_map=bdm, - subnet_id=subnet, user_data=user_data) + launch_spec = {'ImageId': image['ImageId'], + 'InstanceType': instance_type.name, + 'SecurityGroupIds': sg_ids, + 'BlockDeviceMappings': bdm, + 'SubnetId': subnet, + 'UserData': user_data} + + if keypair_name: + launch_spec['KeyName'] = keypair_name + if availability_zone: + launch_spec['Placement'] = {'AvailabilityZone': availability_zone} + + params = {'InstanceCount': 1, + 'Type': 'one-time', + 'LaunchSpecification': launch_spec} + if price: + params['SpotPrice'] = price + request = conn.request_spot_instances(**params) - if request: - ec2_vm_id = region_name + ";" + request[0].id + if request['SpotInstanceRequests']: + ec2_vm_id = region_name + ";" + request['SpotInstanceRequests'][0]['SpotInstanceRequestId'] self.log_debug("RADL:") self.log_debug(system) @@ -783,23 +825,42 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): err_msg += " an ondemand instance " err_msg += " of type: %s " % instance_type.name - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=subnet, - groups=sg_ids, - associate_public_ip_address=add_public_ip) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + interfaces = [ + { + 'DeviceIndex': 0, + 'SubnetId': subnet, + 'Groups': sg_ids, + 'AssociatePublicIpAddress': 
add_public_ip, + 'DeleteOnTermination': True + } + ] + + params = {'ImageId': image['ImageId'], + 'MinCount': 1, + 'MaxCount': 1, + 'InstanceType': instance_type.name, + 'NetworkInterfaces': interfaces, + 'BlockDeviceMappings': bdm, + 'UserData': user_data} + + if keypair_name: + params['KeyName'] = keypair_name + if placement: + params['Placement'] = {'AvailabilityZone': placement} + + im_username = "im_user" + if auth_data.getAuthInfo('InfrastructureManager'): + im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] + instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, + {'Key': 'IM-USER', 'Value': im_username}] + for key, value in tags.items(): + instace_tags.append({'Key': key, 'Value': value}) + params['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': instace_tags}] - reservation = conn.run_instances(image.id, min_count=1, max_count=1, key_name=keypair_name, - instance_type=instance_type.name, network_interfaces=interfaces, - placement=placement, block_device_map=bdm, user_data=user_data) + instances = conn.run_instances(**params)['Instances'] - if len(reservation.instances) == 1: - time.sleep(1) - instance = reservation.instances[0] - instance.add_tag("Name", self.gen_instance_name(system)) - for key, value in tags.items(): - instance.add_tag(key, value) - ec2_vm_id = region_name + ";" + instance.id + if instances: + ec2_vm_id = region_name + ";" + instances[0]['InstanceId'] self.log_debug("RADL:") self.log_debug(system) @@ -819,13 +880,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 - # if all the VMs have failed, remove the sgs + # if all the VMs have failed, remove the sgs and nets if all_failed: + try: + self.delete_networks(conn, inf.id) + except Exception: + self.log_exception("Error deleting networks.") if sg_ids: for sgid in sg_ids: self.log_info("Remove the SG: %s" % sgid) try: - conn.delete_security_group(group_id=sgid) + conn.delete_security_group(GroupId=sgid) 
except Exception: self.log_exception("Error deleting SG.") @@ -836,29 +901,29 @@ def create_volume(self, conn, disk_size, placement=None, vol_type=None, timeout= Create an EBS volume Arguments: - - conn(:py:class:`boto.ec2.connection`): object to connect to EC2 API. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - disk_size(int): The size of the new volume, in GiB - placement(str): The availability zone in which the Volume will be created. - type(str): Type of the volume: standard | io1 | gp2. - timeout(int): Time needed to create the volume. - Returns: a :py:class:`boto.ec2.volume.Volume` of the new volume + Returns: a :py:dict:`boto3.EC2.Volume` of the new volume """ if placement is None: - placement = conn.get_all_zones()[0] - volume = conn.create_volume(disk_size, placement, volume_type=vol_type) + placement = conn.describe_availability_zones()['AvailabilityZones'][0]['ZoneName'] + volume = conn.create_volume(Size=disk_size, AvailabilityZone=placement, VolumeType=vol_type) cont = 0 err_states = ["error"] - while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: - self.log_info("State: " + str(volume.status)) + while str(volume['State']) != 'available' and str(volume['State']) not in err_states and cont < timeout: + self.log_info("State: " + str(volume['State'])) cont += 2 time.sleep(2) - volume = conn.get_all_volumes([volume.id])[0] + volume = conn.describe_volumes(VolumeIds=[volume['VolumeId']])['Volumes'][0] - if str(volume.status) == 'available': + if str(volume['State']) == 'available': return volume else: - self.log_error("Error creating the volume %s, deleting it" % (volume.id)) - conn.delete_volume(volume.id) + self.log_error("Error creating the volume %s, deleting it" % (volume['VolumeId'])) + conn.delete_volume(VolumeId=volume['VolumeId']) return None @staticmethod @@ -888,17 +953,21 @@ def get_volumes(conn, vm): if disk_url: _, elem_id = EC2CloudConnector.getAMIData(disk_url) if elem_id.startswith('snap-'): - snapshot_id = conn.get_all_snapshots([elem_id])[0].id + snapshot = 
conn.describe_snapshots(SnapshotIds=[elem_id])['Snapshots'] + if snapshot: + snapshot_id = snapshot[0]['SnapshotId'] elif elem_id.startswith('vol-'): - volume_id = conn.get_all_volumes([elem_id])[0].id + volume = conn.describe_volumes(VolumeIds=[elem_id])['Volumes'] + if volume: + volume_id = volume[0]['VolumeId'] else: - snapshot = conn.get_all_snapshots(filters={'tag:Name': elem_id}) + snapshot = conn.describe_snapshots(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Snapshots'] if snapshot: - snapshot_id = snapshot[0].id + snapshot_id = snapshot[0]['SnapshotId'] else: - volume = conn.get_all_volumes(filters={'tag:Name': elem_id}) + volume = conn.describe_volumes(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Volumes'] if volume: - volume_id = volume[0].id + volume_id = volume[0]['VolumeId'] else: raise Exception("No snapshot/volume found with name: %s" % elem_id) else: @@ -923,29 +992,29 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): - id(str): ID of the EC2 instance. - region_name(str): Region name to search the instance. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. 
- Returns: a :py:class:`boto.ec2.instance` of found instance or None if it was not found + Returns: a :py:class:`boto3.EC2.Instance` of found instance or None if it was not found """ instance = None try: - conn = self.get_connection(region_name, auth_data) - - reservations = conn.get_all_instances([instance_id]) - instance = reservations[0].instances[0] + resource = self.get_connection(region_name, auth_data, 'ec2', 'resource') + instance = resource.Instance(instance_id) + instance.load() except Exception: - self.log_error("Error getting instance id: %s" % instance_id) + self.log_exception("Error getting instance id: %s" % instance_id) return instance - def add_elastic_ip(self, vm, instance, fixed_ip=None): + def add_elastic_ip(self, vm, instance, conn, fixed_ip=None): """ Add an elastic IP to an instance Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.EC2.Instance`): object to connect to EC2 instance. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - fixed_ip(str, optional): specifies a fixed IP to add to the instance. - Returns: a :py:class:`boto.ec2.address.Address` added or None if some problem occur. + Returns: a :py:dict:`boto3.EC2.Address` added or None if some problem occur. 
""" if vm.state == VirtualMachine.RUNNING and "elastic_ip" not in vm.__dict__.keys(): # Flag to set that this VM has created (or is creating) the elastic @@ -955,8 +1024,8 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address = None self.log_info("Add an Elastic IP") if fixed_ip: - for address in instance.connection.get_all_addresses(): - if str(address.public_ip) == fixed_ip: + for address in conn.describe_addresses()['Addresses']: + if str(address['PublicIp']) == fixed_ip: pub_address = address if pub_address: @@ -965,13 +1034,9 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). Ignore it.") return None else: - provider_id = self.get_net_provider_id(vm.info) - if provider_id: - pub_address = instance.connection.allocate_address(domain="vpc") - instance.connection.associate_address(instance.id, allocation_id=pub_address.allocation_id) - else: - pub_address = instance.connection.allocate_address() - instance.connection.associate_address(instance.id, pub_address.public_ip) + pub_address = conn.allocate_address(Domain='vpc') + + conn.associate_address(InstanceId=instance.id, AllocationId=pub_address['AllocationId']) self.log_debug(pub_address) return pub_address @@ -979,7 +1044,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_exception("Error adding an Elastic IP to VM ID: " + str(vm.id)) if pub_address: self.log_exception("The Elastic IP was allocated, release it.") - pub_address.release() + conn.release_address(AllocationId=pub_address['AllocationId']) return None else: self.log_info("The VM is not running, not adding an Elastic IP.") @@ -1017,40 +1082,41 @@ def delete_elastic_ips(self, conn, vm, timeout=240): if pub_ip in fixed_ips: self.log_info("%s is a fixed IP, it is not released" % pub_ip) else: - for address in conn.get_all_addresses(filters={"public-ip": pub_ip}): - self.log_info("This VM has a Elastic IP %s." 
% address.public_ip) + for address in conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses']: + self.log_info("This VM has a Elastic IP %s." % address['PublicIp']) cont = 0 - while address.instance_id and cont < timeout: + while address['InstanceId'] and cont < timeout: cont += 3 try: self.log_debug("Disassociate it.") - address.disassociate() + conn.disassociate_address(PublicIp=address['PublicIp']) except Exception: self.log_debug("Error disassociating the IP.") - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] + address = conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses'][0] self.log_info("It is attached. Wait.") time.sleep(3) - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] self.log_info("Now release it.") - address.release() + conn.release_address(AllocationId=address['AllocationId']) - def setIPsFromInstance(self, vm, instance): + def setIPsFromInstance(self, vm, instance, conn): """ Adapt the RADL information of the VM to the real IPs assigned by EC2 Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.ec2.Instance`): object to connect to EC2 instance. 
""" vm_system = vm.info.systems[0] num_pub_nets = num_nets = 0 public_ips = [] private_ips = [] - if (instance.ip_address is not None and len(instance.ip_address) > 0 and - instance.ip_address != instance.private_ip_address): - public_ips = [instance.ip_address] + if (instance.public_ip_address is not None and len(instance.public_ip_address) > 0 and + instance.public_ip_address != instance.private_ip_address): + public_ips = [instance.public_ip_address] num_nets += 1 num_pub_nets = 1 if instance.private_ip_address is not None and len(instance.private_ip_address) > 0: @@ -1071,13 +1137,13 @@ def setIPsFromInstance(self, vm, instance): elastic_ips = [] # Get the elastic IPs assigned (there must be only 1) - for address in instance.connection.get_all_addresses(): - if address.instance_id == instance.id: - elastic_ips.append(str(address.public_ip)) + for address in conn.describe_addresses()['Addresses']: + if address['InstanceId'] == instance.id: + elastic_ips.append(str(address['PublicIp'])) # It will be used if it is different to the public IP of the # instance - if str(address.public_ip) != instance.ip_address: - vm_system.setValue('net_interface.' + str(num_nets) + '.ip', str(instance.ip_address)) + if str(address['PublicIp']) != instance.public_ip_address: + vm_system.setValue('net_interface.' + str(num_nets) + '.ip', str(instance.public_ip_address)) vm_system.setValue('net_interface.' 
+ str(num_nets) + '.connection', public_net.id) num_pub_nets += 1 @@ -1097,14 +1163,14 @@ def setIPsFromInstance(self, vm, instance): # It is a fixed IP if ip not in elastic_ips: # It has not been created yet, do it - self.add_elastic_ip(vm, instance, ip) + self.add_elastic_ip(vm, instance, conn, ip) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break else: # Check if we have enough public IPs if num >= num_pub_nets: - self.add_elastic_ip(vm, instance) + self.add_elastic_ip(vm, instance, conn) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break @@ -1125,13 +1191,15 @@ def addRouterInstance(self, vm, conn): if network.getValue('router'): if not route_table_id: vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [vm.inf.id]}])['Vpcs']: + vpc_id = vpc['VpcId'] if not vpc_id: self.log_error("No VPC found.") return False - for rt in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - route_table_id = rt.id + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: + route_table_id = rt['RouteTableId'] if not route_table_id: self.log_error("No Route Table found with name.") @@ -1159,18 +1227,20 @@ def addRouterInstance(self, vm, conn): success = False break - reservations = conn.get_all_instances([vrouter]) - vrouter_instance = reservations[0].instances[0] + reservations = conn.describe_instances(InstanceIds=[vrouter])['Reservations'] + vrouter_instance = reservations[0]['Instances'][0] - if vrouter_instance.state != "running": + if vrouter_instance['State']['Name'] != "running": self.log_debug("VRouter instance %s is not running." % system_router) success = False break self.log_info("Adding route %s to instance ID: %s." 
% (router_cidr, vrouter)) - conn.create_route(route_table_id, router_cidr, instance_id=vrouter) + conn.create_route(RouteTableId=route_table_id, + DestinationCidrBlock=router_cidr, + InstanceId=vrouter) self.log_debug("Disabling sourceDestCheck to instance ID: %s." % vrouter) - conn.modify_instance_attribute(vrouter, attribute='sourceDestCheck', value=False) + conn.modify_instance_attribute(InstanceId=vrouter, SourceDestCheck={'Value': False}) # once set, delete it to not set it again network.delValue('router') @@ -1181,9 +1251,8 @@ def addRouterInstance(self, vm, conn): return success def updateVMInfo(self, vm, auth_data): - region = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - conn = self.get_connection(region, auth_data) + region, instance_id = vm.id.split(";") + conn = self.get_connection(region, auth_data, 'ec2') # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): @@ -1192,16 +1261,16 @@ def updateVMInfo(self, vm, auth_data): job_instance_id = None self.log_info("Check if the request has been fulfilled and the instance has been deployed") - job_sir_id = instance_id - request_list = conn.get_all_spot_instance_requests() - for sir in request_list: - # TODO: Check if the request had failed and launch it in - # another availability zone - if sir.state == 'failed': - vm.state = VirtualMachine.FAILED - if sir.id == job_sir_id: - job_instance_id = sir.instance_id - break + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': [instance_id]}]) + sir = [] + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + # TODO: Check if the request had failed and launch it in + # another availability zone + if sir['State'] == 'failed': + vm.state = VirtualMachine.FAILED + job_instance_id = sir['InstanceId'] if job_instance_id: self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) @@ -1214,34 +1283,20 @@ def 
updateVMInfo(self, vm, auth_data): instance = self.get_instance_by_id(instance_id, region, auth_data) if instance: - try: - # sometime if you try to update a recently created instance - # this operation fails - instance.update() - if "IM-USER" not in instance.tags: - im_username = "im_user" - if auth_data.getAuthInfo('InfrastructureManager'): - im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] - instance.add_tag("IM-USER", im_username) - except Exception as ex: - self.log_exception("Error updating the instance " + instance_id) - return (False, "Error updating the instance " + instance_id + ": " + str(ex)) - vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type) - vm.info.systems[0].setValue("availability_zone", instance.placement) + vm.info.systems[0].setValue("availability_zone", instance.placement['AvailabilityZone']) - vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN) + vm.state = self.VM_STATE_MAP.get(instance.state['Name'], VirtualMachine.UNKNOWN) instance_type = self.get_instance_type_by_name(instance.instance_type) self.update_system_info_from_instance(vm.info.systems[0], instance_type) - self.setIPsFromInstance(vm, instance) + self.setIPsFromInstance(vm, instance, conn) self.manage_dns_entries("add", vm, auth_data) self.addRouterInstance(vm, conn) try: - vm.info.systems[0].setValue('launch_time', int(time.mktime( - time.strptime(instance.launch_time[:19], '%Y-%m-%dT%H:%M:%S')))) + vm.info.systems[0].setValue('launch_time', int(instance.launch_time.timestamp())) except Exception as ex: self.log_warn("Error setting the launch_time of the instance. 
" "Probably the instance is not running:" + str(ex)) @@ -1252,36 +1307,64 @@ def updateVMInfo(self, vm, auth_data): return (True, vm) + def _get_zone(self, conn, domain): + zones = conn.list_hosted_zones_by_name(DNSName=domain, MaxItems='1')['HostedZones'] + if not zones or len(zones) == 0: + return None + return zones[0] + + @staticmethod + def _get_change_batch(action, fqdn, ip): + return { + "Changes": [ + { + "Action": action, + "ResourceRecordSet": { + "Name": fqdn, + "Type": "A", + "TTL": 300, + "ResourceRecords": [{"Value": ip}], + }, + } + ] + } + def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): try: # Workaround to use EC2 as the default case. if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + + zone = self._get_zone(conn, domain) + if not zone: + raise Exception("Could not find DNS zone to update") + zone_id = zone['Id'] + if not zone: self.log_info("Creating DNS zone %s" % domain) - zone = conn.create_zone(domain) + zone = conn.create_hosted_zone(domain) else: self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone_id, + StartRecordName=fqdn, + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("Creating DNS record %s." 
% fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("CREATE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets(HostedZoneId=zone_id, + ChangeBatch=self._get_change_batch('CREATE', fqdn, ip)) else: self.log_info("DNS record %s exists. Do not create." % fqdn) return True @@ -1292,35 +1375,38 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): # Workaround to use EC2 as the default case. if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + zone = self._get_zone(conn, domain) if not zone: self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." + domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + StartRecordName=fqdn, + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: self.log_info("Deleting DNS record %s." 
% fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("DELETE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets(HostedZoneId=zone['Id'], + ChangeBatch=self._get_change_batch('DELETE', fqdn, ip)) # if there are no A records - # all_a_records = [r for r in conn.get_all_rrsets(zone.id) if r.type == "A"] + # all_a_records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + # StartRecordType='A')['ResourceRecordSets'] # if not all_a_records: - # conn.delete_hosted_zone(zone.id) + # self.log_info("Deleting DNS zone %s." % domain) + # conn.delete_hosted_zone(zone['Id']) def cancel_spot_requests(self, conn, vm): """ @@ -1333,24 +1419,25 @@ def cancel_spot_requests(self, conn, vm): instance_id = vm.id.split(";")[1] # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): - request_list = conn.get_all_spot_instance_requests([instance_id]) - for sir in request_list: - conn.cancel_spot_instance_requests(sir.id) - self.log_info("Spot instance request " + str(sir.id) + " deleted") - break - - def delete_networks(self, conn, vm, timeout=240): + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': [instance_id]}]) + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + conn.cancel_spot_instance_requests(SpotInstanceRequestIds=[sir['SpotInstanceRequestId']]) + self.log_info("Spot instance request " + sir['SpotInstanceRequestId'] + " deleted") + + def delete_networks(self, conn, inf_id, timeout=240): """ Delete the created networks """ - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": vm.inf.id}): - self.log_info("Deleting subnet: %s" % subnet.id) + for subnet in conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Subnets']: + self.log_info("Deleting subnet: %s" % subnet['SubnetId']) cont = 0 deleted = False while not deleted and cont < 
timeout: cont += 5 try: - conn.delete_subnet(subnet.id) + conn.delete_subnet(SubnetId=subnet['SubnetId']) deleted = True except Exception as ex: self.log_warn("Error removing subnet: " + str(ex)) @@ -1359,24 +1446,25 @@ def delete_networks(self, conn, vm, timeout=240): time.sleep(5) if not deleted: - self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet.id)) + self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet['SubnetId'])) vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Vpcs']: + vpc_id = vpc['VpcId'] ig_id = None - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": vm.inf.id}): - ig_id = ig.id + for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [inf_id]}])['InternetGateways']: + ig_id = ig['InternetGatewayId'] if ig_id and vpc_id: self.log_info("Detacching Internet Gateway: %s from VPC: %s" % (ig_id, vpc_id)) - conn.detach_internet_gateway(ig_id, vpc_id) + conn.detach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) if ig_id: - self.log_info("Deleting Internet Gateway: %s" % ig.id) - conn.delete_internet_gateway(ig_id) + self.log_info("Deleting Internet Gateway: %s" % ig_id) + conn.delete_internet_gateway(InternetGatewayId=ig_id) if vpc_id: - self.log_info("Deleting vpc: %s" % vpc.id) - conn.delete_vpc(vpc_id) + self.log_info("Deleting vpc: %s" % vpc_id) + conn.delete_vpc(VpcId=vpc_id) def finalize(self, vm, last, auth_data): @@ -1389,10 +1477,9 @@ def finalize(self, vm, last, auth_data): self.log_info("VM with no ID. 
Ignore.") return True, "" - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) @@ -1431,7 +1518,7 @@ def finalize(self, vm, last, auth_data): # And nets try: - self.delete_networks(conn, vm) + self.delete_networks(conn, vm.inf.id) except Exception as ex: self.log_exception("Error deleting networks.") error_msg += "Error deleting networks: %s. " % ex @@ -1450,14 +1537,14 @@ def _get_security_groups(self, conn, vm): sg_names.append(sg_name) sgs = [] - for sg_name in sg_names: - try: - sgs.extend(conn.get_all_security_groups(filters={'group-name': sg_name})) - except Exception: - self.log_exception("Error getting SG %s" % sg_name) + try: + sg = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': sg_names}]) + sgs = sg['SecurityGroups'] + except Exception: + self.log_exception("Error getting SG %s" % sg_name) return sgs - def delete_security_groups(self, conn, vm, timeout=90): + def delete_security_groups(self, conn, vm): """ Delete the SG of this infrastructure if this is the last VM @@ -1469,68 +1556,67 @@ def delete_security_groups(self, conn, vm, timeout=90): if sgs: # Get the default SG to set in the instances - def_sg_id = conn.get_all_security_groups(filters={'group-name': 'default', - 'vpc-id': sgs[0].vpc_id})[0].id + def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, + {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] + )['SecurityGroups'][0]['GroupId'] for sg in sgs: - if sg.description != "Security group created by the IM": - self.log_info("SG %s not created by the IM. Do not delete it." % sg.name) + if sg['Description'] != "Security group created by the IM": + self.log_info("SG %s not created by the IM. Do not delete it." 
% sg['GroupName']) continue try: - for instance in sg.instances(): - instance.modify_attribute("groupSet", [def_sg_id]) + reservations = conn.describe_instances(Filters=[{'Name': 'instance.group-id', + 'Values': [sg['GroupId']]}])['Reservations'] + if reservations: + for instance in reservations[0]['Instances']: + conn.modify_instance_attribute(InstanceId=instance['InstanceId'], Groups=[def_sg_id]) except Exception as ex: - self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg.name, instance.id, ex)) + self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg["GroupName"], + instance['InstanceId'], ex)) # try to wait some seconds to free the SGs time.sleep(5) - self.log_info("Remove the SG: " + sg.name) + self.log_info("Remove the SG: " + sg['GroupName']) try: - sg.revoke('tcp', 0, 65535, src_group=sg) - sg.revoke('udp', 0, 65535, src_group=sg) + conn.revoke_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': 'tcp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + {'IpProtocol': 'udp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + ]) except Exception as ex: self.log_warn("Error revoking self rules: " + str(ex)) - sg.delete() + conn.delete_security_group(GroupId=sg['GroupId']) def stop(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - - instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if (instance is not None): - instance.update() - instance.stop() - else: - self.log_warn("Instance %s not found. Not stopping it." % instance_id) - return (False, "Instance %s not found." 
% instance_id) - - return (True, "") + return self._vm_operation("stop", vm, auth_data) def start(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - - instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if (instance is not None): - instance.update() - instance.start() - else: - self.log_warn("Instance %s not found. Not starting it." % instance_id) - return (False, "Instance %s not found." % instance_id) - - return (True, "") + return self._vm_operation("start", vm, auth_data) def reboot(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + return self._vm_operation("reboot", vm, auth_data) + + def _vm_operation(self, op, vm, auth_data): + region_name, instance_id = vm.id.split(";") instance = self.get_instance_by_id(instance_id, region_name, auth_data) if (instance is not None): - instance.update() - instance.reboot() + if op == "stop": + instance.stop() + elif op == "start": + instance.start() + elif op == "reboot": + instance.reboot() else: - self.log_warn("Instance %s not found. Not rebooting it." % instance_id) + self.log_warn("Instance %s not found. Not %sing it." % (instance_id, op)) return (False, "Instance %s not found." 
% instance_id) return (True, "") @@ -1544,9 +1630,8 @@ def waitStop(instance, timeout=120): wait = 0 powered_off = False while wait < timeout and not powered_off: - instance.update() - - powered_off = instance.state == 'stopped' + instance.reload() + powered_off = instance.state['Name'] == 'stopped' if not powered_off: time.sleep(2) wait += 2 @@ -1554,14 +1639,11 @@ def waitStop(instance, timeout=120): return powered_off def alterVM(self, vm, radl, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if instance: - instance.update() - else: + if not instance: return (False, "The instance has not been found") new_system = self.resize_vm_radl(vm, radl) @@ -1575,7 +1657,8 @@ def alterVM(self, vm, radl, auth_data): if instance_type and instance.instance_type != instance_type.name: stopped = self.waitStop(instance) if stopped: - success = instance.modify_attribute('instanceType', instance_type.name) + success = instance.modify_attribute(Attribute='instanceType', + Value=instance_type.name) if success: self.update_system_info_from_instance(vm.info.systems[0], instance_type) instance.start() @@ -1671,20 +1754,16 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): infrastructure is destroyed. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.volume - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.instance - Returns: a tuple (success, vm). - The first value is True if the operation finished successfully or False otherwise. - The second value is a str with the url of the new image if the operation finished successfully or an error message otherwise. 
""" - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - snapshot_id = "" + region_name, instance_id = vm.id.split(";") + snapshot_id = None # Obtain the connection object to connect with EC2 - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') if not conn: return (False, "Error connecting with EC2, check the credentials") @@ -1693,15 +1772,16 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: self.log_info("Creating snapshot: " + image_name) - snapshot_id = instance.create_image(image_name, - description="AMI automatically generated by IM", - no_reboot=True) - # Add tags to the snapshot to be recognizable - conn.create_tags(snapshot_id, {'instance_id': instance_id}) + snapshot_id = instance.create_image(Name=image_name, + Description="AMI automatically generated by IM", + NoReboot=True, + TagSpecifications=[{'ResourceType': 'image', + 'Tags': [{'Key': 'instance_id', + 'Value': instance_id}]}]) else: return (False, "Error obtaining details of the instance") - if snapshot_id != "": - new_url = "aws://%s/%s" % (region_name, snapshot_id) + if snapshot_id: + new_url = "aws://%s/%s" % (region_name, snapshot_id['ImageId']) if auto_delete: vm.inf.snapshots.append(new_url) return (True, new_url) @@ -1712,9 +1792,9 @@ def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) self.log_info("Deleting image: %s." 
% image_url) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') - success = conn.deregister_image(ami, delete_snapshot=True) # https://github.com/boto/boto/issues/3019 + success = conn.deregister_image(ImageId=ami) if success: return (True, "") @@ -1730,7 +1810,7 @@ def list_images(self, auth_data, filters=None): regions = [filters['region']] del filters['region'] if not regions: - regions = [region.name for region in boto.ec2.regions()] + regions = [region['RegionName'] for region in boto3.client('ec2').describe_regions()['Regions']] images_filter = {'architecture': 'x86_64', 'image-type': 'machine', 'virtualization-type': 'hvm', 'state': 'available', @@ -1742,12 +1822,12 @@ def list_images(self, auth_data, filters=None): images = [] for region in regions: - conn = self.get_connection(region, auth_data) + conn = self.get_connection(region, auth_data, 'ec2') try: - for image in conn.get_all_images(owners=['self', 'aws-marketplace'], filters=images_filter): - if len(image.id) > 12: # do not add old images - images.append({"uri": "aws://%s/%s" % (region, image.id), - "name": "%s/%s" % (region, image.name)}) + for image in conn.describe_images(Owners=['self', 'aws-marketplace'], Filters=images_filter)['Images']: + if len(image['ImageId']) > 12: # do not add old images + images.append({"uri": "aws://%s/%s" % (region, image['ImageId']), + "name": "%s/%s" % (region, image['Name'])}) except Exception: continue return self._filter_images(images, filters) diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index c2125e9c..f7716430 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -229,7 +229,7 @@ def get_net_info(occi_res): mask) for mask in Config.PRIVATE_NET_MASKS]) elif kv[0].strip() == "occi.networkinterface.interface": net_interface = kv[1].strip('"') - num_interface = re.findall('\d+', net_interface)[0] + num_interface = re.findall(r'\d+', net_interface)[0] elif kv[0].strip() == 
"self": link = kv[1].strip('"') if num_interface and ip_address: diff --git a/IM/connectors/OSCAR.py b/IM/connectors/OSCAR.py index d7dfc3ba..f5d17603 100644 --- a/IM/connectors/OSCAR.py +++ b/IM/connectors/OSCAR.py @@ -134,7 +134,7 @@ def _get_service_json(radl_system): if radl_system.getValue("name"): service["name"] = radl_system.getValue("name") if radl_system.getValue("memory.size"): - service["memory"] = "%dMi" % radl_system.getFeature('memory.size').getValue('M') + service["memory"] = "%dMi" % radl_system.getFeature('memory.size').getValue('Mi') if radl_system.getValue("cpu.count"): service["cpu"] = "%g" % radl_system.getValue("cpu.count") if radl_system.getValue("gpu.count"): @@ -260,7 +260,7 @@ def update_system_info_from_service_info(self, system, service_info): conflict="other", missing="other") if "memory" in service_info and service_info["memory"]: memory = self.convert_memory_unit(service_info["memory"], "Mi") - system.addFeature(Feature("memory.size", "=", memory, "M"), + system.addFeature(Feature("memory.size", "=", memory, "Mi"), conflict="other", missing="other") if "script" in service_info and service_info["script"]: system.addFeature(Feature("script", "=", service_info["script"]), diff --git a/IM/tosca/Tosca.py b/IM/tosca/Tosca.py index 86820239..10d733b5 100644 --- a/IM/tosca/Tosca.py +++ b/IM/tosca/Tosca.py @@ -800,7 +800,7 @@ def _gen_configure_from_interfaces(self, node, compute, interfaces): variables = "" tasks = "" recipe_list = [] - remote_artifacts_path = "/tmp" + remote_artifacts_path = "/tmp" # nosec # Take the interfaces in correct order for name in ['create', 'pre_configure_source', 'pre_configure_target', 'configure_rel', 'configure', 'post_configure_source', 'post_configure_target', 'start', diff --git a/changelog b/changelog index dfaa04b2..ff80a770 100644 --- a/changelog +++ b/changelog @@ -777,3 +777,7 @@ IM 1.17.1: * Speed up Ansible installation using newer versions. * Fix problem with 0 disk flavors in OpenStack. 
* Flush Inf data to DB in case of service termination. + +IM 1.18.0: + * Add support for getting IM stats. + * Migrate EC2 conn to boto3 library. diff --git a/codemeta.json b/codemeta.json index 1aab92a3..bfb302d9 100644 --- a/codemeta.json +++ b/codemeta.json @@ -6,7 +6,7 @@ "@type": "SoftwareSourceCode", "identifier": "im", "name": "Infrastructure Manager", - "version": "1.17.1", + "version": "1.18.0", "description": "IM is a tool that deploys complex and customized virtual infrastructures on IaaS Cloud deployments", "license": "GNU General Public License v3.0", "author": [ diff --git a/contextualization/ansible_install.sh b/contextualization/ansible_install.sh index fcdcbb3f..062dd240 100755 --- a/contextualization/ansible_install.sh +++ b/contextualization/ansible_install.sh @@ -39,6 +39,10 @@ distribution_id() { echo ${RETVAL} } +# Create a symbolic link to python3 in case the venv was not created +ls /var/tmp/.ansible/bin/ || mkdir -p /var/tmp/.ansible/bin/ +ls /var/tmp/.ansible/bin/python3 || ln -s /usr/bin/python3 /var/tmp/.ansible/bin/python3 + if [ $(which ansible-playbook) ]; then echo "Ansible installed. Do not install." 
else diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index de3172eb..128e32d7 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -6,6 +6,7 @@ vars: # Ansible specific Version or "latest" ANSIBLE_VERSION: 4.10.0 + VENV_PATH: /var/tmp/.ansible tasks: ############## To avoid some issues with cloud-init and unattended upgrades ############### - name: Avoid unattended upgrades @@ -99,15 +100,15 @@ ################### Install Ansible/pip requisites ######################### - name: Debian/Ubuntu install requisites with apt - apt: name=python3-pip,wget,python3-setuptools,python3-psutil,sshpass,openssh-client,unzip install_recommends=no + apt: name=python3-pip,wget,python3-setuptools,sshpass,openssh-client,unzip install_recommends=no when: ansible_os_family == "Debian" - name: Yum install requisites RH 7/8 or Fedora - command: yum install -y python3-pip python3-setuptools python3-psutil sshpass openssh-clients + command: yum install -y python3-pip python3-setuptools sshpass openssh-clients when: ansible_os_family == "RedHat" - name: Zypper install requirements Suse - zypper: name=python3-pip,python3-setuptools,python3-psutil,wget,python3-cryptography state=present + zypper: name=python3-pip,python3-setuptools,wget,python3-cryptography state=present when: ansible_os_family == "Suse" - name: Install python-setuptools @@ -116,11 +117,34 @@ ######################################### Use pip to enable to set the version ############################################# + - name: Set extra_args var + set_fact: + extra_args: '' + + - name: Set extra_args var in py3.11 + set_fact: + extra_args: --break-system-packages + when: ansible_python_version is version('3.11', '>=') + + - name: Install virtualenv with pip + pip: + name: virtualenv + executable: pip3 + extra_args: "{{ extra_args }}" + + - name: Create virtualenv link in PATH + file: + state: link + src: /usr/local/bin/virtualenv + dest: 
/usr/local/sbin/virtualenv + when: ansible_os_family == "RedHat" + ignore_errors: yes + # Version over 21 does not work with python 3.6 or older - name: Upgrade pip in py3.6- pip: name: pip>18.0,<21.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" # in some old distros we need to trust in the pypi to avoid SSL errors extra_args: --trusted-host files.pythonhosted.org --trusted-host pypi.org --trusted-host pypi.python.org when: ansible_python_version is version('3.7', '<') @@ -128,37 +152,38 @@ - name: Upgrade pip in py3.7-py3.8 pip: name: pip>20.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" when: ansible_python_version is version('3.7', '>=') and ansible_python_version is version('3.9', '<') - name: Upgrade pip in py3.9-py3.10 pip: name: pip>=22.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" when: ansible_python_version is version('3.9', '>=') and ansible_python_version is version('3.11', '<') # Version 66 (#2497) fails - name: Upgrade setuptools with Pip in py3.11- pip: name: setuptools<66.0.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" when: ansible_python_version is version('3.11', '<') - name: Set extra_args var set_fact: extra_args: --prefer-binary - - name: Set extra_args var in py3.11 - set_fact: - extra_args: --prefer-binary --break-system-packages - when: ansible_python_version is version('3.11', '>=') + - name: Install psutil + pip: + name: psutil + virtualenv: "{{ VENV_PATH }}" + extra_args: "{{ extra_args }}" - name: Install cryptography & pyOpenSSL in py3.11- pip: name: - cryptography>36.0.0,<39.0.0 - pyOpenSSL>20.0,<22.1.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ansible_python_version is version('3.11', '<') @@ -167,7 +192,7 @@ name: - cryptography>36.0.0 - pyOpenSSL>20.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ansible_python_version is version('3.11', '>=') @@ -178,7 +203,7 @@ - pyyaml - paramiko>=2.9.5 - packaging - executable: pip3 
+ virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" - name: Set Ansible newer version for python 3.8+ @@ -201,14 +226,14 @@ pip: name: ansible version: "{{ ANSIBLE_VERSION }}" - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ANSIBLE_VERSION != "latest" - name: Install latest ansible version with Pip pip: name: ansible - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ANSIBLE_VERSION == "latest" @@ -219,13 +244,13 @@ name: - jmespath - scp - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" - name: Install pywinrm with Pip pip: name: pywinrm - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" ignore_errors: yes diff --git a/contextualization/ctxt_agent_dist.py b/contextualization/ctxt_agent_dist.py index af981185..ac5c1e77 100755 --- a/contextualization/ctxt_agent_dist.py +++ b/contextualization/ctxt_agent_dist.py @@ -162,7 +162,8 @@ def LaunchRemoteAgent(self, vm, vault_pass, pk_file, changed_pass_ok): vm_dir = os.path.abspath(os.path.dirname(self.vm_conf_data_filename)) remote_dir = os.path.abspath(os.path.dirname(self.conf_data_filename)) try: - (pid, _, _) = ssh_client.execute(vault_export + "nohup python3 " + remote_dir + "/ctxt_agent_dist.py " + + (pid, _, _) = ssh_client.execute(vault_export + "nohup " + CtxtAgentBase.VENV_DIR + "/bin/python3 " + + remote_dir + "/ctxt_agent_dist.py " + self.conf_data_filename + " " + self.vm_conf_data_filename + " 1 > " + vm_dir + "/stdout 2> " + vm_dir + "/stderr < /dev/null & echo -n $!") @@ -211,7 +212,7 @@ def get_master_ssh(self, general_conf_data): return SSHRetry(vm_ip, ctxt_vm['user'], passwd, private_key, ctxt_vm['remote_port']) @staticmethod - def get_ssh(vm, pk_file, changed_pass=None): + def get_ssh(vm, pk_file, changed_pass=None, use_proxy=False): passwd = vm['passwd'] if 'new_passwd' in vm and vm['new_passwd'] and changed_pass: passwd = vm['new_passwd'] diff --git 
a/doc/source/conf.py b/doc/source/conf.py index 54bfd468..7ab6ab98 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -3,8 +3,8 @@ copyright = '2023, I3M-GRyCAP' author = 'micafer' -version = '1.17' -release = '1.17.1' +version = '1.18' +release = '1.18.0' master_doc = 'index' diff --git a/doc/source/tosca.rst b/doc/source/tosca.rst index 2cdc6bcf..c079b8ae 100644 --- a/doc/source/tosca.rst +++ b/doc/source/tosca.rst @@ -5,9 +5,9 @@ TOSCA The Infrastructure Manager supports the definition of Cloud topologies using `OASIS TOSCA Simple Profile in YAML Version 1.0 `_. -The TOSCA support has been developed under de framework of the `INDIGO DataCloud EU project `_. +The TOSCA support was developed under the framework of the `INDIGO DataCloud EU project `_. You can see some input examples at -`https://github.com/indigo-dc/tosca-types/tree/master/examples `_. +`https://github.com/grycap/tosca/tree/main/templates `_. Basic example ^^^^^^^^^^^^^ @@ -55,7 +55,7 @@ the SSH credentials to access it:: Setting VMI URI ^^^^^^^^^^^^^^^^ -As in RADL you can set an specific URI identifying the VMI to use in the VM. +As in RADL, you can set a specific URI identifying the VMI to use in the VM. The URI format is the same used in RADL (:ref:`radl_system`). 
In this case the type must be changed to ``tosca.nodes.indigo.Compute`` (the Compute normative type does not support the ``os image`` property), and the image property must @@ -82,7 +82,7 @@ be added in the ``os`` capability:: Advanced Compute host properties ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``tosca.nodes.indigo.Compute`` custom type add a new set of advanced features to the +The ``tosca.nodes.indigo.Compute`` custom type adds a new set of advanced features to the host properties, enabling the request of GPUs and `Intel SGX `_ CPU support in the compute node:: @@ -109,8 +109,8 @@ Network properties Basic properties ----------------- -The easiest way to specify network requirements of the Compute node is sing the endpoint capability properties. -For example the following example the compute node requests for a public IP:: +The easiest way to specify network requirements of the Compute node is using the endpoint capability properties. +For example, the following example the compute node requests for a public IP:: ... simple_node: @@ -123,15 +123,15 @@ For example the following example the compute node requests for a public IP:: Possible values of the ``network_name`` endpoint property: - * PRIVATE: The Compute node does not requires a public IP. **This is the default behavior if no + * PRIVATE: The Compute node does not require a public IP. **This is the default behaviour if no endpoint capability is defined**. * PUBLIC: The Compute node requires a public IP. * Network provider ID: As the `provider_id` network property in RADL It defines the name of the network in a specific Cloud provider (see :ref:`_radl_network`): -Furthermore the endpoint capability has a set of additional properties -to set the DNS name of the node or the set of ports to be externally accesible:: +Furthermore, the endpoint capability has a set of additional properties +to set the DNS name of the node or the set of ports to be externally accessible:: ... 
@@ -151,7 +151,7 @@ to set the DNS name of the node or the set of ports to be externally accesible:: Advanced properties ------------------- -In case that you need a more detailed definition of the networks, you can use the +In case you need a more detailed definition of the networks, you can use the ``tosca.nodes.network.Network`` and ``tosca.nodes.network.Port`` TOSCA normative types. In this way you can define the set of networks needed in your topology using the ports to link the networks with the Compute nodes:: @@ -198,7 +198,7 @@ Custom defined Port type ``tosca.nodes.indigo.network.Port`` has a set of additi Software Components ^^^^^^^^^^^^^^^^^^^ -IM enable to use Ansible playbooks as implementation scripts. Furthermore it enables to specify +IM enables the use of Ansible playbooks as implementation scripts. Furthermore, it enables specifying Ansible roles (``tosca.artifacts.AnsibleGalaxy.role``) and collections (``tosca.artifacts.AnsibleGalaxy.collections``) to be installed and used in the playbooks:: @@ -255,8 +255,8 @@ some cloud providers, in general is better not to add it:: Policies & groups ^^^^^^^^^^^^^^^^^ -IM enables the definition of the specific cloud provider where the Compute nodes will be deployed in an hybrid deployment. -For example, in the following code we assume that we have defined three computes nodes (compute_one, compute_two and compute_three). +IM enables the definition of the specific cloud provider where the Compute nodes will be deployed in a hybrid deployment. +For example, in the following code we assume that we have defined three compute nodes (compute_one, compute_two and compute_three). 
We can create a placement group with two of them (compute_one and compute_two) and then set a placement policy with a cloud_id (that must be defined in the :ref:`auth-file`), and create a second placement policy where we can set a different cloud provider and, optionally, an availability zone:: @@ -285,10 +285,10 @@ Container Applications (Kubernetes connector) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ IM also enables the definition of container applications to be deployed in a Kubernetes cluster. -In the following example we can see how to define a container application (IM) that uses a +In the following example, we can see how to define a container application (IM) that uses a ConfigMap for a configuration file. The IM application is connected with a MySQL backend using the ``IM_DATA_DB`` environment variable. The MySQL container is defined with a Persistent -Volume Claim (PVC) of 10GB. Furthermore the IM application specifies an endpoint to be published +Volume Claim (PVC) of 10GB. Furthermore, the IM application specifies an endpoint to be published that will result in the creation of a Kubernetes Ingress. ... @@ -384,14 +384,14 @@ Advanced Output values The ``tosca.nodes.indigo.Compute`` node type adds a new attribute named: ``ansible_output``. It is a map that has one element per each IM configuration step, so you can access it by name. The steps have the keyword -``tasks`` that is also a map that has one element per ansible task. In this case -it can bes accessed using the task name as defined in the playbook. Finally +``tasks``, that is also a map that has one element per ansible task. In this case +it can be accessed using the task name as defined in the playbook. Finally there is an ``output`` keyword that returns the output of the task. In most of the cases the task is a ``debug`` ansible task that shows anything you want to return. 
-In the following example the specified task was a debug ansible task that shows the -value of a internal defined value:: +In the following example, the specified task was a debug ansible task that shows the +value of an internally defined value:: ... diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index fc6ee4a9..33d00187 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -1,8 +1,8 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG BRANCH=devel LABEL maintainer="Miguel Caballer " -LABEL version="1.17.1" +LABEL version="1.18.0" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -12,11 +12,10 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install git+https://github.com/micafer/libcloud@ost_nets_extra && \ - pip3 install apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages git+https://github.com/micafer/libcloud@ost_nets_extra && \ + pip3 install --break-system-packages apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ 
@@ -24,7 +23,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch /usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -38,8 +37,5 @@ RUN sed -i -e 's/VM_NUM_USE_CTXT_DIST = 30/VM_NUM_USE_CTXT_DIST = 3/g' /etc/im/i # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD /usr/local/bin/im_service \ No newline at end of file diff --git a/docker-py3/Dockerfile b/docker-py3/Dockerfile index 3044d852..81500d20 100644 --- a/docker-py3/Dockerfile +++ b/docker-py3/Dockerfile @@ -1,5 +1,5 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ENV VERSION=1.17.1 @@ -14,10 +14,9 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install apache-libcloud==3.8.0 IM==${VERSION} &&\ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict 
pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages apache-libcloud==3.8.0 IM==${VERSION} &&\ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ @@ -25,7 +24,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch /usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -36,8 +35,5 @@ RUN wget https://raw.githubusercontent.com/grycap/im/v${VERSION}/etc/logging.con # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD ["/usr/local/bin/im_service"] diff --git a/docker-py3/Dockerfile.alp b/docker-py3/Dockerfile.alp index e21363c7..97f502d0 100644 --- a/docker-py3/Dockerfile.alp +++ b/docker-py3/Dockerfile.alp @@ -40,8 +40,5 @@ RUN apk add --no-cache git &&\ # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/lib/python3.10/site-packages/boto/endpoints.json - # Start IM service CMD im_service.py diff --git a/pyproject.toml b/pyproject.toml index fd6487a1..18e1180c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "PyYAML", "suds-community", "cheroot", - "boto >= 2.29", + "boto3", "apache-libcloud >= 3.2.0", "RADL >= 1.3.3", "flask", diff --git a/requirements-tests.txt b/requirements-tests.txt index 48bdd4d1..2c8c9ae9 
100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,11 +1,10 @@ -ansible >= 2.4 -ansible-base +ansible == 8.7.0 paramiko >= 1.14 PyYAML cheroot -boto >= 2.29 +boto3 apache-libcloud >= 3.3.1 -RADL >= 1.3.3 +RADL >= 1.3.4 flask werkzeug netaddr diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py index 1b937d36..3e666a81 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -154,7 +154,7 @@ def test_11_create(self): self.__class__.inf_id = inf_id all_configured = self.wait_inf_state( - inf_id, VirtualMachine.CONFIGURED, 2400) + inf_id, VirtualMachine.CONFIGURED, 2700) self.assertTrue( all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).") @@ -261,7 +261,7 @@ def test_19_addresource(self): Test AddResource function """ (success, res) = self.server.AddResource( - self.inf_id, RADL_ADD_WIN, self.auth_data) + self.inf_id, RADL_ADD, self.auth_data) self.assertTrue(success, msg="ERROR calling AddResource: " + str(res)) (success, vm_ids) = self.server.GetInfrastructureInfo( @@ -272,7 +272,7 @@ def test_19_addresource(self): str(len(vm_ids)) + "). 
It must be 4")) all_configured = self.wait_inf_state( - self.inf_id, VirtualMachine.CONFIGURED, 2700) + self.inf_id, VirtualMachine.CONFIGURED, 2400) self.assertTrue( all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).") @@ -510,10 +510,10 @@ def test_40_export_import(self): success, msg="ERROR calling ImportInfrastructure: " + str(res)) def test_45_stats(self): - (success, res) = self.server.GetStats(None, None, self.auth_data) + (success, res) = self.server.GetStats('', '', self.auth_data) self.assertTrue( success, msg="ERROR calling GetStats: " + str(res)) - self.assertEqual(len(res), 3, msg="ERROR getting stats: Incorrect number of infrastructures") + self.assertEqual(len(res), 4, msg="ERROR getting stats: Incorrect number of infrastructures") def test_50_destroy(self): """ diff --git a/test/unit/REST.py b/test/unit/REST.py index 3b723a9b..f7c8f9ed 100755 --- a/test/unit/REST.py +++ b/test/unit/REST.py @@ -228,8 +228,8 @@ def test_CreateInfrastructure(self, get_infrastructure, CreateInfrastructure): data=read_file_as_bytes("../files/test_simple.json")) self.assertEqual(res.json, {"one": {"cloudType": "OpenNebula", "cloudEndpoint": "http://ramses.i3m.upv.es:2633", - "compute": [{"cpuCores": 1, "memoryInMegabytes": 1024}, - {"cpuCores": 1, "memoryInMegabytes": 1024}], "storage": []}}) + "compute": [{"cpuCores": 1, "memoryInMegabytes": 1074}, + {"cpuCores": 1, "memoryInMegabytes": 1074}], "storage": []}}) headers["Content-Type"] = "application/json" CreateInfrastructure.side_effect = InvaliddUserException() diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index f2e116d4..29780728 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -15,27 +15,24 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
- import sys import unittest +import datetime sys.path.append(".") sys.path.append("..") from .CloudConn import TestCloudConnectorBase from IM.CloudInfo import CloudInfo from IM.auth import Authentication -from radl import radl_parse +from IM.config import Config from IM.VirtualMachine import VirtualMachine +from radl import radl_parse from IM.InfrastructureInfo import InfrastructureInfo from IM.connectors.EC2 import EC2CloudConnector -from IM.config import Config from mock import patch, MagicMock, call class TestEC2Connector(TestCloudConnectorBase): - """ - Class to test the IM connectors - """ @staticmethod def get_ec2_cloud(): @@ -106,31 +103,20 @@ def test_15_get_all_instance_types(self): self.assertEqual(instance.cores_per_cpu, 1) self.assertEqual(instance.disk_space, 160) - def get_all_subnets(self, subnet_ids=None, filters=None): - subnet = MagicMock() - subnet.id = "subnet-id" - if filters: - return [] - elif subnet_ids: - subnet.cidr_block = "10.10.0.1/24" - return [subnet] - subnet.cidr_block = "10.0.1.0/24" - return [subnet] - - def _get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False): - vpc = MagicMock() - vpc.id = "vpc-id" - vpc.default = True - if vpc_ids: - return [vpc] + def describe_subnets(self, **kwargs): + subnet = {} + subnet['SubnetId'] = "subnet-id" + if 'Filters' in kwargs and kwargs['Filters']: + return {'Subnets': []} + elif 'SubnetIds' in kwargs and kwargs['SubnetIds']: + subnet['CidrBlock'] = "10.10.0.1/24" else: - return [] + subnet['CidrBlock'] = "10.0.1.0/24" + return {'Subnets': [subnet]} - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_region): + def test_20_launch(self, save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and 
outports='8080,9000:9100' and sg_name = 'sgname') network net2 () @@ -143,7 +129,7 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio net_interface.0.dns_name = 'test' and net_interface.1.connection = 'net2' and disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and + disk.0.image.url = 'aws://us-east-1/ami-id' and disk.0.os.credentials.username = 'user' and disk.1.size=1GB and disk.1.device='hdb' and @@ -156,110 +142,42 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - region = MagicMock() - get_region.return_value = region - - conn = MagicMock() - VPCConnection.return_value = conn - - image = MagicMock() - device = MagicMock() - reservation = MagicMock() - instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value = True - instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - inf = InfrastructureInfo() inf.auth = auth inf.radl = radl - res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - self.assertEqual(len(conn.create_security_group.call_args_list), 3) - 
self.assertEqual(conn.create_security_group.call_args_list[0][0][0], "im-%s" % inf.id) - self.assertEqual(conn.create_security_group.call_args_list[1][0][0], "sgname") - self.assertEqual(conn.create_security_group.call_args_list[2][0][0], "im-%s-net2" % inf.id) - # Check the case that we do not use VPC - radl_data = """ - network net1 (outbound = 'yes' and outports='8080') - network net2 (create='yes' and cidr='10.0.128.0/24') - network net3 (create='yes' and cidr='10.0.*.0/24') - network net4 (create='yes') - system test ( - cpu.arch='x86_64' and - cpu.count>=1 and - memory.size>=1g and - net_interface.0.connection = 'net1' and - net_interface.0.dns_name = 'test' and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and - disk.0.os.credentials.username = 'user' and - #disk.0.os.credentials.private_key = 'private' and - #disk.0.os.credentials.public_key = 'public' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.create_vpc.return_value = vpc - conn.get_all_vpcs.side_effect = self._get_all_vpcs - - subnet = MagicMock() - subnet.id = "subnet-id" - subnet.cidr_block = "10.10.129.0/24" - conn.create_subnet.return_value = subnet - conn.get_all_subnets.side_effect = self.get_all_subnets + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + 
mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'i-12345678'}]} + instance = MagicMock() + mock_res.Instance.return_value = instance - inf = InfrastructureInfo() - inf.auth = auth - inf.radl = radl res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - # check the instance_type selected is correct - self.assertIn(".micro", conn.run_instances.call_args_list[1][1]["instance_type"]) - self.assertEqual(conn.create_vpc.call_args_list[0][0][0], "10.0.128.0/22") - self.assertEqual(conn.create_subnet.call_args_list[0][0], ('vpc-id', '10.0.128.0/24')) - self.assertEqual(conn.create_subnet.call_args_list[1][0], ('vpc-id', '10.0.129.0/24')) - self.assertEqual(conn.create_subnet.call_args_list[2][0], ('vpc-id', '10.0.130.0/24')) - + success, msg = res[0] + self.assertTrue(success, msg="ERROR: launching a VM: %s" % msg) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(len(mock_conn.create_security_group.call_args_list), 3) + self.assertEqual(mock_conn.create_security_group.call_args_list[0][1]['GroupName'], "im-%s" % inf.id) + self.assertEqual(mock_conn.create_security_group.call_args_list[1][1]['GroupName'], "sgname") + self.assertEqual(mock_conn.create_security_group.call_args_list[2][1]['GroupName'], "im-%s-net2" % inf.id) + mock_conn.run_instances.assert_called_once() - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_region): + def test_25_launch_spot(self, 
save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and provider_id = 'vpc-id.subnet-id') network net2 () @@ -272,7 +190,7 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ net_interface.0.dns_name = 'test' and net_interface.1.connection = 'net2' and disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and + disk.0.image.url = 'aws://us-east-1/ami-id' and disk.0.os.credentials.username = 'user' and disk.0.os.credentials.private_key = 'private' and disk.0.os.credentials.public_key = 'public' and @@ -287,45 +205,29 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - region = MagicMock() - get_region.return_value = region - - conn = MagicMock() - VPCConnection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - image = MagicMock() - device = MagicMock() - reservation = MagicMock() + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'iid'}]} instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value = True + mock_res.Instance.return_value = instance instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - - zone = MagicMock() - zone.name = 'us-east-1' - 
conn.get_all_zones.return_value = [zone] - history = MagicMock() - history.price = 0.1 - conn.get_spot_price_history.return_value = [history] - - request = MagicMock() - request.id = "id" - conn.request_spot_instances.return_value = [request] + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.describe_availability_zones.return_value = {'AvailabilityZones': [{'ZoneName': 'us-east-1'}]} + mock_conn.describe_spot_price_history.return_value = {'SpotPriceHistory': [{'SpotPrice': '0.1'}]} + mock_conn.request_spot_instances.return_value = {'SpotInstanceRequests': [{'SpotInstanceRequestId': 'sid'}]} inf = InfrastructureInfo() inf.auth = auth @@ -334,10 +236,8 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ self.assertTrue(success, msg="ERROR: launching a VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_30_updateVMInfo(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (router = '10.0.10.0/24,vrouter') @@ -364,6 +264,36 @@ def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = 
self.get_ec2_cloud() + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} + instance = MagicMock() + mock_res.Instance.return_value = instance + instance.id = "iid" + instance.tags = [] + instance.virtualization_type = "vt" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} + instance.instance_type = "t1.micro" + instance.launch_time = datetime.datetime.now() + instance.public_ip_address = "158.42.1.1" + instance.private_ip_address = "10.0.0.1" + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'iid'}]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_route_tables.return_value = {'RouteTables': [{'RouteTableId': 'routet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 'Id': 'zone-id'}]} + mock_conn.create_hosted_zone.return_value = {'HostedZone': {'Id': 'zone-idc'}} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'some.test.domain.com.'}]} + inf = MagicMock() vm1 = MagicMock() system1 = MagicMock() @@ -373,78 +303,38 @@ def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): inf.vm_list = [vm1] vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.tags = [] - instance.virtualization_type 
= "vt" - instance.placement = "us-east-1" - instance.state = "running" - instance.instance_type = "t1.micro" - instance.launch_time = "2016-12-31T00:00:00" - instance.ip_address = "158.42.1.1" - instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - conn.get_all_addresses.return_value = [address] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - dns_conn.get_zone.return_value = None - zone = MagicMock() - zone.get_a.return_value = None - dns_conn.create_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - routet = MagicMock() - routet.id = "routet-id" - conn.get_all_route_tables.return_value = [routet] - success, vm = ec2_cloud.updateVMInfo(vm, auth) self.assertTrue(success, msg="ERROR: updating VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - self.assertEqual(dns_conn.create_zone.call_count, 2) - self.assertEqual(dns_conn.get_zone.call_count, 2) - self.assertEqual(dns_conn.create_zone.call_args_list[0][0][0], "domain.com.") - self.assertEqual(changes.add_change.call_args_list[0], call('CREATE', 'test.domain.com.', 'A')) - self.assertEqual(changes.add_change.call_args_list[1], call('CREATE', 'some.test.domain.com.', 'A')) - self.assertEqual(change.add_value.call_args_list[0], call('158.42.1.1')) - self.assertEqual(conn.create_route.call_args_list[0], call('routet-id', '10.0.10.0/24', instance_id='int-id')) + self.assertEqual(mock_conn.list_hosted_zones_by_name.call_count, 2) + 
self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.create_route.call_args_list[0][1], {'RouteTableId': 'routet-id', + 'DestinationCidrBlock': '10.0.10.0/24', + 'InstanceId': 'int-id'}) # Test using PRIVATE_NET_MASKS setting 10.0.0.0/8 as public net old_priv = Config.PRIVATE_NET_MASKS Config.PRIVATE_NET_MASKS = ["172.16.0.0/12", "192.168.0.0/16"] - instance.ip_address = None + + instance.public_ip_address = None instance.private_ip_address = "10.0.0.1" - conn.get_all_addresses.return_value = [] + mock_conn.describe_addresses.return_value = {'Addresses': []} + success, vm = ec2_cloud.updateVMInfo(vm, auth) Config.PRIVATE_NET_MASKS = old_priv self.assertEqual(vm.getPublicIP(), "10.0.0.1") self.assertEqual(vm.getPrivateIP(), None) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_30_updateVMInfo_spot(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_40_updateVMInfo_spot(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') system test ( @@ -470,113 +360,66 @@ def test_30_updateVMInfo_spot(self, get_connection): inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() - instance.update.return_value = True - instance.tags = [] + mock_res.Instance.return_value = instance + instance.id = "iid" instance.virtualization_type = "vt" - 
instance.placement = "us-east-1" - instance.state = "running" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} instance.instance_type = "t1.micro" instance.launch_time = "2016-12-31T00:00:00" instance.ip_address = "158.42.1.1" instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - conn.get_all_addresses.return_value = [] - - sir = MagicMock() - sir.state = "" - sir.id = "id" - conn.get_all_spot_instance_requests.return_value = [sir] - - volume = MagicMock() - volume.status = "available" - volume.id = "volid" - conn.create_volume.return_value = volume - conn.attach_volume.return_value = True + mock_conn.describe_addresses.return_value = {'Addresses': []} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': [{'InstanceId': 'id', + 'State': ''}]} + mock_conn.create_volume.return_value = {'VolumeId': 'volid', 'State': 'available'} success, vm = ec2_cloud.updateVMInfo(vm, auth) self.assertTrue(success, msg="ERROR: updating VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_40_stop(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.stop(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") - self.assertNotIn("ERROR", 
self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_50_start(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_50_vmop(self, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance success, _ = ec2_cloud.start(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") + self.assertTrue(success, msg="ERROR: starting VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.start.call_args_list, [call()]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_52_reboot(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.reboot.return_value = True - reservation.instances = [instance] - 
conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.start(vm, auth) + success, _ = ec2_cloud.stop(vm, auth) + self.assertTrue(success, msg="ERROR: stopping VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) - self.assertTrue(success, msg="ERROR: stopping VM info.") + success, _ = ec2_cloud.reboot(vm, auth) + self.assertTrue(success, msg="ERROR: rebooting VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.reboot.call_args_list, [call()]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_55_alter(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_55_alter(self, mock_boto_session): radl_data = """ network net () system test ( @@ -605,27 +448,29 @@ def test_55_alter(self, get_connection): inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - instance.state = "stopped" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance + instance.id = "iid" + instance.instance_type = "t1.micro" + instance.state = {'Name': 'stopped'} success, _ = ec2_cloud.alterVM(vm, new_radl, auth) self.assertTrue(success, msg="ERROR: modifying VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % 
self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) + self.assertEqual(instance.start.call_args_list, [call()]) + self.assertEqual(instance.modify_attribute.call_args_list, [call(Attribute='instanceType', Value='t3a.small')]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_60_finalize(self, record_sets, connect_to_region, sleep, get_connection): + def test_60_finalize(self, sleep, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (outbound = 'yes') @@ -655,145 +500,117 @@ def test_60_finalize(self, record_sets, connect_to_region, sleep, get_connection vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) vm.dns_entries = [('test', 'domain.com.', '158.42.1.1')] - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} instance = MagicMock() - device = MagicMock() - instance.update.return_value = True - instance.terminate.return_value = True - instance.block_device_mapping = {"device": device} - device.volume_id = "volid" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - address.instance_id = "id-1" - address.disassociate.return_value = True - address.release.return_value = True - conn.get_all_addresses.return_value = [address] - - 
conn.get_all_spot_instance_requests.return_value = [] - - sg = MagicMock() - sg.name = "im-1" - sg.description = "Security group created by the IM" - sg.instances.return_value = [] - sg.revoke.return_value = True - sg.delete.return_value = True - sg1 = MagicMock() - sg1.name = "im-1-net" - sg1.description = "" - sg1.instances.return_value = [] - sg1.revoke.return_value = True - sg1.delete.return_value = True - sg2 = MagicMock() - sg2.name = "im-1-net2" - sg2.description = "Security group created by the IM" - sg2.instances.return_value = [] - sg2.revoke.return_value = True - sg2.delete.return_value = True - conn.get_all_security_groups.return_value = [sg, sg1, sg2] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - zone = MagicMock() - record = MagicMock() - zone.id = "zid" - zone.get_a.return_value = record - dns_conn.get_all_rrsets.return_value = [] - dns_conn.get_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - ig = MagicMock() - ig.id = "ig-id" - conn.get_all_internet_gateways.return_value = [ig] + mock_res.Instance.return_value = instance + instance.block_device_mappings = [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeId': 'volid'}}] + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'id-1'}]} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': []} + + mock_conn.describe_security_groups.return_value = {'SecurityGroups': [ + {'GroupId': 'sg1', 'GroupName': 'im-1', 'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg2', 'GroupName': 'im-1-net', 'Description': '', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg3', 'GroupName': 'im-1-net2', 
'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'} + ]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 'Id': 'zone-id'}]} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'test.domain.com.'}]} + mock_conn.describe_internet_gateways.return_value = {'InternetGateways': [{'InternetGatewayId': 'ig-id'}]} success, _ = ec2_cloud.finalize(vm, True, auth) self.assertTrue(success, msg="ERROR: finalizing VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - self.assertEqual(changes.add_change.call_args_list, [call('DELETE', 'test.domain.com.', 'A')]) - self.assertEqual(change.add_value.call_args_list, [call('158.42.1.1')]) - self.assertEqual(sg.delete.call_args_list, [call()]) - self.assertEqual(sg1.delete.call_args_list, []) - self.assertEqual(sg2.delete.call_args_list, [call()]) - self.assertEqual(conn.delete_subnet.call_args_list, [call('subnet-id')]) - self.assertEqual(conn.delete_vpc.call_args_list, [call('vpc-id')]) - self.assertEqual(conn.delete_internet_gateway.call_args_list, [call('ig-id')]) - self.assertEqual(conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.delete_security_group.call_args_list, [call(GroupId='sg1'), + call(GroupId='sg3')]) + self.assertEqual(instance.terminate.call_args_list, [call()]) + self.assertEqual(mock_conn.delete_subnet.call_args_list, 
[call(SubnetId='subnet-id')]) + self.assertEqual(mock_conn.delete_vpc.call_args_list, [call(VpcId='vpc-id')]) + self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id')]) + self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id', + VpcId='vpc-id')]) + + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_70_create_snapshot(self, sleep, get_connection): + def test_70_create_snapshot(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() inf = MagicMock() - vm = VirtualMachine(inf, "region;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) + vm = VirtualMachine(inf, "us-east-1;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.create_image.return_value = "image-ami" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance + instance.create_image.return_value = {'ImageId': 'image-ami'} success, new_image = ec2_cloud.create_snapshot(vm, 0, "image_name", True, auth) self.assertTrue(success, msg="ERROR: creating snapshot: %s" % new_image) - self.assertEqual(new_image, "aws://region/image-ami") - self.assertEqual(instance.create_image.call_args_list, [call('image_name', - description='AMI automatically generated by IM', - no_reboot=True)]) + self.assertEqual(new_image, "aws://us-east-1/image-ami") + self.assertEqual(instance.create_image.call_args_list, [call(Name='image_name', + Description='AMI automatically 
generated by IM', + NoReboot=True, + TagSpecifications=[{'ResourceType': 'image', + 'Tags': [{'Key': 'instance_id', + 'Value': 'id1'}]}])]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_80_delete_image(self, sleep, get_connection): + def test_80_delete_image(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - conn = MagicMock() - get_connection.return_value = conn - conn.deregister_image.return_value = True + mock_conn = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - success, msg = ec2_cloud.delete_image('aws://region/image-ami', auth) + success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) self.assertTrue(success, msg="ERROR: deleting image. 
%s" % msg) - self.assertEqual(conn.deregister_image.call_args_list, [call('image-ami', delete_snapshot=True)]) + self.assertEqual(mock_conn.deregister_image.call_args_list, [call(ImageId='image-ami')]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_90_list_images(self, sleep, get_connection): + def test_90_list_images(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - conn = MagicMock() - get_connection.return_value = conn - image = MagicMock() - image.id = "ami-123456789012" - image.name = "image_name" - conn.get_all_images.return_value = [image] - + mock_conn = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-123456789012', + 'Name': 'image_name'}]} res = ec2_cloud.list_images(auth, filters={'region': 'us-east-1'}) self.assertEqual(res, [{'uri': 'aws://us-east-1/ami-123456789012', 'name': 'us-east-1/image_name'}]) diff --git a/test/unit/connectors/Fogbow.py b/test/unit/connectors/Fogbow.py index ad950027..0625d2ea 100755 --- a/test/unit/connectors/Fogbow.py +++ b/test/unit/connectors/Fogbow.py @@ -300,7 +300,7 @@ def test_30_updateVMInfo(self, sleep, requests): self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(vm.info.systems[0].getValue("net_interface.1.ip"), "10.0.0.1") self.assertEqual(vm.info.systems[0].getValue("net_interface.0.ip"), "8.8.8.8") - self.assertEqual(vm.info.systems[0].getValue("memory.size"), 1073741824) + self.assertEqual(vm.info.systems[0].getValue("memory.size"), 1024000000) 
self.assertEqual(vm.info.systems[0].getValue("disk.1.device"), "/dev/sdb") data = json.loads(requests.call_args_list[1][1]["data"]) diff --git a/test/unit/connectors/GCE.py b/test/unit/connectors/GCE.py index fd8524ea..a9d976aa 100755 --- a/test/unit/connectors/GCE.py +++ b/test/unit/connectors/GCE.py @@ -129,7 +129,7 @@ def test_10_concrete(self, get_driver): concrete = gce_cloud.concreteSystem(radl_system, auth) self.assertEqual(len(concrete), 1) - self.assertEqual(concrete[0].getValue("memory.size"), 2147483648) + self.assertEqual(concrete[0].getValue("memory.size"), 2048000000) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) @patch('libcloud.compute.drivers.gce.GCENodeDriver') diff --git a/test/unit/connectors/Kubernetes.py b/test/unit/connectors/Kubernetes.py index 3e8667d7..742c7836 100755 --- a/test/unit/connectors/Kubernetes.py +++ b/test/unit/connectors/Kubernetes.py @@ -188,7 +188,7 @@ def test_20_launch(self, save_data, requests): 'labels': {'name': 'test-1'}}, "spec": { "accessModes": ["ReadWriteOnce"], - "resources": {"requests": {"storage": 10737418240}}, + "resources": {"requests": {"storage": 10000000000}}, }, } self.assertEqual(requests.call_args_list[1][0][1], @@ -237,8 +237,8 @@ def test_20_launch(self, save_data, requests): "imagePullPolicy": "Always", "ports": [{"containerPort": 8080, "protocol": "TCP"}], "resources": { - "limits": {"cpu": "1", "memory": "536870912"}, - "requests": {"cpu": "1", "memory": "536870912"}, + "limits": {"cpu": "1", "memory": "512000000"}, + "requests": {"cpu": "1", "memory": "512000000"}, }, "env": [{"name": "var", "value": "some_val"}, {"name": "var2", "value": "some,val2"}], diff --git a/test/unit/connectors/OCCI.py b/test/unit/connectors/OCCI.py index 5a967aed..e07e10d3 100755 --- a/test/unit/connectors/OCCI.py +++ b/test/unit/connectors/OCCI.py @@ -327,7 +327,7 @@ def test_30_updateVMInfo(self, get_keystone_uri, requests): self.assertNotIn("ERROR", 
self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) memory = vm.info.systems[0].getValue("memory.size") - self.assertEqual(memory, 1824522240) + self.assertEqual(memory, 1740000000) @patch('requests.request') @patch('IM.connectors.OCCI.KeyStoneAuth.get_keystone_uri') diff --git a/test/unit/connectors/OSCAR.py b/test/unit/connectors/OSCAR.py index 9c4a4a83..fa4715a6 100755 --- a/test/unit/connectors/OSCAR.py +++ b/test/unit/connectors/OSCAR.py @@ -124,7 +124,7 @@ def test_20_launch(self, save_data, requests): radl_data = """ system test ( name = 'plants' and - memory.size = 2G and + memory.size = 2GI and cpu.count = 1.0 and cpu.sgx = 1 and gpu.count = 1 and @@ -288,7 +288,7 @@ def test_55_alter(self, requests): new_radl_data = """ system test ( cpu.count>=2 and - memory.size>=4G + memory.size>=4GI )""" new_radl = radl_parse.parse_radl(new_radl_data) @@ -306,7 +306,7 @@ def test_55_alter(self, requests): self.assertTrue(success, msg="ERROR: modifying VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(new_vm.info.systems[0].getValue("cpu.count"), 2) - self.assertEqual(new_vm.info.systems[0].getFeature("memory.size").getValue("M"), 4096) + self.assertEqual(new_vm.info.systems[0].getFeature("memory.size").getValue("M"), 4295) self.assertEqual(requests.call_args_list[0][0][0], "PUT") self.assertEqual(requests.call_args_list[0][0][1], "http://oscar.com:80/system/services/fname") self.assertEqual(json.loads(requests.call_args_list[0][1]['data']), {'memory': '4096Mi', 'cpu': '2'}) diff --git a/test/unit/connectors/OpenNebula.py b/test/unit/connectors/OpenNebula.py index 9c897651..d73fad01 100755 --- a/test/unit/connectors/OpenNebula.py +++ b/test/unit/connectors/OpenNebula.py @@ -144,7 +144,7 @@ def test_20_launch(self, save_data, getONEVersion, server_proxy): OS = [ ARCH = "x86_64" ] DISK = [ IMAGE_ID = "1" ] - DISK = [ SAVE = no, TYPE = fs , FORMAT = qcow2, SIZE = 1024, TARGET 
= hdb ] + DISK = [ SAVE = no, TYPE = fs , FORMAT = qcow2, SIZE = 1000, TARGET = hdb ] SCHED_REQUIREMENTS = "CLUSTER_ID=\\"0\\""\n""" diff --git a/test/unit/test_ctxt_agent.py b/test/unit/test_ctxt_agent.py index bc4894b2..3f9dac14 100755 --- a/test/unit/test_ctxt_agent.py +++ b/test/unit/test_ctxt_agent.py @@ -277,7 +277,8 @@ def test_95_install_ansible_roles(self): copy_content = yaml_data[0]['tasks'][1]['copy'][pos + 9:-2] self.assertEqual(copy_content, "[{src: ansible_role}, {name: hadoop, src: " "'git+https://github.com/micafer/ansible-role-hadoop'}]") - self.assertEqual(yaml_data[0]['tasks'][2]['command'][:47], "ansible-galaxy install -c -r /tmp/galaxy_roles_") + self.assertEqual(yaml_data[0]['tasks'][2]['command'][:69], + ctxt_agent.VENV_DIR + "/bin/ansible-galaxy install -c -r /tmp/galaxy_roles_") os.unlink(res) @@ -300,8 +301,8 @@ def test_99_install_ansible_collections(self): pos = yaml_data[0]['tasks'][0]['copy'].find('content="') copy_content = yaml_data[0]['tasks'][0]['copy'][pos + 9:-2] self.assertEqual(copy_content, "{collections: [{name: ns.collection, version: '1.0'}]}") - self.assertEqual(yaml_data[0]['tasks'][1]['command'][:64], - "ansible-galaxy collection install -c -r /tmp/galaxy_collections_") + self.assertEqual(yaml_data[0]['tasks'][1]['command'][:86], + ctxt_agent.VENV_DIR + "/bin/ansible-galaxy collection install -c -r /tmp/galaxy_collections_") os.unlink(res) diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index d9d154cc..68ce6315 100644 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -1141,6 +1141,7 @@ def test_check_oidc_groups(self, openidclient): Config.OIDC_ISSUERS = ["https://iam-test.indigo-datacloud.eu/"] Config.OIDC_AUDIENCE = None Config.OIDC_GROUPS = ["urn:mace:egi.eu:group:demo.fedcloud.egi.eu:role=member#aai.egi.eu"] + Config.OIDC_GROUPS_CLAIM = "eduperson_entitlement" IM.check_oidc_token(im_auth) @@ -1551,8 +1552,8 @@ def test_estimate_resources(self): 'cloud0': { 'cloudType': 
'Dummy', 'cloudEndpoint': 'http://server.com:80/path', - 'compute': [{'cpuCores': 2, 'memoryInMegabytes': 4096, 'diskSizeInGigabytes': 40}, - {'cpuCores': 1, 'memoryInMegabytes': 2048, 'diskSizeInGigabytes': 10}], + 'compute': [{'cpuCores': 2, 'memoryInMegabytes': 4000, 'diskSizeInGigabytes': 40}, + {'cpuCores': 1, 'memoryInMegabytes': 2000, 'diskSizeInGigabytes': 10}], 'storage': [{'sizeInGigabytes': 100}] }}) diff --git a/tox.ini b/tox.ini index 7a1f1e46..05c22d08 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m coverage run --source=. -m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -f html -o bandit.html -s B108,B601,B608,B507,B104 -ll +commands = bandit IM -r -ll [flake8] ignore = E402,E265,W605,W504,F811