diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index c7adc89f..ff3213a4 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -1434,10 +1434,7 @@ def check_oidc_token(im_auth): if Config.OIDC_GROUPS: # Get user groups from any of the possible fields - user_groups = userinfo.get('groups', # Generic - userinfo.get('entitlements', # GEANT - userinfo.get('eduperson_entitlement', # EGI Check-in - []))) + user_groups = userinfo.get(Config.OIDC_GROUPS_CLAIM, []) if not set(Config.OIDC_GROUPS).issubset(user_groups): raise InvaliddUserException("Invalid InfrastructureManager credentials. " + diff --git a/IM/__init__.py b/IM/__init__.py index ab315621..d03e90b7 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,7 +19,7 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.17.1' +__version__ = '1.18.0' __author__ = 'Miguel Caballer' diff --git a/IM/config.py b/IM/config.py index 3f5c9602..80478fa2 100644 --- a/IM/config.py +++ b/IM/config.py @@ -107,6 +107,7 @@ class Config: OIDC_USER_INFO_PATH = "/userinfo" OIDC_INSTROSPECT_PATH = "/introspect" OIDC_GROUPS = [] + OIDC_GROUPS_CLAIM = "groups" VM_NUM_USE_CTXT_DIST = 30 DELAY_BETWEEN_VM_RETRIES = 5 VERIFI_SSL = False diff --git a/IM/connectors/CloudConnector.py b/IM/connectors/CloudConnector.py index 01d7a426..ad275b56 100644 --- a/IM/connectors/CloudConnector.py +++ b/IM/connectors/CloudConnector.py @@ -738,10 +738,6 @@ def manage_dns_entries(self, op, vm, auth_data, extra_args=None): vm.dns_entries = [] if op == "add": dns_entries = [entry for entry in self.get_dns_entries(vm) if entry not in vm.dns_entries] - dns_entries = [] - for entry in self.get_dns_entries(vm): - if entry not in vm.dns_entries: - dns_entries.append(entry) else: dns_entries = list(vm.dns_entries) if dns_entries: diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 1b6fd17b..0daedc7a 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1,5 +1,5 @@ # IM - Infrastructure Manager -# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia +# Copyright (C) 2024 - GRyCAP - Universitat Politecnica de Valencia # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,11 +20,9 @@ from netaddr import IPNetwork, IPAddress, spanning_cidr try: - import boto.ec2 - import boto.vpc - import boto.route53 + import boto3 except Exception as ex: - print("WARN: Boto library not correctly installed. EC2CloudConnector will not work!.") + print("WARN: Boto3 library not correctly installed. EC2CloudConnector will not work!.") print(ex) try: @@ -104,7 +102,6 @@ class EC2CloudConnector(CloudConnector): def __init__(self, cloud_info, inf): self.connection = None - self.route53_connection = None self.auth = None CloudConnector.__init__(self, cloud_info, inf) @@ -163,14 +160,14 @@ def update_system_info_from_instance(system, instance_type): system.addFeature(Feature("gpu.model", "=", instance_type.gpu_model), conflict="other", missing="other") # Get the EC2 connection object - def get_connection(self, region_name, auth_data): + def get_connection(self, region_name, auth_data, service_name, object_type='client'): """ Get a :py:class:`boto.ec2.connection` to interact with. Arguments: - region_name(str): EC2 region to connect. 
- auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - Returns: a :py:class:`boto.ec2.connection` or None in case of error + Returns: a :py:class:`boto3.EC2.Client` or None in case of error """ auths = auth_data.getAuthInfo(self.type) if not auths: @@ -179,63 +176,28 @@ def get_connection(self, region_name, auth_data): auth = auths[0] if self.connection and self.auth.compare(auth_data, self.type): - return self.connection + if object_type == 'resource': + return self.connection.resource(service_name) + else: + return self.connection.client(service_name) else: self.auth = auth_data - conn = None try: if 'username' in auth and 'password' in auth: - region = boto.ec2.get_region(region_name) - if region: - token = auth.get('token') - conn = boto.vpc.VPCConnection(aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - security_token=token, - region=region) + if region_name != 'universal': + region_names = boto3.session.Session().get_available_regions('ec2') + if region_name not in region_names: + raise Exception("Incorrect region name: " + region_name) + + session = boto3.session.Session(region_name=region_name, + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password'], + aws_session_token=auth.get('token')) + self.connection = session + if object_type == 'resource': + return session.resource(service_name) else: - raise Exception( - "Incorrect region name: " + region_name) - else: - self.log_error("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - raise Exception("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - - except Exception as ex: - self.log_exception( - "Error getting the region " + region_name) - raise Exception("Error getting the region " + - region_name + ": " + str(ex)) - - self.connection = conn - return conn - - # Get the Route53 connection object - def get_route53_connection(self, region_name, auth_data): - """ - Get a :py:class:`boto.route53.connection` to interact with. - - Arguments: - - region_name(str): AWS region to connect. - - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. 
- Returns: a :py:class:`boto.route53.connection` or None in case of error - """ - auths = auth_data.getAuthInfo(self.type) - if not auths: - raise Exception("No auth data has been specified to EC2.") - else: - auth = auths[0] - - if self.route53_connection and self.auth.compare(auth_data, self.type): - return self.route53_connection - else: - self.auth = auth_data - conn = None - try: - if 'username' in auth and 'password' in auth: - conn = boto.route53.connect_to_region(region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) + return session.client(service_name) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -243,11 +205,8 @@ def get_route53_connection(self, region_name, auth_data): "username (Access Key) and password (Secret Key)") except Exception as ex: - self.log_exception("Error conneting Route53 in region " + region_name) - raise Exception("Error conneting Route53 in region" + region_name + ": " + str(ex)) - - self.route53_connection = conn - return conn + self.log_exception("Error getting the region " + region_name) + raise Exception("Error getting the region " + region_name + ": " + str(ex)) # path format: aws://eu-west-1/ami-00685b74 @staticmethod @@ -342,10 +301,28 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.get_all_security_groups(filters={'group-name': sg_name})[0] + return conn.describe_security_groups(Filters=[{'Name': 'group-name', + 'Values': [sg_name]}])['SecurityGroups'][0] except Exception: return None + @staticmethod + def _get_default_security_rules(sg): + return [ + { + "IpProtocol": "tcp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + { + "IpProtocol": "udp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + ] + def create_security_groups(self, conn, inf, radl, vpc): res = [] try: @@ -360,10 +337,12 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: %s" % sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpcId=vpc) # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + IpPermissions=self._get_default_security_rules(sg)) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -373,7 +352,7 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) while system.getValue("net_interface." + str(i) + ".connection"): network_name = system.getValue("net_interface." 
+ str(i) + ".connection") @@ -389,7 +368,9 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: " + sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpcId=vpc) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -399,12 +380,12 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) try: # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + IpPermissions=self._get_default_security_rules(sg)) except Exception as addex: self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) @@ -415,17 +396,23 @@ def create_security_groups(self, conn, inf, radl, vpc): for outport in outports: if outport.is_range(): - try: - sg.authorize(outport.get_protocol(), outport.get_port_init(), - outport.get_port_end(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) + from_port = outport.get_port_init() + to_port = outport.get_port_end() else: - try: - sg.authorize(outport.get_protocol(), outport.get_remote_port(), - outport.get_remote_port(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) + from_port = outport.get_remote_port() + to_port = outport.get_remote_port() + + try: + conn.authorize_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': outport.get_protocol(), + 'FromPort': from_port, + 'ToPort': to_port, + 'IpRanges': [{'CidrIp': outport.get_remote_cidr()}]} + ]) + except Exception as addex: + self.log_warn("Exception adding SG rules. 
Probably the rules exists:" + str(addex)) i += 1 except Exception as ex: @@ -444,13 +431,21 @@ def get_default_subnet(conn): vpc_id = None subnet_id = None - for vpc in conn.get_all_vpcs(): - if vpc.is_default or ('Name' in vpc.tags and vpc.tags['Name'] == "default"): - vpc_id = vpc.id - for subnet in conn.get_all_subnets(filters={"vpcId": vpc_id}): - subnet_id = subnet.id - break - break + vpcs = conn.describe_vpcs(Filters=[{'Name': 'is-default', 'Values': ['true']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + # Just in case there is no default VPC, in some old accounts + # get the VPC named default + if not vpc_id: + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': ['default']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + if vpc_id: + subnets = conn.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['Subnets'] + if subnets: + subnet_id = subnets[0]['SubnetId'] return vpc_id, subnet_id @@ -478,11 +473,12 @@ def get_vpc_cidr(self, radl, conn, inf): Get a common CIDR in all the RADL nets """ nets = [] - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()] + nets, + subnets + nets, inf, 127) nets.append(net_cidr) @@ -505,73 +501,85 @@ def create_networks(self, conn, radl, inf): else: vpc_cird = str(common_cird) vpc_id = None - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()], + subnets, inf, 127) net.delValue('cidr') # First create the VPC if vpc_id is None: # Check if it already exists - vpcs = conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}) + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf.id]}])['Vpcs'] if vpcs: - vpc_id = vpcs[0].id + vpc_id = vpcs[0]['VpcId'] self.log_debug("VPC %s exists. Do not create." % vpc_id) else: # if not create it self.log_info("Creating VPC with cidr: %s." % vpc_cird) - vpc = conn.create_vpc(vpc_cird) - time.sleep(1) - vpc.add_tag("IM-INFRA-ID", inf.id) - vpc_id = vpc.id + vpc = conn.create_vpc(CidrBlock=vpc_cird, + TagSpecifications=[{'ResourceType': 'vpc', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) + vpc_id = vpc['Vpc']['VpcId'] self.log_info("VPC %s created." % vpc_id) self.log_info("Creating Internet Gateway.") - ig = conn.create_internet_gateway() - time.sleep(1) - ig.add_tag("IM-INFRA-ID", inf.id) - self.log_info("Internet Gateway %s created." % ig.id) - conn.attach_internet_gateway(ig.id, vpc_id) + ig = conn.create_internet_gateway(TagSpecifications=[{'ResourceType': 'internet-gateway', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) + ig_id = ig['InternetGateway']['InternetGatewayId'] + self.log_info("Internet Gateway %s created." 
% ig_id) + conn.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) self.log_info("Adding route to the IG.") - for route_table in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - conn.create_route(route_table.id, "0.0.0.0/0", ig.id) + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: + conn.create_route(RouteTableId=rt['RouteTableId'], + DestinationCidrBlock="0.0.0.0/0", + GatewayId=ig_id) # Now create the subnet # Check if it already exists - subnets = conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id, - "tag:IM-SUBNET-ID": net.id}) + subnets = conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [inf.id]}, + {'Name': 'tag:IM-SUBNET-ID', + 'Values': [net.id]}])['Subnets'] if subnets: subnet = subnets[0] self.log_debug("Subnet %s exists. Do not create." % net.id) net.setValue('cidr', subnet.cidr_block) else: self.log_info("Create subnet for net %s." % net.id) - subnet = conn.create_subnet(vpc_id, net_cidr) - self.log_info("Subnet %s created." % subnet.id) - time.sleep(1) - subnet.add_tag("IM-INFRA-ID", inf.id) - subnet.add_tag("IM-SUBNET-ID", net.id) + subnet = conn.create_subnet(VpcId=vpc_id, CidrBlock=net_cidr, + TagSpecifications=[{'ResourceType': 'subnet', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}, + {'Key': 'IM-SUBNET-ID', + 'Value': net.id}]}]) + self.log_info("Subnet %s created." % subnet['Subnet']['SubnetId']) net.setValue('cidr', net_cidr) # Set also the cidr in the inf RADL inf.radl.get_network_by_id(net.id).setValue('cidr', net_cidr) - net.setValue('provider_id', "%s.%s" % (vpc_id, subnet.id)) + net.setValue('provider_id', "%s.%s" % (vpc_id, subnet['Subnet']['SubnetId'])) except Exception as ex: self.log_exception("Error creating subnets or vpc.") try: - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting subnet: %s" % subnet.id) - conn.delete_subnet(subnet.id) - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting vpc: %s" % vpc.id) - conn.delete_vpc(vpc.id) - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": inf.id}): - self.log_info("Deleting Internet Gateway: %s" % ig.id) - conn.delete_internet_gateways(ig.id) + for subnet in conn.describe_subnets(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['Subnets']: + self.log_info("Deleting subnet: %s" % subnet['Subnets']['SubnetId']) + conn.delete_subnet(SubnetId=subnet['Subnets']['SubnetId']) + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['Vpcs']: + self.log_info("Deleting vpc: %s" % vpc_id) + conn.delete_vpc(VpcId=vpc_id) + for ig in conn.describe_internet_gateways(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['InternetGateways']: + self.log_info("Deleting Internet Gateway: %s" % ig_id) + conn.delete_internet_gateways(InternetGatewayId=ig_id) except Exception: self.log_exception("Error deleting subnets or vpc.") raise ex @@ -584,10 +592,10 @@ def get_networks(self, conn, radl): if provider_id: parts = provider_id.split(".") if len(parts) == 2 and parts[0].startswith("vpc-") and parts[1].startswith("subnet-"): - vpc = conn.get_all_vpcs([parts[0]]) - subnet = conn.get_all_subnets([parts[1]]) + vpc = conn.describe_vpcs(Filters=[{'Name': 'vpc-id', 'Values': [parts[0]]}])['Vpcs'] + subnet = conn.describe_subnets(Filters=[{'Name': 'subnet-id', 'Values': [parts[1]]}])['Subnets'] if vpc and subnet: - return vpc[0].id, subnet[0].id + return vpc[0]['VpcId'], 
subnet[0]['SubnetId'] elif vpc: raise Exception("Incorrect subnet value in provider_id value: %s" % provider_id) else: @@ -617,7 +625,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData(system.getValue("disk.0.image.url")) self.log_info("Connecting with the region: " + region_name) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') res = [] spot = False @@ -651,16 +659,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): for i in range(num_vm): res.append((False, "Error no instance type available for the requirements.")) - image = conn.get_image(ami) + image = conn.describe_images(ImageIds=[ami])['Images'] if not image: for i in range(num_vm): res.append((False, "Incorrect AMI selected")) return res + image = image[0] block_device_name = None - for name, device in image.block_device_mapping.items(): - if device.snapshot_id or device.volume_id: - block_device_name = name + for device in image['BlockDeviceMappings']: + if device.get('Ebs', {}).get('SnapshotId'): + block_device_name = device.get('DeviceName') if not block_device_name: self.log_error("Error getting correct block_device name from AMI: " + str(ami)) @@ -714,7 +723,6 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): inf.add_vm(vm) user_data = self.get_cloud_init_data(radl, vm, public_key, user) - bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping(conn) # Get data for the root disk size = None disk_type = "standard" @@ -722,22 +730,41 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): disk_type = system.getValue("disk.0.type") if system.getValue("disk.0.size"): size = system.getFeature("disk.0.size").getValue('G') - bdm[block_device_name] = boto.ec2.blockdevicemapping.BlockDeviceType(volume_type=disk_type, - size=size, - delete_on_termination=True) + bdm = [ + { + 'DeviceName': block_device_name, + 'Ebs': { + 'DeleteOnTermination': True, + 'VolumeType': disk_type + } + } + ] + if size: + bdm[0]['Ebs']['VolumeSize'] = size volumes = self.get_volumes(conn, vm) - for device, (size, snapshot_id, volume_id, disk_type) in volumes.items(): - bdm[device] = boto.ec2.blockdevicemapping.BlockDeviceType(snapshot_id=snapshot_id, - volume_id=volume_id, - volume_type=disk_type, - size=size, delete_on_termination=True) + for device, (size, snapshot_id, _, disk_type) in volumes.items(): + bd = { + 'DeviceName': device, + 'Ebs': { + 'DeleteOnTermination': True, + } + } + if size: + bd['Ebs']['VolumeSize'] = size + if snapshot_id: + bd['Ebs']['SnapshotId'] = snapshot_id + if disk_type: + bd['Ebs']['VolumeType'] = disk_type + bdm.append(bd) if spot: self.log_info("Launching a spot instance") err_msg += " a spot instance " err_msg += " of type: %s " % instance_type.name price = system.getValue("price") + if price: + price = str(price) # Realizamos el request de spot instances if system.getValue('availability_zone'): @@ -745,27 +772,42 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): else: availability_zone = 'us-east-1c' historical_price = 1000.0 - availability_zone_list = conn.get_all_zones() + availability_zone_list = conn.describe_availability_zones()['AvailabilityZones'] for zone in availability_zone_list: - history = conn.get_spot_price_history(instance_type=instance_type.name, - product_description=operative_system, - availability_zone=zone.name, - max_results=1) - self.log_debug("Spot price history for the region " + zone.name) + history = 
conn.describe_spot_price_history(InstanceTypes=[instance_type.name], + ProductDescriptions=[operative_system], + Filters=[{'Name': 'availability-zone', + 'Values': [zone['ZoneName']]}], + MaxResults=1)['SpotPriceHistory'] + + self.log_debug("Spot price history for the region " + zone['ZoneName']) self.log_debug(history) - if history and history[0].price < historical_price: - historical_price = history[0].price - availability_zone = zone.name + if history and float(history[0]['SpotPrice']) < historical_price: + historical_price = float(history[0]['SpotPrice']) + availability_zone = zone['ZoneName'] self.log_info("Launching the spot request in the zone " + availability_zone) - request = conn.request_spot_instances(price=price, image_id=image.id, count=1, - type='one-time', instance_type=instance_type.name, - placement=availability_zone, key_name=keypair_name, - security_group_ids=sg_ids, block_device_map=bdm, - subnet_id=subnet, user_data=user_data) + launch_spec = {'ImageId': image['ImageId'], + 'InstanceType': instance_type.name, + 'SecurityGroupIds': sg_ids, + 'BlockDeviceMappings': bdm, + 'SubnetId': subnet, + 'UserData': user_data} + + if keypair_name: + launch_spec['KeyName'] = keypair_name + if availability_zone: + launch_spec['Placement'] = {'AvailabilityZone': availability_zone} + + params = {'InstanceCount': 1, + 'Type': 'one-time', + 'LaunchSpecification': launch_spec} + if price: + params['SpotPrice'] = price + request = conn.request_spot_instances(**params) - if request: - ec2_vm_id = region_name + ";" + request[0].id + if request['SpotInstanceRequests']: + ec2_vm_id = region_name + ";" + request['SpotInstanceRequests'][0]['SpotInstanceRequestId'] self.log_debug("RADL:") self.log_debug(system) @@ -783,23 +825,42 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): err_msg += " an ondemand instance " err_msg += " of type: %s " % instance_type.name - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=subnet, - groups=sg_ids, - associate_public_ip_address=add_public_ip) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + interfaces = [ + { + 'DeviceIndex': 0, + 'SubnetId': subnet, + 'Groups': sg_ids, + 'AssociatePublicIpAddress': add_public_ip, + 'DeleteOnTermination': True + } + ] + + params = {'ImageId': image['ImageId'], + 'MinCount': 1, + 'MaxCount': 1, + 'InstanceType': instance_type.name, + 'NetworkInterfaces': interfaces, + 'BlockDeviceMappings': bdm, + 'UserData': user_data} + + if keypair_name: + params['KeyName'] = keypair_name + if placement: + params['Placement'] = {'AvailabilityZone': placement} + + im_username = "im_user" + if auth_data.getAuthInfo('InfrastructureManager'): + im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] + instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, + {'Key': 'IM-USER', 'Value': im_username}] + for key, value in tags.items(): + instace_tags.append({'Key': key, 'Value': value}) + params['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': instace_tags}] - reservation = conn.run_instances(image.id, min_count=1, max_count=1, key_name=keypair_name, - instance_type=instance_type.name, network_interfaces=interfaces, - placement=placement, block_device_map=bdm, user_data=user_data) + instances = conn.run_instances(**params)['Instances'] - if len(reservation.instances) == 1: - time.sleep(1) - instance = reservation.instances[0] - instance.add_tag("Name", self.gen_instance_name(system)) - for key, value in tags.items(): - 
instance.add_tag(key, value) - ec2_vm_id = region_name + ";" + instance.id + if instances: + ec2_vm_id = region_name + ";" + instances[0]['InstanceId'] self.log_debug("RADL:") self.log_debug(system) @@ -819,13 +880,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 - # if all the VMs have failed, remove the sgs + # if all the VMs have failed, remove the sgs and nets if all_failed: + try: + self.delete_networks(conn, inf.id) + except Exception: + self.log_exception("Error deleting networks.") if sg_ids: for sgid in sg_ids: self.log_info("Remove the SG: %s" % sgid) try: - conn.delete_security_group(group_id=sgid) + conn.delete_security_group(GroupId=sgid) except Exception: self.log_exception("Error deleting SG.") @@ -836,29 +901,29 @@ def create_volume(self, conn, disk_size, placement=None, vol_type=None, timeout= Create an EBS volume Arguments: - - conn(:py:class:`boto.ec2.connection`): object to connect to EC2 API. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - disk_size(int): The size of the new volume, in GiB - placement(str): The availability zone in which the Volume will be created. - type(str): Type of the volume: standard | io1 | gp2. - timeout(int): Time needed to create the volume. - Returns: a :py:class:`boto.ec2.volume.Volume` of the new volume + Returns: a :py:dict:`boto3.EC2.Volume` of the new volume """ if placement is None: - placement = conn.get_all_zones()[0] - volume = conn.create_volume(disk_size, placement, volume_type=vol_type) + placement = conn.describe_zones()['Zones'][0]['ZoneName'] + volume = conn.create_volume(Size=disk_size, AvailabilityZone=placement, VolumeType=vol_type) cont = 0 err_states = ["error"] - while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: - self.log_info("State: " + str(volume.status)) + while str(volume['Status']) != 'available' and str(volume['Status']) not in err_states and cont < timeout: + self.log_info("State: " + str(volume['Status'])) cont += 2 time.sleep(2) - volume = conn.get_all_volumes([volume.id])[0] + volume = conn.describe_volumes([volume['VolumeId']])['Volumes'][0] - if str(volume.status) == 'available': + if str(volume['Status']) == 'available': return volume else: self.log_error("Error creating the volume %s, deleting it" % (volume.id)) - conn.delete_volume(volume.id) + conn.delete_volume(volume['VolumeId']) return None @staticmethod @@ -888,17 +953,21 @@ def get_volumes(conn, vm): if disk_url: _, elem_id = EC2CloudConnector.getAMIData(disk_url) if elem_id.startswith('snap-'): - snapshot_id = conn.get_all_snapshots([elem_id])[0].id + snapshot = conn.describe_snapshots(SnapshotIds=[elem_id])['Snapshots'] + if snapshot: + snapshot_id = snapshot_id[0]['SnapshotId'] elif elem_id.startswith('vol-'): - volume_id = conn.get_all_volumes([elem_id])[0].id + volume = conn.describe_volumes(VolumeIds=[elem_id])['Volumes'] + if volume: + volume_id = volume[0]['VolumeId'] else: - snapshot = conn.get_all_snapshots(filters={'tag:Name': elem_id}) + snapshot = conn.describe_snapshots(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Snapshots'] if snapshot: - snapshot_id = snapshot[0].id + snapshot_id = snapshot[0]['SnapshotId'] else: - volume = conn.get_all_volumes(filters={'tag:Name': elem_id}) + volume = conn.describe_volumes(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Volumes'] if volume: - volume_id = volume[0].id + volume_id = volume[0]['VolumeId'] else: raise Exception("No snapshot/volume found with name: %s" % elem_id) else: 
@@ -923,29 +992,29 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): - id(str): ID of the EC2 instance. - region_name(str): Region name to search the instance. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - Returns: a :py:class:`boto.ec2.instance` of found instance or None if it was not found + Returns: a :py:class:`boto3.EC2.Instance` of found instance or None if it was not found """ instance = None try: - conn = self.get_connection(region_name, auth_data) - - reservations = conn.get_all_instances([instance_id]) - instance = reservations[0].instances[0] + resource = self.get_connection(region_name, auth_data, 'ec2', 'resource') + instance = resource.Instance(instance_id) + instance.load() except Exception: - self.log_error("Error getting instance id: %s" % instance_id) + self.log_exception("Error getting instance id: %s" % instance_id) return instance - def add_elastic_ip(self, vm, instance, fixed_ip=None): + def add_elastic_ip(self, vm, instance, conn, fixed_ip=None): """ Add an elastic IP to an instance Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.EC2.Instance`): object to connect to EC2 instance. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - fixed_ip(str, optional): specifies a fixed IP to add to the instance. - Returns: a :py:class:`boto.ec2.address.Address` added or None if some problem occur. + Returns: a :py:dict:`boto3.EC2.Address` added or None if some problem occur. """ if vm.state == VirtualMachine.RUNNING and "elastic_ip" not in vm.__dict__.keys(): # Flag to set that this VM has created (or is creating) the elastic @@ -955,8 +1024,8 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address = None self.log_info("Add an Elastic IP") if fixed_ip: - for address in instance.connection.get_all_addresses(): - if str(address.public_ip) == fixed_ip: + for address in conn.describe_addresses()['Addresses']: + if str(address['PublicIp']) == fixed_ip: pub_address = address if pub_address: @@ -965,13 +1034,9 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). 
Ignore it.") return None else: - provider_id = self.get_net_provider_id(vm.info) - if provider_id: - pub_address = instance.connection.allocate_address(domain="vpc") - instance.connection.associate_address(instance.id, allocation_id=pub_address.allocation_id) - else: - pub_address = instance.connection.allocate_address() - instance.connection.associate_address(instance.id, pub_address.public_ip) + pub_address = conn.allocate_address(Domain='vpc') + + conn.associate_address(InstanceId=instance.id, AllocationId=pub_address['AllocationId']) self.log_debug(pub_address) return pub_address @@ -979,7 +1044,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_exception("Error adding an Elastic IP to VM ID: " + str(vm.id)) if pub_address: self.log_exception("The Elastic IP was allocated, release it.") - pub_address.release() + conn.release_address(AllocationId=pub_address['AllocationId']) return None else: self.log_info("The VM is not running, not adding an Elastic IP.") @@ -1017,40 +1082,41 @@ def delete_elastic_ips(self, conn, vm, timeout=240): if pub_ip in fixed_ips: self.log_info("%s is a fixed IP, it is not released" % pub_ip) else: - for address in conn.get_all_addresses(filters={"public-ip": pub_ip}): - self.log_info("This VM has a Elastic IP %s." % address.public_ip) + for address in conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses']: + self.log_info("This VM has a Elastic IP %s." % address['PublicIp']) cont = 0 - while address.instance_id and cont < timeout: + while address['InstanceId'] and cont < timeout: cont += 3 try: self.log_debug("Disassociate it.") - address.disassociate() + conn.disassociate_address(PublicIp=address['PublicIp']) except Exception: self.log_debug("Error disassociating the IP.") - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] + address = conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses'][0] self.log_info("It is attached. Wait.") time.sleep(3) - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] self.log_info("Now release it.") - address.release() + conn.release_address(AllocationId=address['AllocationId']) - def setIPsFromInstance(self, vm, instance): + def setIPsFromInstance(self, vm, instance, conn): """ Adapt the RADL information of the VM to the real IPs assigned by EC2 Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.ec2.Instance`): object to connect to EC2 instance. 
""" vm_system = vm.info.systems[0] num_pub_nets = num_nets = 0 public_ips = [] private_ips = [] - if (instance.ip_address is not None and len(instance.ip_address) > 0 and - instance.ip_address != instance.private_ip_address): - public_ips = [instance.ip_address] + if (instance.public_ip_address is not None and len(instance.public_ip_address) > 0 and + instance.public_ip_address != instance.private_ip_address): + public_ips = [instance.public_ip_address] num_nets += 1 num_pub_nets = 1 if instance.private_ip_address is not None and len(instance.private_ip_address) > 0: @@ -1071,13 +1137,13 @@ def setIPsFromInstance(self, vm, instance): elastic_ips = [] # Get the elastic IPs assigned (there must be only 1) - for address in instance.connection.get_all_addresses(): - if address.instance_id == instance.id: - elastic_ips.append(str(address.public_ip)) + for address in conn.describe_addresses()['Addresses']: + if address['InstanceId'] == instance.id: + elastic_ips.append(str(address['PublicIp'])) # It will be used if it is different to the public IP of the # instance - if str(address.public_ip) != instance.ip_address: - vm_system.setValue('net_interface.' + str(num_nets) + '.ip', str(instance.ip_address)) + if str(address['PublicIp']) != instance.public_ip_address: + vm_system.setValue('net_interface.' + str(num_nets) + '.ip', str(instance.public_ip_address)) vm_system.setValue('net_interface.' + str(num_nets) + '.connection', public_net.id) num_pub_nets += 1 @@ -1097,14 +1163,14 @@ def setIPsFromInstance(self, vm, instance): # It is a fixed IP if ip not in elastic_ips: # It has not been created yet, do it - self.add_elastic_ip(vm, instance, ip) + self.add_elastic_ip(vm, instance, conn, ip) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break else: # Check if we have enough public IPs if num >= num_pub_nets: - self.add_elastic_ip(vm, instance) + self.add_elastic_ip(vm, instance, conn) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break @@ -1125,13 +1191,15 @@ def addRouterInstance(self, vm, conn): if network.getValue('router'): if not route_table_id: vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [vm.inf.id]}])['Vpcs']: + vpc_id = vpc['VpcId'] if not vpc_id: self.log_error("No VPC found.") return False - for rt in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - route_table_id = rt.id + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: + route_table_id = rt['RouteTableId'] if not route_table_id: self.log_error("No Route Table found with name.") @@ -1159,18 +1227,20 @@ def addRouterInstance(self, vm, conn): success = False break - reservations = conn.get_all_instances([vrouter]) - vrouter_instance = reservations[0].instances[0] + reservations = conn.describe_instances(InstanceIds=[vrouter])['Reservations'] + vrouter_instance = reservations[0]['Instances'][0] - if vrouter_instance.state != "running": + if vrouter_instance['State']['Name'] != "running": self.log_debug("VRouter instance %s is not running." % system_router) success = False break self.log_info("Adding route %s to instance ID: %s." 
% (router_cidr, vrouter)) - conn.create_route(route_table_id, router_cidr, instance_id=vrouter) + conn.create_route(RouteTableId=route_table_id, + DestinationCidrBlock=router_cidr, + InstanceId=vrouter) self.log_debug("Disabling sourceDestCheck to instance ID: %s." % vrouter) - conn.modify_instance_attribute(vrouter, attribute='sourceDestCheck', value=False) + conn.modify_instance_attribute(InstanceId=vrouter, SourceDestCheck={'Value': False}) # once set, delete it to not set it again network.delValue('router') @@ -1181,9 +1251,8 @@ def addRouterInstance(self, vm, conn): return success def updateVMInfo(self, vm, auth_data): - region = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - conn = self.get_connection(region, auth_data) + region, instance_id = vm.id.split(";") + conn = self.get_connection(region, auth_data, 'ec2') # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): @@ -1192,16 +1261,16 @@ def updateVMInfo(self, vm, auth_data): job_instance_id = None self.log_info("Check if the request has been fulfilled and the instance has been deployed") - job_sir_id = instance_id - request_list = conn.get_all_spot_instance_requests() - for sir in request_list: - # TODO: Check if the request had failed and launch it in - # another availability zone - if sir.state == 'failed': - vm.state = VirtualMachine.FAILED - if sir.id == job_sir_id: - job_instance_id = sir.instance_id - break + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': [instance_id]}]) + sir = [] + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + # TODO: Check if the request had failed and launch it in + # another availability zone + if sir['State'] == 'failed': + vm.state = VirtualMachine.FAILED + job_instance_id = sir['InstanceId'] if job_instance_id: self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) @@ -1214,34 +1283,20 @@ def updateVMInfo(self, vm, auth_data): instance = self.get_instance_by_id(instance_id, region, auth_data) if instance: - try: - # sometime if you try to update a recently created instance - # this operation fails - instance.update() - if "IM-USER" not in instance.tags: - im_username = "im_user" - if auth_data.getAuthInfo('InfrastructureManager'): - im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] - instance.add_tag("IM-USER", im_username) - except Exception as ex: - self.log_exception("Error updating the instance " + instance_id) - return (False, "Error updating the instance " + instance_id + ": " + str(ex)) - vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type) - vm.info.systems[0].setValue("availability_zone", instance.placement) + vm.info.systems[0].setValue("availability_zone", instance.placement['AvailabilityZone']) - vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN) + vm.state = self.VM_STATE_MAP.get(instance.state['Name'], VirtualMachine.UNKNOWN) instance_type = self.get_instance_type_by_name(instance.instance_type) self.update_system_info_from_instance(vm.info.systems[0], instance_type) - self.setIPsFromInstance(vm, instance) + self.setIPsFromInstance(vm, instance, conn) self.manage_dns_entries("add", vm, auth_data) self.addRouterInstance(vm, conn) try: - vm.info.systems[0].setValue('launch_time', int(time.mktime( - time.strptime(instance.launch_time[:19], '%Y-%m-%dT%H:%M:%S')))) + vm.info.systems[0].setValue('launch_time', 
int(instance.launch_time.timestamp())) except Exception as ex: self.log_warn("Error setting the launch_time of the instance. " "Probably the instance is not running:" + str(ex)) @@ -1252,36 +1307,64 @@ def updateVMInfo(self, vm, auth_data): return (True, vm) + def _get_zone(self, conn, domain): + zones = conn.list_hosted_zones_by_name(DNSName=domain, MaxItems='1')['HostedZones'] + if not zones or len(zones) == 0: + return None + return zones[0] + + @staticmethod + def _get_change_batch(action, fqdn, ip): + return { + "Changes": [ + { + "Action": action, + "ResourceRecordSet": { + "Name": fqdn, + "Type": "A", + "TTL": 300, + "ResourceRecords": [{"Value": ip}], + }, + } + ] + } + def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): try: # Workaround to use EC2 as the default case. if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + + zone = self._get_zone(conn, domain) + if not zone: + raise Exception("Could not find DNS zone to update") + zone_id = zone['Id'] + if not zone: self.log_info("Creating DNS zone %s" % domain) - zone = conn.create_zone(domain) + zone = conn.create_hosted_zone(domain) else: self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone_id, + StartRecordName=fqdn, + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("Creating DNS record %s." % fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("CREATE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets(HostedZoneId=zone_id, + ChangeBatch=self._get_change_batch('CREATE', fqdn, ip)) else: self.log_info("DNS record %s exists. Do not create." % fqdn) return True @@ -1292,35 +1375,38 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): # Workaround to use EC2 as the default case. if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + zone = self._get_zone(conn, domain) if not zone: self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." 
+ domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + StartRecordName=fqdn, + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: self.log_info("Deleting DNS record %s." % fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("DELETE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets(HostedZoneId=zone['Id'], + ChangeBatch=self._get_change_batch('DELETE', fqdn, ip)) # if there are no A records - # all_a_records = [r for r in conn.get_all_rrsets(zone.id) if r.type == "A"] + # all_a_records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + # StartRecordType='A')['ResourceRecordSets'] # if not all_a_records: - # conn.delete_hosted_zone(zone.id) + # self.log_info("Deleting DNS zone %s." % domain) + # conn.delete_hosted_zone(zone['Id']) def cancel_spot_requests(self, conn, vm): """ @@ -1333,24 +1419,25 @@ def cancel_spot_requests(self, conn, vm): instance_id = vm.id.split(";")[1] # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): - request_list = conn.get_all_spot_instance_requests([instance_id]) - for sir in request_list: - conn.cancel_spot_instance_requests(sir.id) - self.log_info("Spot instance request " + str(sir.id) + " deleted") - break - - def delete_networks(self, conn, vm, timeout=240): + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': ['job_sir_id']}]) + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + conn.cancel_spot_instance_requests(sir['SpotInstanceRequestId']) + self.log_info("Spot instance request " + sir['SpotInstanceRequestId'] + " deleted") + + def delete_networks(self, conn, inf_id, timeout=240): """ Delete the created networks """ - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": vm.inf.id}): - self.log_info("Deleting subnet: %s" % subnet.id) + for subnet in conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Subnets']: + self.log_info("Deleting subnet: %s" % subnet['SubnetId']) cont = 0 deleted = False while not deleted and cont < timeout: cont += 5 try: - conn.delete_subnet(subnet.id) + conn.delete_subnet(SubnetId=subnet['SubnetId']) deleted = True except Exception as ex: self.log_warn("Error removing subnet: " + str(ex)) @@ -1359,24 +1446,25 @@ def delete_networks(self, conn, vm, timeout=240): time.sleep(5) if not deleted: - self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet.id)) + self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet['SubnetId'])) vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Vpcs']: + vpc_id = vpc['VpcId'] ig_id = None - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": vm.inf.id}): - ig_id = ig.id + for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [inf_id]}])['InternetGateways']: + ig_id = ig['InternetGatewayId'] if ig_id and vpc_id: self.log_info("Detacching Internet Gateway: %s from VPC: %s" % (ig_id, vpc_id)) - conn.detach_internet_gateway(ig_id, vpc_id) + conn.detach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) if 
ig_id: - self.log_info("Deleting Internet Gateway: %s" % ig.id) - conn.delete_internet_gateway(ig_id) + self.log_info("Deleting Internet Gateway: %s" % ig_id) + conn.delete_internet_gateway(InternetGatewayId=ig_id) if vpc_id: - self.log_info("Deleting vpc: %s" % vpc.id) - conn.delete_vpc(vpc_id) + self.log_info("Deleting vpc: %s" % vpc_id) + conn.delete_vpc(VpcId=vpc_id) def finalize(self, vm, last, auth_data): @@ -1389,10 +1477,9 @@ def finalize(self, vm, last, auth_data): self.log_info("VM with no ID. Ignore.") return True, "" - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) @@ -1431,7 +1518,7 @@ def finalize(self, vm, last, auth_data): # And nets try: - self.delete_networks(conn, vm) + self.delete_networks(conn, vm.inf.id) except Exception as ex: self.log_exception("Error deleting networks.") error_msg += "Error deleting networks: %s. " % ex @@ -1450,14 +1537,14 @@ def _get_security_groups(self, conn, vm): sg_names.append(sg_name) sgs = [] - for sg_name in sg_names: - try: - sgs.extend(conn.get_all_security_groups(filters={'group-name': sg_name})) - except Exception: - self.log_exception("Error getting SG %s" % sg_name) + try: + sg = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': sg_names}]) + sgs = sg['SecurityGroups'] + except Exception: + self.log_exception("Error getting SG %s" % sg_name) return sgs - def delete_security_groups(self, conn, vm, timeout=90): + def delete_security_groups(self, conn, vm): """ Delete the SG of this infrastructure if this is the last VM @@ -1469,68 +1556,67 @@ def delete_security_groups(self, conn, vm, timeout=90): if sgs: # Get the default SG to set in the instances - def_sg_id = conn.get_all_security_groups(filters={'group-name': 'default', - 'vpc-id': sgs[0].vpc_id})[0].id + def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, + {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] + )['SecurityGroups'][0]['GroupId'] for sg in sgs: - if sg.description != "Security group created by the IM": - self.log_info("SG %s not created by the IM. Do not delete it." % sg.name) + if sg['Description'] != "Security group created by the IM": + self.log_info("SG %s not created by the IM. Do not delete it." % sg['GroupName']) continue try: - for instance in sg.instances(): - instance.modify_attribute("groupSet", [def_sg_id]) + reservations = conn.describe_instances(Filters=[{'Name': 'instance.group-id', + 'Values': [sg['GroupId']]}])['Reservations'] + if reservations: + for instance in reservations[0]['Instances']: + conn.modify_instance_attribute(InstanceId=instance['InstanceId'], Groups=[def_sg_id]) except Exception as ex: - self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg.name, instance.id, ex)) + self.log_warn("Error removing the SG %s from the instance: %s. 
%s" % (sg["GroupName"], + instance['InstanceId'], ex)) # try to wait some seconds to free the SGs time.sleep(5) - self.log_info("Remove the SG: " + sg.name) + self.log_info("Remove the SG: " + sg['GroupName']) try: - sg.revoke('tcp', 0, 65535, src_group=sg) - sg.revoke('udp', 0, 65535, src_group=sg) + conn.revoke_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': 'tcp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + {'IpProtocol': 'udp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + ]) except Exception as ex: self.log_warn("Error revoking self rules: " + str(ex)) - sg.delete() + conn.delete_security_group(GroupId=sg['GroupId']) def stop(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - - instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if (instance is not None): - instance.update() - instance.stop() - else: - self.log_warn("Instance %s not found. Not stopping it." % instance_id) - return (False, "Instance %s not found." % instance_id) - - return (True, "") + return self._vm_operation("stop", vm, auth_data) def start(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - - instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if (instance is not None): - instance.update() - instance.start() - else: - self.log_warn("Instance %s not found. Not starting it." % instance_id) - return (False, "Instance %s not found." % instance_id) - - return (True, "") + return self._vm_operation("start", vm, auth_data) def reboot(self, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + return self._vm_operation("reboot", vm, auth_data) + + def _vm_operation(self, op, vm, auth_data): + region_name, instance_id = vm.id.split(";") instance = self.get_instance_by_id(instance_id, region_name, auth_data) if (instance is not None): - instance.update() - instance.reboot() + if op == "stop": + instance.stop() + elif op == "start": + instance.start() + elif op == "reboot": + instance.reboot() else: - self.log_warn("Instance %s not found. Not rebooting it." % instance_id) + self.log_warn("Instance %s not found. Not %sing it." % (instance_id, op)) return (False, "Instance %s not found." 
% instance_id) return (True, "") @@ -1544,9 +1630,8 @@ def waitStop(instance, timeout=120): wait = 0 powered_off = False while wait < timeout and not powered_off: - instance.update() - - powered_off = instance.state == 'stopped' + instance.reload() + powered_off = instance.state['Name'] == 'stopped' if not powered_off: time.sleep(2) wait += 2 @@ -1554,14 +1639,11 @@ def waitStop(instance, timeout=120): return powered_off def alterVM(self, vm, radl, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if instance: - instance.update() - else: + if not instance: return (False, "The instance has not been found") new_system = self.resize_vm_radl(vm, radl) @@ -1575,7 +1657,8 @@ def alterVM(self, vm, radl, auth_data): if instance_type and instance.instance_type != instance_type.name: stopped = self.waitStop(instance) if stopped: - success = instance.modify_attribute('instanceType', instance_type.name) + success = instance.modify_attribute(Attribute='instanceType', + Value=instance_type.name) if success: self.update_system_info_from_instance(vm.info.systems[0], instance_type) instance.start() @@ -1671,20 +1754,16 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): infrastructure is destroyed. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.volume - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.instance - Returns: a tuple (success, vm). - The first value is True if the operation finished successfully or False otherwise. - The second value is a str with the url of the new image if the operation finished successfully or an error message otherwise. """ - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] - snapshot_id = "" + region_name, instance_id = vm.id.split(";") + snapshot_id = None # Obtain the connection object to connect with EC2 - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') if not conn: return (False, "Error connecting with EC2, check the credentials") @@ -1693,15 +1772,16 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: self.log_info("Creating snapshot: " + image_name) - snapshot_id = instance.create_image(image_name, - description="AMI automatically generated by IM", - no_reboot=True) - # Add tags to the snapshot to be recognizable - conn.create_tags(snapshot_id, {'instance_id': instance_id}) + snapshot_id = instance.create_image(Name=image_name, + Description="AMI automatically generated by IM", + NoReboot=True, + TagSpecifications=[{'ResourceType': 'image', + 'Tags': [{'Key': 'instance_id', + 'Value': instance_id}]}]) else: return (False, "Error obtaining details of the instance") - if snapshot_id != "": - new_url = "aws://%s/%s" % (region_name, snapshot_id) + if snapshot_id: + new_url = "aws://%s/%s" % (region_name, snapshot_id['ImageId']) if auto_delete: vm.inf.snapshots.append(new_url) return (True, new_url) @@ -1712,9 +1792,9 @@ def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) self.log_info("Deleting image: %s." 
% image_url) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') - success = conn.deregister_image(ami, delete_snapshot=True) # https://github.com/boto/boto/issues/3019 + success = conn.deregister_image(ImageId=ami) if success: return (True, "") @@ -1730,7 +1810,7 @@ def list_images(self, auth_data, filters=None): regions = [filters['region']] del filters['region'] if not regions: - regions = [region.name for region in boto.ec2.regions()] + region = [region['RegionName'] for region in boto3.client('ec2').describe_regions()['Regions']] images_filter = {'architecture': 'x86_64', 'image-type': 'machine', 'virtualization-type': 'hvm', 'state': 'available', @@ -1742,12 +1822,12 @@ def list_images(self, auth_data, filters=None): images = [] for region in regions: - conn = self.get_connection(region, auth_data) + conn = self.get_connection(region, auth_data, 'ec2') try: - for image in conn.get_all_images(owners=['self', 'aws-marketplace'], filters=images_filter): - if len(image.id) > 12: # do not add old images - images.append({"uri": "aws://%s/%s" % (region, image.id), - "name": "%s/%s" % (region, image.name)}) + for image in conn.describe_images(Owners=['self', 'aws-marketplace'], Filters=images_filter)['Images']: + if len(image['ImageId']) > 12: # do not add old images + images.append({"uri": "aws://%s/%s" % (region, image['ImageId']), + "name": "%s/%s" % (region, image['Name'])}) except Exception: continue return self._filter_images(images, filters) diff --git a/IM/connectors/Kubernetes.py b/IM/connectors/Kubernetes.py index 8cbb3367..fd85b95e 100644 --- a/IM/connectors/Kubernetes.py +++ b/IM/connectors/Kubernetes.py @@ -21,6 +21,8 @@ import os import re import socket +from random import choice +from string import ascii_lowercase, digits from netaddr import IPNetwork, IPAddress try: from urlparse import urlparse @@ -370,9 +372,12 @@ def _generate_ingress_data(self, namespace, name, dns, port, apps_dns, vm): if dns_url[1]: host = dns_url[1] if apps_dns and not host.endswith(apps_dns): - if not host.endswith(".") and not apps_dns.startswith("."): - host += "." - host += apps_dns + if host.endswith("."): + host = host[:-1] + host += "-" + ''.join(choice(ascii_lowercase + digits) for _ in range(4)) + if apps_dns.startswith("."): + apps_dns = apps_dns[1:] + host += "." + apps_dns if dns_url[2]: path = dns_url[2] diff --git a/changelog b/changelog index dfaa04b2..ff80a770 100644 --- a/changelog +++ b/changelog @@ -777,3 +777,7 @@ IM 1.17.1: * Speed up Ansible installation using newer versions. * Fix problem with 0 disk flavors in OpenStack. * Flush Inf data to DB in case of service termination. + +IM 1.18.0: + * Enable to get IM stats. + * Migrate EC2 conn to boto3 library. 
diff --git a/codemeta.json b/codemeta.json index 1aab92a3..bfb302d9 100644 --- a/codemeta.json +++ b/codemeta.json @@ -6,7 +6,7 @@ "@type": "SoftwareSourceCode", "identifier": "im", "name": "Infrastructure Manager", - "version": "1.17.1", + "version": "1.18.0", "description": "IM is a tool that deploys complex and customized virtual infrastructures on IaaS Cloud deployments", "license": "GNU General Public License v3.0", "author": [ diff --git a/doc/source/conf.py b/doc/source/conf.py index 54bfd468..7ab6ab98 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -3,8 +3,8 @@ copyright = '2023, I3M-GRyCAP' author = 'micafer' -version = '1.17' -release = '1.17.1' +version = '1.18' +release = '1.18.0' master_doc = 'index' diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index fc6ee4a9..33d00187 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -1,8 +1,8 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG BRANCH=devel LABEL maintainer="Miguel Caballer " -LABEL version="1.17.1" +LABEL version="1.18.0" LABEL description="Container image to run the IM service. (http://www.grycap.upv.es/im)" EXPOSE 8899 8800 @@ -12,11 +12,10 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install git+https://github.com/micafer/libcloud@ost_nets_extra && \ - pip3 install apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages git+https://github.com/micafer/libcloud@ost_nets_extra && \ + pip3 install --break-system-packages apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ @@ -24,7 +23,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch /usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -38,8 +37,5 @@ RUN sed -i -e 's/VM_NUM_USE_CTXT_DIST = 30/VM_NUM_USE_CTXT_DIST = 3/g' /etc/im/i # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD /usr/local/bin/im_service \ No newline at end of file diff --git a/docker-py3/Dockerfile b/docker-py3/Dockerfile index 3044d852..81500d20 100644 --- a/docker-py3/Dockerfile +++ 
b/docker-py3/Dockerfile @@ -1,5 +1,5 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ENV VERSION=1.17.1 @@ -14,10 +14,9 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install apache-libcloud==3.8.0 IM==${VERSION} &&\ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages apache-libcloud==3.8.0 IM==${VERSION} &&\ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ @@ -25,7 +24,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch /usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -36,8 +35,5 @@ RUN wget https://raw.githubusercontent.com/grycap/im/v${VERSION}/etc/logging.con # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD ["/usr/local/bin/im_service"] diff --git a/docker-py3/Dockerfile.alp b/docker-py3/Dockerfile.alp index e21363c7..97f502d0 100644 --- a/docker-py3/Dockerfile.alp +++ b/docker-py3/Dockerfile.alp @@ -40,8 +40,5 @@ RUN apk add --no-cache git &&\ # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/lib/python3.10/site-packages/boto/endpoints.json - # Start IM service CMD im_service.py diff --git a/etc/im.cfg b/etc/im.cfg index 6e1d5ff5..ab6037c3 100644 --- a/etc/im.cfg +++ b/etc/im.cfg @@ -145,6 +145,8 @@ OIDC_ISSUERS = https://aai.egi.eu/auth/realms/egi #OIDC_INSTROSPECT_PATH = "/introspect" # List of OIDC groups that will be allowed to access the IM service #OIDC_GROUPS = +# Claim where the groups are stored in the OIDC token +# OIDC_GROUPS_CLAIM = groups # Force the users to pass a valid OIDC token #FORCE_OIDC_AUTH = False diff --git a/pyproject.toml b/pyproject.toml index fd6487a1..18e1180c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "PyYAML", "suds-community", "cheroot", - "boto >= 2.29", + "boto3", "apache-libcloud >= 3.2.0", "RADL >= 1.3.3", "flask", diff --git a/requirements-tests.txt b/requirements-tests.txt index 48bdd4d1..456356e3 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -3,7 +3,7 @@ ansible-base paramiko >= 1.14 PyYAML cheroot -boto >= 2.29 +boto3 apache-libcloud 
>= 3.3.1 RADL >= 1.3.3 flask diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index f2e116d4..29780728 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -15,27 +15,24 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . - import sys import unittest +import datetime sys.path.append(".") sys.path.append("..") from .CloudConn import TestCloudConnectorBase from IM.CloudInfo import CloudInfo from IM.auth import Authentication -from radl import radl_parse +from IM.config import Config from IM.VirtualMachine import VirtualMachine +from radl import radl_parse from IM.InfrastructureInfo import InfrastructureInfo from IM.connectors.EC2 import EC2CloudConnector -from IM.config import Config from mock import patch, MagicMock, call class TestEC2Connector(TestCloudConnectorBase): - """ - Class to test the IM connectors - """ @staticmethod def get_ec2_cloud(): @@ -106,31 +103,20 @@ def test_15_get_all_instance_types(self): self.assertEqual(instance.cores_per_cpu, 1) self.assertEqual(instance.disk_space, 160) - def get_all_subnets(self, subnet_ids=None, filters=None): - subnet = MagicMock() - subnet.id = "subnet-id" - if filters: - return [] - elif subnet_ids: - subnet.cidr_block = "10.10.0.1/24" - return [subnet] - subnet.cidr_block = "10.0.1.0/24" - return [subnet] - - def _get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False): - vpc = MagicMock() - vpc.id = "vpc-id" - vpc.default = True - if vpc_ids: - return [vpc] + def describe_subnets(self, **kwargs): + subnet = {} + subnet['SubnetId'] = "subnet-id" + if 'Filters' in kwargs and kwargs['Filters']: + return {'Subnets': []} + elif 'SubnetIds' in kwargs and kwargs['SubnetIds']: + subnet['CidrBlock'] = "10.10.0.1/24" else: - return [] + subnet['CidrBlock'] = "10.0.1.0/24" + return {'Subnets': [subnet]} - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_region): + def test_20_launch(self, save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and outports='8080,9000:9100' and sg_name = 'sgname') network net2 () @@ -143,7 +129,7 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio net_interface.0.dns_name = 'test' and net_interface.1.connection = 'net2' and disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and + disk.0.image.url = 'aws://us-east-1/ami-id' and disk.0.os.credentials.username = 'user' and disk.1.size=1GB and disk.1.device='hdb' and @@ -156,110 +142,42 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - region = MagicMock() - get_region.return_value = region - - conn = MagicMock() - VPCConnection.return_value = conn - - image = MagicMock() - device = MagicMock() - reservation = MagicMock() - instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value = True - instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - subnet = MagicMock() - subnet.id = "subnet-id" 
- conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - inf = InfrastructureInfo() inf.auth = auth inf.radl = radl - res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - self.assertEqual(len(conn.create_security_group.call_args_list), 3) - self.assertEqual(conn.create_security_group.call_args_list[0][0][0], "im-%s" % inf.id) - self.assertEqual(conn.create_security_group.call_args_list[1][0][0], "sgname") - self.assertEqual(conn.create_security_group.call_args_list[2][0][0], "im-%s-net2" % inf.id) - # Check the case that we do not use VPC - radl_data = """ - network net1 (outbound = 'yes' and outports='8080') - network net2 (create='yes' and cidr='10.0.128.0/24') - network net3 (create='yes' and cidr='10.0.*.0/24') - network net4 (create='yes') - system test ( - cpu.arch='x86_64' and - cpu.count>=1 and - memory.size>=1g and - net_interface.0.connection = 'net1' and - net_interface.0.dns_name = 'test' and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and - disk.0.os.credentials.username = 'user' and - #disk.0.os.credentials.private_key = 'private' and - #disk.0.os.credentials.public_key = 'public' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.create_vpc.return_value = vpc - conn.get_all_vpcs.side_effect = self._get_all_vpcs - - subnet = MagicMock() - subnet.id = "subnet-id" - subnet.cidr_block = "10.10.129.0/24" - conn.create_subnet.return_value = subnet - conn.get_all_subnets.side_effect = self.get_all_subnets + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'i-12345678'}]} + instance = MagicMock() + mock_res.Instance.return_value = instance - inf = InfrastructureInfo() - inf.auth = auth - inf.radl = radl res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - # check the instance_type selected is correct - self.assertIn(".micro", conn.run_instances.call_args_list[1][1]["instance_type"]) - self.assertEqual(conn.create_vpc.call_args_list[0][0][0], "10.0.128.0/22") - self.assertEqual(conn.create_subnet.call_args_list[0][0], ('vpc-id', '10.0.128.0/24')) - 
self.assertEqual(conn.create_subnet.call_args_list[1][0], ('vpc-id', '10.0.129.0/24')) - self.assertEqual(conn.create_subnet.call_args_list[2][0], ('vpc-id', '10.0.130.0/24')) - + success, msg = res[0] + self.assertTrue(success, msg="ERROR: launching a VM: %s" % msg) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(len(mock_conn.create_security_group.call_args_list), 3) + self.assertEqual(mock_conn.create_security_group.call_args_list[0][1]['GroupName'], "im-%s" % inf.id) + self.assertEqual(mock_conn.create_security_group.call_args_list[1][1]['GroupName'], "sgname") + self.assertEqual(mock_conn.create_security_group.call_args_list[2][1]['GroupName'], "im-%s-net2" % inf.id) + mock_conn.run_instances.assert_called_once() - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_region): + def test_25_launch_spot(self, save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and provider_id = 'vpc-id.subnet-id') network net2 () @@ -272,7 +190,7 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ net_interface.0.dns_name = 'test' and net_interface.1.connection = 'net2' and disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and + disk.0.image.url = 'aws://us-east-1/ami-id' and disk.0.os.credentials.username = 'user' and disk.0.os.credentials.private_key = 'private' and disk.0.os.credentials.public_key = 'public' and @@ -287,45 +205,29 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - region = MagicMock() - get_region.return_value = region - - conn = MagicMock() - VPCConnection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - image = MagicMock() - device = MagicMock() - reservation = MagicMock() + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'iid'}]} instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value = True + mock_res.Instance.return_value = instance instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - - zone = MagicMock() - zone.name = 'us-east-1' - conn.get_all_zones.return_value = [zone] - history = MagicMock() - history.price = 0.1 - conn.get_spot_price_history.return_value = [history] - - request = MagicMock() - request.id = "id" - conn.request_spot_instances.return_value = [request] + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + 
mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.describe_availability_zones.return_value = {'AvailabilityZones': [{'ZoneName': 'us-east-1'}]} + mock_conn.describe_spot_price_history.return_value = {'SpotPriceHistory': [{'SpotPrice': '0.1'}]} + mock_conn.request_spot_instances.return_value = {'SpotInstanceRequests': [{'SpotInstanceRequestId': 'sid'}]} inf = InfrastructureInfo() inf.auth = auth @@ -334,10 +236,8 @@ def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_ self.assertTrue(success, msg="ERROR: launching a VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_30_updateVMInfo(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (router = '10.0.10.0/24,vrouter') @@ -364,6 +264,36 @@ def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} + instance = MagicMock() + mock_res.Instance.return_value = instance + instance.id = "iid" + instance.tags = [] + instance.virtualization_type = "vt" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} + instance.instance_type = "t1.micro" + instance.launch_time = datetime.datetime.now() + instance.public_ip_address = "158.42.1.1" + instance.private_ip_address = "10.0.0.1" + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'iid'}]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_route_tables.return_value = {'RouteTables': [{'RouteTableId': 'routet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 'Id': 'zone-id'}]} + mock_conn.create_hosted_zone.return_value = {'HostedZone': {'Id': 'zone-idc'}} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'some.test.domain.com.'}]} + inf = MagicMock() vm1 = MagicMock() system1 = MagicMock() @@ -373,78 +303,38 @@ def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): inf.vm_list = [vm1] vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.tags = 
[] - instance.virtualization_type = "vt" - instance.placement = "us-east-1" - instance.state = "running" - instance.instance_type = "t1.micro" - instance.launch_time = "2016-12-31T00:00:00" - instance.ip_address = "158.42.1.1" - instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - conn.get_all_addresses.return_value = [address] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - dns_conn.get_zone.return_value = None - zone = MagicMock() - zone.get_a.return_value = None - dns_conn.create_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - routet = MagicMock() - routet.id = "routet-id" - conn.get_all_route_tables.return_value = [routet] - success, vm = ec2_cloud.updateVMInfo(vm, auth) self.assertTrue(success, msg="ERROR: updating VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - self.assertEqual(dns_conn.create_zone.call_count, 2) - self.assertEqual(dns_conn.get_zone.call_count, 2) - self.assertEqual(dns_conn.create_zone.call_args_list[0][0][0], "domain.com.") - self.assertEqual(changes.add_change.call_args_list[0], call('CREATE', 'test.domain.com.', 'A')) - self.assertEqual(changes.add_change.call_args_list[1], call('CREATE', 'some.test.domain.com.', 'A')) - self.assertEqual(change.add_value.call_args_list[0], call('158.42.1.1')) - self.assertEqual(conn.create_route.call_args_list[0], call('routet-id', '10.0.10.0/24', instance_id='int-id')) + self.assertEqual(mock_conn.list_hosted_zones_by_name.call_count, 2) + self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.create_route.call_args_list[0][1], {'RouteTableId': 'routet-id', + 'DestinationCidrBlock': '10.0.10.0/24', + 'InstanceId': 'int-id'}) # Test using PRIVATE_NET_MASKS setting 10.0.0.0/8 as public net old_priv = Config.PRIVATE_NET_MASKS Config.PRIVATE_NET_MASKS = ["172.16.0.0/12", "192.168.0.0/16"] - instance.ip_address = None + + instance.public_ip_address = None instance.private_ip_address = "10.0.0.1" - conn.get_all_addresses.return_value = [] + mock_conn.describe_addresses.return_value = {'Addresses': []} + success, vm = ec2_cloud.updateVMInfo(vm, auth) Config.PRIVATE_NET_MASKS = old_priv self.assertEqual(vm.getPublicIP(), "10.0.0.1") self.assertEqual(vm.getPrivateIP(), None) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_30_updateVMInfo_spot(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_40_updateVMInfo_spot(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') system test ( @@ -470,113 +360,66 @@ def test_30_updateVMInfo_spot(self, get_connection): inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() + mock_conn = MagicMock() + mock_res = 
MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() - instance.update.return_value = True - instance.tags = [] + mock_res.Instance.return_value = instance + instance.id = "iid" instance.virtualization_type = "vt" - instance.placement = "us-east-1" - instance.state = "running" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} instance.instance_type = "t1.micro" instance.launch_time = "2016-12-31T00:00:00" instance.ip_address = "158.42.1.1" instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - conn.get_all_addresses.return_value = [] - - sir = MagicMock() - sir.state = "" - sir.id = "id" - conn.get_all_spot_instance_requests.return_value = [sir] - - volume = MagicMock() - volume.status = "available" - volume.id = "volid" - conn.create_volume.return_value = volume - conn.attach_volume.return_value = True + mock_conn.describe_addresses.return_value = {'Addresses': []} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': [{'InstanceId': 'id', + 'State': ''}]} + mock_conn.create_volume.return_value = {'VolumeId': 'volid', 'State': 'available'} success, vm = ec2_cloud.updateVMInfo(vm, auth) self.assertTrue(success, msg="ERROR: updating VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_40_stop(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.stop(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_50_start(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_50_vmop(self, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance success, _ = ec2_cloud.start(vm, auth) - - self.assertTrue(success, 
msg="ERROR: stopping VM info.") + self.assertTrue(success, msg="ERROR: starting VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.start.call_args_list, [call()]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_52_reboot(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.reboot.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.start(vm, auth) + success, _ = ec2_cloud.stop(vm, auth) + self.assertTrue(success, msg="ERROR: stopping VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) - self.assertTrue(success, msg="ERROR: stopping VM info.") + success, _ = ec2_cloud.reboot(vm, auth) + self.assertTrue(success, msg="ERROR: rebooting VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.reboot.call_args_list, [call()]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_55_alter(self, get_connection): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_55_alter(self, mock_boto_session): radl_data = """ network net () system test ( @@ -605,27 +448,29 @@ def test_55_alter(self, get_connection): inf = MagicMock() vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - instance.state = "stopped" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance + instance.id = "iid" + instance.instance_type = "t1.micro" + instance.state = {'Name': 'stopped'} success, _ = ec2_cloud.alterVM(vm, new_radl, auth) self.assertTrue(success, msg="ERROR: modifying VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) + self.assertEqual(instance.start.call_args_list, [call()]) + self.assertEqual(instance.modify_attribute.call_args_list, [call(Attribute='instanceType', Value='t3a.small')]) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_60_finalize(self, record_sets, connect_to_region, sleep, get_connection): + def test_60_finalize(self, sleep, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (outbound = 'yes') @@ -655,145 +500,117 @@ def test_60_finalize(self, 
record_sets, connect_to_region, sleep, get_connection vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) vm.dns_entries = [('test', 'domain.com.', '158.42.1.1')] - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} instance = MagicMock() - device = MagicMock() - instance.update.return_value = True - instance.terminate.return_value = True - instance.block_device_mapping = {"device": device} - device.volume_id = "volid" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - address.instance_id = "id-1" - address.disassociate.return_value = True - address.release.return_value = True - conn.get_all_addresses.return_value = [address] - - conn.get_all_spot_instance_requests.return_value = [] - - sg = MagicMock() - sg.name = "im-1" - sg.description = "Security group created by the IM" - sg.instances.return_value = [] - sg.revoke.return_value = True - sg.delete.return_value = True - sg1 = MagicMock() - sg1.name = "im-1-net" - sg1.description = "" - sg1.instances.return_value = [] - sg1.revoke.return_value = True - sg1.delete.return_value = True - sg2 = MagicMock() - sg2.name = "im-1-net2" - sg2.description = "Security group created by the IM" - sg2.instances.return_value = [] - sg2.revoke.return_value = True - sg2.delete.return_value = True - conn.get_all_security_groups.return_value = [sg, sg1, sg2] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - zone = MagicMock() - record = MagicMock() - zone.id = "zid" - zone.get_a.return_value = record - dns_conn.get_all_rrsets.return_value = [] - dns_conn.get_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - ig = MagicMock() - ig.id = "ig-id" - conn.get_all_internet_gateways.return_value = [ig] + mock_res.Instance.return_value = instance + instance.block_device_mappings = [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeId': 'volid'}}] + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'id-1'}]} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': []} + + mock_conn.describe_security_groups.return_value = {'SecurityGroups': [ + {'GroupId': 'sg1', 'GroupName': 'im-1', 'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg2', 'GroupName': 'im-1-net', 'Description': '', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg3', 'GroupName': 'im-1-net2', 'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'} + ]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 'Id': 
'zone-id'}]} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'test.domain.com.'}]} + mock_conn.describe_internet_gateways.return_value = {'InternetGateways': [{'InternetGatewayId': 'ig-id'}]} success, _ = ec2_cloud.finalize(vm, True, auth) self.assertTrue(success, msg="ERROR: finalizing VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - self.assertEqual(changes.add_change.call_args_list, [call('DELETE', 'test.domain.com.', 'A')]) - self.assertEqual(change.add_value.call_args_list, [call('158.42.1.1')]) - self.assertEqual(sg.delete.call_args_list, [call()]) - self.assertEqual(sg1.delete.call_args_list, []) - self.assertEqual(sg2.delete.call_args_list, [call()]) - self.assertEqual(conn.delete_subnet.call_args_list, [call('subnet-id')]) - self.assertEqual(conn.delete_vpc.call_args_list, [call('vpc-id')]) - self.assertEqual(conn.delete_internet_gateway.call_args_list, [call('ig-id')]) - self.assertEqual(conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.delete_security_group.call_args_list, [call(GroupId='sg1'), + call(GroupId='sg3')]) + self.assertEqual(instance.terminate.call_args_list, [call()]) + self.assertEqual(mock_conn.delete_subnet.call_args_list, [call(SubnetId='subnet-id')]) + self.assertEqual(mock_conn.delete_vpc.call_args_list, [call(VpcId='vpc-id')]) + self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id')]) + self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id', + VpcId='vpc-id')]) + + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_70_create_snapshot(self, sleep, get_connection): + def test_70_create_snapshot(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() inf = MagicMock() - vm = VirtualMachine(inf, "region;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) + vm = VirtualMachine(inf, "us-east-1;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - conn = MagicMock() - get_connection.return_value = conn + mock_conn = MagicMock() + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - reservation = MagicMock() instance = MagicMock() - instance.create_image.return_value = "image-ami" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] + mock_res.Instance.return_value = instance + instance.create_image.return_value = {'ImageId': 'image-ami'} success, new_image = ec2_cloud.create_snapshot(vm, 0, "image_name", True, auth) self.assertTrue(success, msg="ERROR: creating snapshot: %s" % new_image) - self.assertEqual(new_image, "aws://region/image-ami") - self.assertEqual(instance.create_image.call_args_list, [call('image_name', - description='AMI automatically generated by IM', - no_reboot=True)]) + self.assertEqual(new_image, "aws://us-east-1/image-ami") + 
self.assertEqual(instance.create_image.call_args_list, [call(Name='image_name', + Description='AMI automatically generated by IM', + NoReboot=True, + TagSpecifications=[{'ResourceType': 'image', + 'Tags': [{'Key': 'instance_id', + 'Value': 'id1'}]}])]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_80_delete_image(self, sleep, get_connection): + def test_80_delete_image(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - conn = MagicMock() - get_connection.return_value = conn - conn.deregister_image.return_value = True + mock_conn = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] - success, msg = ec2_cloud.delete_image('aws://region/image-ami', auth) + success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) self.assertTrue(success, msg="ERROR: deleting image. %s" % msg) - self.assertEqual(conn.deregister_image.call_args_list, [call('image-ami', delete_snapshot=True)]) + self.assertEqual(mock_conn.deregister_image.call_args_list, [call(ImageId='image-ami')]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_90_list_images(self, sleep, get_connection): + def test_90_list_images(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - conn = MagicMock() - get_connection.return_value = conn - image = MagicMock() - image.id = "ami-123456789012" - image.name = "image_name" - conn.get_all_images.return_value = [image] - + mock_conn = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-123456789012', + 'Name': 'image_name'}]} res = ec2_cloud.list_images(auth, filters={'region': 'us-east-1'}) self.assertEqual(res, [{'uri': 'aws://us-east-1/ami-123456789012', 'name': 'us-east-1/image_name'}]) diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index d9d154cc..2eea95f9 100644 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -1141,6 +1141,7 @@ def test_check_oidc_groups(self, openidclient): Config.OIDC_ISSUERS = ["https://iam-test.indigo-datacloud.eu/"] Config.OIDC_AUDIENCE = None Config.OIDC_GROUPS = ["urn:mace:egi.eu:group:demo.fedcloud.egi.eu:role=member#aai.egi.eu"] + Config.OIDC_GROUPS_CLAIM = "eduperson_entitlement" IM.check_oidc_token(im_auth)
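The final test_im_logic.py hunk sets the new OIDC_GROUPS_CLAIM option explicitly when checking group membership. For operators, the corresponding fragment of /etc/im/im.cfg would look roughly as follows; the values are examples, and OIDC_GROUPS_CLAIM only needs to be set when the groups are not published in the default "groups" claim (EGI Check-in, for instance, uses eduperson_entitlement):

    # im.cfg fragment (illustrative values, exact list syntax follows the existing im.cfg conventions)
    OIDC_ISSUERS = https://aai.egi.eu/auth/realms/egi
    OIDC_GROUPS = urn:mace:egi.eu:group:demo.fedcloud.egi.eu:role=member#aai.egi.eu
    OIDC_GROUPS_CLAIM = eduperson_entitlement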
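Stepping back to the EC2 test rewrite: every migrated test follows the same mocking recipe, patching boto3.session.Session where the connector imports it and handing back separate MagicMocks for the client and the resource, with client calls stubbed using the plain-dict shapes boto3 returns. A condensed sketch of that recipe, runnable only within this repository's test environment and with an illustrative body rather than one of the suite's tests:

    from mock import MagicMock, patch

    @patch('IM.connectors.EC2.boto3.session.Session')
    def test_example(mock_boto_session):
        mock_conn = MagicMock()   # stands in for session.client('ec2')
        mock_res = MagicMock()    # stands in for session.resource('ec2')
        mock_boto_session.return_value.client.return_value = mock_conn
        mock_boto_session.return_value.resource.return_value = mock_res
        mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1']

        # Stub client calls with boto3-shaped dicts, then exercise the connector
        # and assert on the keyword arguments it passed to the mocks.
        mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]}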