From e9da92fb4aa28e15932150d13625d9ead650f493 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Wed, 11 Sep 2024 13:58:54 +0200 Subject: [PATCH 01/53] Implements #301 --- IM/connectors/EC2.py | 392 +++++++++++++++++++++++-------------------- 1 file changed, 214 insertions(+), 178 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 1b6fd17b..4aa53252 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -20,11 +20,9 @@ from netaddr import IPNetwork, IPAddress, spanning_cidr try: - import boto.ec2 - import boto.vpc - import boto.route53 + import boto3 except Exception as ex: - print("WARN: Boto library not correctly installed. EC2CloudConnector will not work!.") + print("WARN: Boto3 library not correctly installed. EC2CloudConnector will not work!.") print(ex) try: @@ -182,19 +180,18 @@ def get_connection(self, region_name, auth_data): return self.connection else: self.auth = auth_data - conn = None + client = None try: if 'username' in auth and 'password' in auth: - region = boto.ec2.get_region(region_name) - if region: - token = auth.get('token') - conn = boto.vpc.VPCConnection(aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - security_token=token, - region=region) - else: - raise Exception( - "Incorrect region name: " + region_name) + client = boto3.client('ec2') + regions = client.describe_regions() + region_names = [region['RegionName'] for region in regions['Regions']] + if region_name not in region_names: + raise Exception("Incorrect region name: " + region_name) + client = boto3.client('ec2', region_name=region_name, + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password'], + aws_session_token=auth.get('token')) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -202,13 +199,11 @@ def get_connection(self, region_name, auth_data): "username (Access Key) and password (Secret Key)") except Exception 
as ex: - self.log_exception( - "Error getting the region " + region_name) - raise Exception("Error getting the region " + - region_name + ": " + str(ex)) + self.log_exception("Error getting the region " + region_name) + raise Exception("Error getting the region " + region_name + ": " + str(ex)) - self.connection = conn - return conn + self.connection = client + return client # Get the Route53 connection object def get_route53_connection(self, region_name, auth_data): @@ -233,9 +228,10 @@ def get_route53_connection(self, region_name, auth_data): conn = None try: if 'username' in auth and 'password' in auth: - conn = boto.route53.connect_to_region(region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) + conn = boto3.client('route53', region_name=region_name, + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password'], + aws_session_token=auth.get('token')) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -342,7 +338,7 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.get_all_security_groups(filters={'group-name': sg_name})[0] + return conn.describe_security_groups(Filters={'Name': 'group-name', 'Values': [sg_name]})[0] except Exception: return None @@ -360,10 +356,22 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: %s" % sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpdId=vpc) # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': 'tcp', + 'FromPort': 0, + 
'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + {'IpProtocol': 'udp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + ]) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -389,7 +397,9 @@ def create_security_groups(self, conn, inf, radl, vpc): if not sg: self.log_info("Creating security group: " + sg_name) try: - sg = conn.create_security_group(sg_name, "Security group created by the IM", vpc_id=vpc) + sg = conn.create_security_group(GroupName=sg_name, + Description="Security group created by the IM", + VpdId=vpc) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -403,8 +413,18 @@ def create_security_groups(self, conn, inf, radl, vpc): try: # open all the ports for the VMs in the security group - sg.authorize('tcp', 0, 65535, src_group=sg) - sg.authorize('udp', 0, 65535, src_group=sg) + conn.authorize_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': 'tcp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + {'IpProtocol': 'udp', + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + ]) except Exception as addex: self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) @@ -415,17 +435,23 @@ def create_security_groups(self, conn, inf, radl, vpc): for outport in outports: if outport.is_range(): - try: - sg.authorize(outport.get_protocol(), outport.get_port_init(), - outport.get_port_end(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. 
Probably the rules exists:" + str(addex)) + from_port = outport.get_port_init() + to_port = outport.get_port_end() else: - try: - sg.authorize(outport.get_protocol(), outport.get_remote_port(), - outport.get_remote_port(), outport.get_remote_cidr()) - except Exception as addex: - self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) + from_port = outport.get_remote_port() + to_port = outport.get_remote_port() + + try: + conn.authorize_security_group_ingress( + GroupId=sg['GroupId'], + IpPermissions=[ + {'IpProtocol': outport.get_protocol(), + 'FromPort': from_port, + 'ToPort': to_port, + 'IpRanges': [{'CidrIp': outport.get_remote_cidr()}]} + ]) + except Exception as addex: + self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) i += 1 except Exception as ex: @@ -444,13 +470,13 @@ def get_default_subnet(conn): vpc_id = None subnet_id = None - for vpc in conn.get_all_vpcs(): - if vpc.is_default or ('Name' in vpc.tags and vpc.tags['Name'] == "default"): - vpc_id = vpc.id - for subnet in conn.get_all_subnets(filters={"vpcId": vpc_id}): - subnet_id = subnet.id - break - break + vpcs = conn.describe_vpcs(Filters=[{'Name': 'is-default', 'Values': ['true']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + subnets = conn.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['Subnets'] + if subnets: + subnet_id = subnets[0]['SubnetId'] return vpc_id, subnet_id @@ -516,45 +542,46 @@ def create_networks(self, conn, radl, inf): # First create the VPC if vpc_id is None: # Check if it already exists - vpcs = conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}) + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf.id]}])['Vpcs'] if vpcs: - vpc_id = vpcs[0].id + vpc_id = vpcs[0]['VpcId'] self.log_debug("VPC %s exists. Do not create." % vpc_id) else: # if not create it self.log_info("Creating VPC with cidr: %s." 
% vpc_cird) - vpc = conn.create_vpc(vpc_cird) - time.sleep(1) - vpc.add_tag("IM-INFRA-ID", inf.id) - vpc_id = vpc.id + vpc = conn.create_vpc(CidrBlock=vpc_cird, TagSpecifications=[{'ResourceType': 'vpc', + 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}]}]) + vpc_id = vpc['Vpc']['VpcId'] self.log_info("VPC %s created." % vpc_id) self.log_info("Creating Internet Gateway.") - ig = conn.create_internet_gateway() - time.sleep(1) - ig.add_tag("IM-INFRA-ID", inf.id) - self.log_info("Internet Gateway %s created." % ig.id) - conn.attach_internet_gateway(ig.id, vpc_id) + ig = conn.create_internet_gateway(TagSpecifications=[{'ResourceType': 'internet-gateway', + 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}]}]) + ig_id = ig['InternetGateway']['InternetGatewayId'] + self.log_info("Internet Gateway %s created." % ig_id) + conn.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) self.log_info("Adding route to the IG.") - for route_table in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - conn.create_route(route_table.id, "0.0.0.0/0", ig.id) + for route_table in conn.describe_route_tables(Filters={"vpc-id": vpc_id})['RouteTables']: + conn.create_route(RouteTableId=route_table['RouteTableId'], + DestinationCidrBlock="0.0.0.0/0", + GatewayId=ig_id) # Now create the subnet # Check if it already exists - subnets = conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id, - "tag:IM-SUBNET-ID": net.id}) + subnets = conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf.id]}, + {'Name': 'tag:IM-SUBNET-ID', 'Values': [net.id]}])['Subnets'] if subnets: subnet = subnets[0] self.log_debug("Subnet %s exists. Do not create." % net.id) net.setValue('cidr', subnet.cidr_block) else: self.log_info("Create subnet for net %s." 
% net.id) - subnet = conn.create_subnet(vpc_id, net_cidr) + subnet = conn.create_subnet(VpcId=vpc_id, CidrBlock=net_cidr, + TagSpecifications=[{'ResourceType': 'subnet', + 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}, + {'Key': 'IM-SUBNET-ID', 'Value': net.id}]}]) self.log_info("Subnet %s created." % subnet.id) - time.sleep(1) - subnet.add_tag("IM-INFRA-ID", inf.id) - subnet.add_tag("IM-SUBNET-ID", net.id) net.setValue('cidr', net_cidr) # Set also the cidr in the inf RADL inf.radl.get_network_by_id(net.id).setValue('cidr', net_cidr) @@ -563,13 +590,13 @@ def create_networks(self, conn, radl, inf): except Exception as ex: self.log_exception("Error creating subnets or vpc.") try: - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": inf.id}): + for subnet in conn.describe_subnets(Filters={"tag:IM-INFRA-ID": inf.id})['Subnets']: self.log_info("Deleting subnet: %s" % subnet.id) conn.delete_subnet(subnet.id) - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": inf.id}): + for vpc in conn.describe_vpcs(Filters={"tag:IM-INFRA-ID": inf.id})['Vpcs']: self.log_info("Deleting vpc: %s" % vpc.id) conn.delete_vpc(vpc.id) - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": inf.id}): + for ig in conn.describe_internet_gateways(Filters={"tag:IM-INFRA-ID": inf.id})['InternetGateways']: self.log_info("Deleting Internet Gateway: %s" % ig.id) conn.delete_internet_gateways(ig.id) except Exception: @@ -584,10 +611,10 @@ def get_networks(self, conn, radl): if provider_id: parts = provider_id.split(".") if len(parts) == 2 and parts[0].startswith("vpc-") and parts[1].startswith("subnet-"): - vpc = conn.get_all_vpcs([parts[0]]) - subnet = conn.get_all_subnets([parts[1]]) + vpc = conn.describe_vpcs(Filters=[{'Name': 'vpc-id', 'Values': [parts[0]]}])['Vpcs'] + subnet = conn.describe_subnets(Filters=[{'Name': 'subnet-id', 'Values': [parts[1]]}])['Subnets'] if vpc and subnet: - return vpc[0].id, subnet[0].id + return vpc[0]['VpcId'], subnet[0]['SubnetId'] 
elif vpc: raise Exception("Incorrect subnet value in provider_id value: %s" % provider_id) else: @@ -651,16 +678,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): for i in range(num_vm): res.append((False, "Error no instance type available for the requirements.")) - image = conn.get_image(ami) + image = conn.describe_images(ImageIds=['ami'])['Images'] if not image: for i in range(num_vm): res.append((False, "Incorrect AMI selected")) return res + image = image[0] block_device_name = None - for name, device in image.block_device_mapping.items(): - if device.snapshot_id or device.volume_id: - block_device_name = name + for device in image['BlockDeviceMappings']: + if device.get('Ebs', {}).get('SnapshotId'): + block_device_name = device.get('DeviceName') if not block_device_name: self.log_error("Error getting correct block_device name from AMI: " + str(ami)) @@ -714,7 +742,6 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): inf.add_vm(vm) user_data = self.get_cloud_init_data(radl, vm, public_key, user) - bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping(conn) # Get data for the root disk size = None disk_type = "standard" @@ -722,16 +749,28 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): disk_type = system.getValue("disk.0.type") if system.getValue("disk.0.size"): size = system.getFeature("disk.0.size").getValue('G') - bdm[block_device_name] = boto.ec2.blockdevicemapping.BlockDeviceType(volume_type=disk_type, - size=size, - delete_on_termination=True) + bdm = [ + { + 'DeviceName': block_device_name, + 'Ebs': { + 'DeleteOnTermination': True, + 'VolumeSize': size, + 'VolumeType': disk_type + } + } + ] volumes = self.get_volumes(conn, vm) for device, (size, snapshot_id, volume_id, disk_type) in volumes.items(): - bdm[device] = boto.ec2.blockdevicemapping.BlockDeviceType(snapshot_id=snapshot_id, - volume_id=volume_id, - volume_type=disk_type, - size=size, delete_on_termination=True) + bdm.append({ + 'DeviceName': 
device, + 'Ebs': { + 'SnapshotId': snapshot_id, + 'DeleteOnTermination': True, + 'VolumeSize': size, + 'VolumeType': disk_type + } + }) if spot: self.log_info("Launching a spot instance") @@ -745,27 +784,32 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): else: availability_zone = 'us-east-1c' historical_price = 1000.0 - availability_zone_list = conn.get_all_zones() + availability_zone_list = conn.describe_availability_zones()['AvailabilityZones'] for zone in availability_zone_list: - history = conn.get_spot_price_history(instance_type=instance_type.name, - product_description=operative_system, - availability_zone=zone.name, - max_results=1) - self.log_debug("Spot price history for the region " + zone.name) + history = conn.describe_spot_price_history(InstanceTypes=[instance_type.name], + ProductDescriptions=[operative_system], + Filters=[{'Name': 'availability-zone', 'Values': [zone['ZoneName']]}], + MaxResults=1)['SpotPriceHistory'] + + self.log_debug("Spot price history for the region " + zone['ZoneName']) self.log_debug(history) - if history and history[0].price < historical_price: - historical_price = history[0].price - availability_zone = zone.name + if history and float(history[0]['SpotPrice']) < historical_price: + historical_price = float(history[0]['SpotPrice']) + availability_zone = zone['ZoneName'] self.log_info("Launching the spot request in the zone " + availability_zone) - request = conn.request_spot_instances(price=price, image_id=image.id, count=1, - type='one-time', instance_type=instance_type.name, - placement=availability_zone, key_name=keypair_name, - security_group_ids=sg_ids, block_device_map=bdm, - subnet_id=subnet, user_data=user_data) + request = conn.request_spot_instances(SpotPrice=str(price), InstanceCount=1, Type='one-time', + LaunchSpecification={'ImageId': image['ImageId'], + 'InstanceType': instance_type.name, + 'Placement': {'AvailabilityZone': availability_zone}, + 'KeyName': keypair_name, + 'SecurityGroupIds': 
sg_ids, + 'BlockDeviceMappings': bdm, + 'SubnetId': subnet, + 'UserData': user_data})['SpotInstanceRequests'] if request: - ec2_vm_id = region_name + ";" + request[0].id + ec2_vm_id = region_name + ";" + request[0]['SpotInstanceRequestId'] self.log_debug("RADL:") self.log_debug(system) @@ -783,23 +827,35 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): err_msg += " an ondemand instance " err_msg += " of type: %s " % instance_type.name - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=subnet, - groups=sg_ids, - associate_public_ip_address=add_public_ip) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - - reservation = conn.run_instances(image.id, min_count=1, max_count=1, key_name=keypair_name, - instance_type=instance_type.name, network_interfaces=interfaces, - placement=placement, block_device_map=bdm, user_data=user_data) - - if len(reservation.instances) == 1: + interfaces = [ + { + 'DeviceIndex': 0, + 'SubnetId': subnet, + 'Groups': sg_ids, + 'AssociatePublicIpAddress': add_public_ip, + 'DeleteOnTermination': True + } + ] + + instances = conn.run_instances(ImageId=image['ImageId'], MinCount=1, MaxCount=1, + KeyName=keypair_name, InstanceType=instance_type.name, + NetworkInterfaces=interfaces, Placement={'AvailabilityZone': placement}, + BlockDeviceMappings=bdm, UserData=user_data)['Instances'] + + if len(instances) == 1: time.sleep(1) - instance = reservation.instances[0] - instance.add_tag("Name", self.gen_instance_name(system)) + instance = conn.Instance(instances[0]) + + im_username = "im_user" + if auth_data.getAuthInfo('InfrastructureManager'): + im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] + instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, + {'Key': 'IM-USER', 'Value': im_username}] for key, value in tags.items(): - instance.add_tag(key, value) - ec2_vm_id = region_name + ";" + instance.id + instace_tags.append({'Key': 
key, 'Value': value}) + instance.create_tags(Tags=instace_tags) + + ec2_vm_id = region_name + ";" + instances[0] self.log_debug("RADL:") self.log_debug(system) @@ -844,21 +900,21 @@ def create_volume(self, conn, disk_size, placement=None, vol_type=None, timeout= Returns: a :py:class:`boto.ec2.volume.Volume` of the new volume """ if placement is None: - placement = conn.get_all_zones()[0] + placement = conn.describe_zones()['Zones'][0]['ZoneName'] volume = conn.create_volume(disk_size, placement, volume_type=vol_type) cont = 0 err_states = ["error"] - while str(volume.status) != 'available' and str(volume.status) not in err_states and cont < timeout: + while str(volume['Status']) != 'available' and str(volume['Status']) not in err_states and cont < timeout: self.log_info("State: " + str(volume.status)) cont += 2 time.sleep(2) - volume = conn.get_all_volumes([volume.id])[0] + volume = conn.describe_volumes([volume['VolumeId']])['Volumes'][0] - if str(volume.status) == 'available': + if str(volume['Status']) == 'available': return volume else: self.log_error("Error creating the volume %s, deleting it" % (volume.id)) - conn.delete_volume(volume.id) + conn.delete_volume(volume['VolumeId']) return None @staticmethod @@ -888,17 +944,17 @@ def get_volumes(conn, vm): if disk_url: _, elem_id = EC2CloudConnector.getAMIData(disk_url) if elem_id.startswith('snap-'): - snapshot_id = conn.get_all_snapshots([elem_id])[0].id + snapshot_id = conn.describe_snapshots(SnapshotIds=[elem_id])['Snapshots'][0]['SnapshotId'] elif elem_id.startswith('vol-'): - volume_id = conn.get_all_volumes([elem_id])[0].id + volume_id = conn.describe_volumes(VolumeIds=[elem_id])['Volumes'][0]['VolumeId'] else: - snapshot = conn.get_all_snapshots(filters={'tag:Name': elem_id}) + snapshot = conn.describe_snapshots(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Snapshots'] if snapshot: - snapshot_id = snapshot[0].id + snapshot_id = snapshot[0]['SnapshotId'] else: - volume = 
conn.get_all_volumes(filters={'tag:Name': elem_id}) + volume = conn.describe_volumes(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Volumes'] if volume: - volume_id = volume[0].id + volume_id = volume[0]['VolumeId'] else: raise Exception("No snapshot/volume found with name: %s" % elem_id) else: @@ -929,15 +985,13 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): try: conn = self.get_connection(region_name, auth_data) - - reservations = conn.get_all_instances([instance_id]) - instance = reservations[0].instances[0] + instance = conn.Instance(instance_id) except Exception: self.log_error("Error getting instance id: %s" % instance_id) return instance - def add_elastic_ip(self, vm, instance, fixed_ip=None): + def add_elastic_ip(self, vm, instance, conn, fixed_ip=None): """ Add an elastic IP to an instance @@ -955,8 +1009,8 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): pub_address = None self.log_info("Add an Elastic IP") if fixed_ip: - for address in instance.connection.get_all_addresses(): - if str(address.public_ip) == fixed_ip: + for address in conn.describe_addresses()['Addresses']: + if str(address['PublicIp']) == fixed_ip: pub_address = address if pub_address: @@ -965,13 +1019,9 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_warn("Setting a fixed IP NOT ALLOCATED! (" + fixed_ip + "). 
Ignore it.") return None else: - provider_id = self.get_net_provider_id(vm.info) - if provider_id: - pub_address = instance.connection.allocate_address(domain="vpc") - instance.connection.associate_address(instance.id, allocation_id=pub_address.allocation_id) - else: - pub_address = instance.connection.allocate_address() - instance.connection.associate_address(instance.id, pub_address.public_ip) + pub_address = conn.allocate_address(Domain='vpc') + + conn.associate_address(InstanceId=instance.id, AllocationId=pub_address['AllocationId']) self.log_debug(pub_address) return pub_address @@ -979,7 +1029,7 @@ def add_elastic_ip(self, vm, instance, fixed_ip=None): self.log_exception("Error adding an Elastic IP to VM ID: " + str(vm.id)) if pub_address: self.log_exception("The Elastic IP was allocated, release it.") - pub_address.release() + conn.release_address(AllocationId=pub_address['AllocationId']) return None else: self.log_info("The VM is not running, not adding an Elastic IP.") @@ -1017,40 +1067,39 @@ def delete_elastic_ips(self, conn, vm, timeout=240): if pub_ip in fixed_ips: self.log_info("%s is a fixed IP, it is not released" % pub_ip) else: - for address in conn.get_all_addresses(filters={"public-ip": pub_ip}): - self.log_info("This VM has a Elastic IP %s." % address.public_ip) + for address in conn.describe_addresses(Filters={"public-ip": pub_ip})['Addresses']: + self.log_info("This VM has a Elastic IP %s." % address['PublicIp']) cont = 0 - while address.instance_id and cont < timeout: + while address['InstanceId'] and cont < timeout: cont += 3 try: self.log_debug("Disassociate it.") - address.disassociate() + conn.disassociate_address(PublicIp=address['PublicIp']) except Exception: self.log_debug("Error disassociating the IP.") - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] + address = conn.describe_addresses(Filters={"public-ip": pub_ip})['Addresses'][0] self.log_info("It is attached. 
Wait.") time.sleep(3) - address = conn.get_all_addresses(filters={"public-ip": pub_ip})[0] self.log_info("Now release it.") - address.release() + conn.release_address(AllocationId=address['AllocationId']) - def setIPsFromInstance(self, vm, instance): + def setIPsFromInstance(self, vm, instance, conn): """ Adapt the RADL information of the VM to the real IPs assigned by EC2 Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.ec2.Instance`): object to connect to EC2 instance. """ vm_system = vm.info.systems[0] num_pub_nets = num_nets = 0 public_ips = [] private_ips = [] - if (instance.ip_address is not None and len(instance.ip_address) > 0 and - instance.ip_address != instance.private_ip_address): - public_ips = [instance.ip_address] + if (instance.public_ip_address is not None and len(instance.public_ip_address) > 0 and + instance.public_ip_address != instance.private_ip_address): + public_ips = [instance.public_ip_address] num_nets += 1 num_pub_nets = 1 if instance.private_ip_address is not None and len(instance.private_ip_address) > 0: @@ -1071,13 +1120,13 @@ def setIPsFromInstance(self, vm, instance): elastic_ips = [] # Get the elastic IPs assigned (there must be only 1) - for address in instance.connection.get_all_addresses(): - if address.instance_id == instance.id: - elastic_ips.append(str(address.public_ip)) + for address in conn.describe_addresses()['Addresses']: + if address['InstanceId'] == instance.id: + elastic_ips.append(str(address['PublicIp'])) # It will be used if it is different to the public IP of the # instance - if str(address.public_ip) != instance.ip_address: - vm_system.setValue('net_interface.' + str(num_nets) + '.ip', str(instance.ip_address)) + if str(address['PublicIp']) != instance.public_ip_address: + vm_system.setValue('net_interface.' 
+ str(num_nets) + '.ip', str(instance.public_ip_address)) vm_system.setValue('net_interface.' + str(num_nets) + '.connection', public_net.id) num_pub_nets += 1 @@ -1097,14 +1146,14 @@ def setIPsFromInstance(self, vm, instance): # It is a fixed IP if ip not in elastic_ips: # It has not been created yet, do it - self.add_elastic_ip(vm, instance, ip) + self.add_elastic_ip(vm, instance, conn, ip) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break else: # Check if we have enough public IPs if num >= num_pub_nets: - self.add_elastic_ip(vm, instance) + self.add_elastic_ip(vm, instance, conn) # EC2 only supports 1 elastic IP per instance (without # VPC), so break break @@ -1193,14 +1242,14 @@ def updateVMInfo(self, vm, auth_data): self.log_info("Check if the request has been fulfilled and the instance has been deployed") job_sir_id = instance_id - request_list = conn.get_all_spot_instance_requests() + request_list = conn.describe_spot_instance_requests()['SpotInstanceRequests'] for sir in request_list: # TODO: Check if the request had failed and launch it in # another availability zone - if sir.state == 'failed': + if sir['State'] == 'failed': vm.state = VirtualMachine.FAILED - if sir.id == job_sir_id: - job_instance_id = sir.instance_id + if sir['SpotInstanceRequestId'] == job_sir_id: + job_instance_id = sir['InstanceId'] break if job_instance_id: @@ -1214,28 +1263,15 @@ def updateVMInfo(self, vm, auth_data): instance = self.get_instance_by_id(instance_id, region, auth_data) if instance: - try: - # sometime if you try to update a recently created instance - # this operation fails - instance.update() - if "IM-USER" not in instance.tags: - im_username = "im_user" - if auth_data.getAuthInfo('InfrastructureManager'): - im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] - instance.add_tag("IM-USER", im_username) - except Exception as ex: - self.log_exception("Error updating the instance " + instance_id) - return (False, 
"Error updating the instance " + instance_id + ": " + str(ex)) - vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type) - vm.info.systems[0].setValue("availability_zone", instance.placement) + vm.info.systems[0].setValue("availability_zone", instance.placement['AvailabilityZone']) vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN) instance_type = self.get_instance_type_by_name(instance.instance_type) self.update_system_info_from_instance(vm.info.systems[0], instance_type) - self.setIPsFromInstance(vm, instance) + self.setIPsFromInstance(vm, instance, conn) self.manage_dns_entries("add", vm, auth_data) self.addRouterInstance(vm, conn) From 328413b36eeb98acf3d397bf13dead38dff31e9c Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Wed, 11 Sep 2024 16:56:21 +0200 Subject: [PATCH 02/53] Implements #301 --- IM/connectors/EC2.py | 230 +++++++++++++++++++++++++------------------ 1 file changed, 134 insertions(+), 96 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 4aa53252..d7f6e75a 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1242,15 +1242,17 @@ def updateVMInfo(self, vm, auth_data): self.log_info("Check if the request has been fulfilled and the instance has been deployed") job_sir_id = instance_id - request_list = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - for sir in request_list: - # TODO: Check if the request had failed and launch it in - # another availability zone - if sir['State'] == 'failed': - vm.state = VirtualMachine.FAILED - if sir['SpotInstanceRequestId'] == job_sir_id: - job_instance_id = sir['InstanceId'] - break + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': ['job_sir_id']}]) + sir = [] + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + # TODO: Check if the request had failed and launch it in + # another availability zone + if 
sir['State'] == 'failed': + vm.state = VirtualMachine.FAILED + if sir['SpotInstanceRequestId'] == job_sir_id: + job_instance_id = sir['InstanceId'] if job_instance_id: self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) @@ -1288,6 +1290,12 @@ def updateVMInfo(self, vm, auth_data): return (True, vm) + def _get_zone(self, conn, domain): + zones = conn.list_hosted_zones_by_name(DNSName=domain)['HostedZones'] + if not zones or len(zones) == 0: + return None + return zones[0] + def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): try: # Workaround to use EC2 as the default case. @@ -1299,25 +1307,40 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + + zone = self._get_zone(conn, domain) + if not zone: + raise Exception("Could not find DNS zone to update") + zone_id = zone['Id'] + if not zone: self.log_info("Creating DNS zone %s" % domain) - zone = conn.create_zone(domain) + zone = conn.create_hosted_zone(domain) else: self.log_info("DNS zone %s exists. Do not create." % domain) if zone: fqdn = hostname + "." + domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone_id, + StartRecordName=fqdn, + StartRecordType='A')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("Creating DNS record %s." 
% fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("CREATE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets( + HostedZoneId=zone_id, + ChangeBatch={ + 'Changes': [{ + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': fqdn, + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': ip}]} + }] + }) else: self.log_info("DNS record %s exists. Do not create." % fqdn) return True @@ -1335,28 +1358,42 @@ def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): raise Exception("No auth data has been specified to EC2.") else: auth = auths[0] - conn = boto.route53.connect_to_region('universal', - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password']) - zone = conn.get_zone(domain) + conn = boto3.client('route53', region_name='universal', + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password']) + zone = self._get_zone(conn, domain) if not zone: self.log_info("The DNS zone %s does not exists. Do not delete records." % domain) else: fqdn = hostname + "." + domain - record = zone.get_a(fqdn) - if not record: + records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + StartRecordName=fqdn, + StartRecordType='A')['ResourceRecordSets'] + if not records or records[0]['Name'] != fqdn: self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: self.log_info("Deleting DNS record %s." 
% fqdn) - changes = boto.route53.record.ResourceRecordSets(conn, zone.id) - change = changes.add_change("DELETE", fqdn, "A") - change.add_value(ip) - changes.commit() + conn.change_resource_record_sets( + HostedZoneId=zone['Id'], + ChangeBatch={ + 'Changes': [{ + 'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': fqdn, + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': ip}]} + }] + }) # if there are no A records - # all_a_records = [r for r in conn.get_all_rrsets(zone.id) if r.type == "A"] + # all_a_records = conn.list_resource_record_sets( + # HostedZoneId=zone['Id'], + # StartRecordType='A' + #)['ResourceRecordSets'] # if not all_a_records: - # conn.delete_hosted_zone(zone.id) + # self.log_info("Deleting DNS zone %s." % domain) + # conn.delete_hosted_zone(zone['Id']) def cancel_spot_requests(self, conn, vm): """ @@ -1369,24 +1406,25 @@ def cancel_spot_requests(self, conn, vm): instance_id = vm.id.split(";")[1] # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): - request_list = conn.get_all_spot_instance_requests([instance_id]) - for sir in request_list: - conn.cancel_spot_instance_requests(sir.id) - self.log_info("Spot instance request " + str(sir.id) + " deleted") - break + request_list = conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', + 'Values': ['job_sir_id']}]) + if request_list['SpotInstanceRequests']: + sir = request_list['SpotInstanceRequests'][0] + conn.cancel_spot_instance_requests(sir['SpotInstanceRequestId']) + self.log_info("Spot instance request " + sir['SpotInstanceRequestId'] + " deleted") def delete_networks(self, conn, vm, timeout=240): """ Delete the created networks """ - for subnet in conn.get_all_subnets(filters={"tag:IM-INFRA-ID": vm.inf.id}): - self.log_info("Deleting subnet: %s" % subnet.id) + for subnet in conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['Subnets']: + self.log_info("Deleting subnet: %s" % 
subnet['SubnetId']) cont = 0 deleted = False while not deleted and cont < timeout: cont += 5 try: - conn.delete_subnet(subnet.id) + conn.delete_subnet(subnet['SubnetId']) deleted = True except Exception as ex: self.log_warn("Error removing subnet: " + str(ex)) @@ -1398,11 +1436,11 @@ def delete_networks(self, conn, vm, timeout=240): self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet.id)) vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['Vpcs']: + vpc_id = vpc['VpcId'] ig_id = None - for ig in conn.get_all_internet_gateways(filters={"tag:IM-INFRA-ID": vm.inf.id}): - ig_id = ig.id + for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['InternetGateways']: + ig_id = ig['InternetGatewayId'] if ig_id and vpc_id: self.log_info("Detacching Internet Gateway: %s from VPC: %s" % (ig_id, vpc_id)) @@ -1488,7 +1526,8 @@ def _get_security_groups(self, conn, vm): sgs = [] for sg_name in sg_names: try: - sgs.extend(conn.get_all_security_groups(filters={'group-name': sg_name})) + sg = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [sg_name]}]) + sgs.extend(sg['SecurityGroups'][0]) except Exception: self.log_exception("Error getting SG %s" % sg_name) return sgs @@ -1505,68 +1544,69 @@ def delete_security_groups(self, conn, vm, timeout=90): if sgs: # Get the default SG to set in the instances - def_sg_id = conn.get_all_security_groups(filters={'group-name': 'default', - 'vpc-id': sgs[0].vpc_id})[0].id + def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, + {'Name': 'vpc-id', 'Values': [sgs[0].vpc_id]}] + )['SecurityGroups'][0]['GroupId'] for sg in sgs: - if sg.description != "Security group created by the IM": - self.log_info("SG %s not created by the IM. Do not delete it." 
% sg.name)
+            if sg['Description'] != "Security group created by the IM":
+                self.log_info("SG %s not created by the IM. Do not delete it." % sg['GroupName'])
                 continue
 
             try:
-                for instance in sg.instances():
-                    instance.modify_attribute("groupSet", [def_sg_id])
+                instances = conn.describe_instances(Filters=[{'Name': 'instance.group-id', 'Values': [sg['GroupId']]}])
+                for instance in instances['Reservations'][0]['Instances']:
+                    conn.modify_instance_attribute(
+                        InstanceId=instance['InstanceId'],
+                        Groups=[{'GroupId': def_sg_id}]
+                    )
             except Exception as ex:
                 self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg.name, instance.id, ex))
 
             # try to wait some seconds to free the SGs
             time.sleep(5)
 
-            self.log_info("Remove the SG: " + sg.name)
+            self.log_info("Remove the SG: " + sg['GroupName'])
             try:
-                sg.revoke('tcp', 0, 65535, src_group=sg)
-                sg.revoke('udp', 0, 65535, src_group=sg)
+                conn.revoke_security_group_ingress()
+                conn.revoke_security_group_ingress(
+                    GroupId=sg['GroupId'],
+                    IpPermissions=[
+                        {'IpProtocol': 'tcp',
+                         'FromPort': 0,
+                         'ToPort': 65535,
+                         'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]},
+                        {'IpProtocol': 'udp',
+                         'FromPort': 0,
+                         'ToPort': 65535,
+                         'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}
+                    ])
             except Exception as ex:
                 self.log_warn("Error revoking self rules: " + str(ex))
 
-            sg.delete()
+            conn.delete_security_group(GroupId=sg['GroupId'])
 
     def stop(self, vm, auth_data):
-        region_name = vm.id.split(";")[0]
-        instance_id = vm.id.split(";")[1]
-
-        instance = self.get_instance_by_id(instance_id, region_name, auth_data)
-        if (instance is not None):
-            instance.update()
-            instance.stop()
-        else:
-            self.log_warn("Instance %s not found. Not stopping it." % instance_id)
-            return (False, "Instance %s not found."
% instance_id)
-
-        return (True, "")
+        return self._vm_operation("stop", vm, auth_data)
 
     def start(self, vm, auth_data):
-        region_name = vm.id.split(";")[0]
-        instance_id = vm.id.split(";")[1]
-
-        instance = self.get_instance_by_id(instance_id, region_name, auth_data)
-        if (instance is not None):
-            instance.update()
-            instance.start()
-        else:
-            self.log_warn("Instance %s not found. Not starting it." % instance_id)
-            return (False, "Instance %s not found." % instance_id)
-
-        return (True, "")
+        return self._vm_operation("start", vm, auth_data)
 
     def reboot(self, vm, auth_data):
+        return self._vm_operation("reboot", vm, auth_data)
+
+    def _vm_operation(self, op, vm, auth_data):
         region_name = vm.id.split(";")[0]
         instance_id = vm.id.split(";")[1]
 
         instance = self.get_instance_by_id(instance_id, region_name, auth_data)
         if (instance is not None):
-            instance.update()
-            instance.reboot()
+            if op == "stop":
+                instance.stop()
+            elif op == "start":
+                instance.start()
+            elif op == "reboot":
+                instance.reboot()
         else:
-            self.log_warn("Instance %s not found. Not rebooting it." % instance_id)
+            self.log_warn("Instance %s not found. Not %sing it." % (instance_id, op))
             return (False, "Instance %s not found."
% instance_id) return (True, "") @@ -1580,8 +1620,7 @@ def waitStop(instance, timeout=120): wait = 0 powered_off = False while wait < timeout and not powered_off: - instance.update() - + instance.reload() powered_off = instance.state == 'stopped' if not powered_off: time.sleep(2) @@ -1595,9 +1634,7 @@ def alterVM(self, vm, radl, auth_data): # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) - if instance: - instance.update() - else: + if not instance: return (False, "The instance has not been found") new_system = self.resize_vm_radl(vm, radl) @@ -1611,7 +1648,8 @@ def alterVM(self, vm, radl, auth_data): if instance_type and instance.instance_type != instance_type.name: stopped = self.waitStop(instance) if stopped: - success = instance.modify_attribute('instanceType', instance_type.name) + success = instance.modify_attribute(Attribute='instanceType', + InstanceType=instance_type.name) if success: self.update_system_info_from_instance(vm.info.systems[0], instance_type) instance.start() @@ -1729,11 +1767,11 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): instance = self.get_instance_by_id(instance_id, region_name, auth_data) if instance: self.log_info("Creating snapshot: " + image_name) - snapshot_id = instance.create_image(image_name, - description="AMI automatically generated by IM", - no_reboot=True) + snapshot_id = instance.create_image(Name=image_name, + Description="AMI automatically generated by IM", + NoReboot=True) # Add tags to the snapshot to be recognizable - conn.create_tags(snapshot_id, {'instance_id': instance_id}) + conn.create_tags(Resources=[snapshot_id], Tags=[{'Key': 'instance_id', 'Value': instance_id}]) else: return (False, "Error obtaining details of the instance") if snapshot_id != "": @@ -1750,7 +1788,7 @@ def delete_image(self, image_url, auth_data): self.log_info("Deleting image: %s." 
% image_url) conn = self.get_connection(region_name, auth_data) - success = conn.deregister_image(ami, delete_snapshot=True) # https://github.com/boto/boto/issues/3019 + success = conn.deregister_image(ami) if success: return (True, "") @@ -1766,7 +1804,7 @@ def list_images(self, auth_data, filters=None): regions = [filters['region']] del filters['region'] if not regions: - regions = [region.name for region in boto.ec2.regions()] + region = [region['RegionName'] for region in conn.describe_regions()['Regions']] images_filter = {'architecture': 'x86_64', 'image-type': 'machine', 'virtualization-type': 'hvm', 'state': 'available', @@ -1780,8 +1818,8 @@ def list_images(self, auth_data, filters=None): for region in regions: conn = self.get_connection(region, auth_data) try: - for image in conn.get_all_images(owners=['self', 'aws-marketplace'], filters=images_filter): - if len(image.id) > 12: # do not add old images + for image in conn.describe_images(Owners=['self', 'aws-marketplace'], Filters=images_filter)['Images']: + if len(image['ImageId']) > 12: # do not add old images images.append({"uri": "aws://%s/%s" % (region, image.id), "name": "%s/%s" % (region, image.name)}) except Exception: From eae62ef74054d451f4a7142ee47d3e85a250e787 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Wed, 11 Sep 2024 16:59:11 +0200 Subject: [PATCH 03/53] Implements #301 --- IM/connectors/EC2.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index d7f6e75a..4000b072 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1524,12 +1524,11 @@ def _get_security_groups(self, conn, vm): sg_names.append(sg_name) sgs = [] - for sg_name in sg_names: - try: - sg = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [sg_name]}]) - sgs.extend(sg['SecurityGroups'][0]) - except Exception: - self.log_exception("Error getting SG %s" % sg_name) + try: + sg = 
conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': sg_names}]) + sgs.extend(sg['SecurityGroups']) + except Exception: + self.log_exception("Error getting SG %s" % sg_name) return sgs def delete_security_groups(self, conn, vm, timeout=90): From 4f78517219938a06e854a2aaa3a1b86d64b841b8 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 09:31:04 +0200 Subject: [PATCH 04/53] Implements #301 --- IM/connectors/EC2.py | 89 +++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 4000b072..46fbfaad 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -168,7 +168,7 @@ def get_connection(self, region_name, auth_data): Arguments: - region_name(str): EC2 region to connect. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - Returns: a :py:class:`boto.ec2.connection` or None in case of error + Returns: a :py:class:`boto3.EC2.Client` or None in case of error """ auths = auth_data.getAuthInfo(self.type) if not auths: @@ -213,7 +213,7 @@ def get_route53_connection(self, region_name, auth_data): Arguments: - region_name(str): AWS region to connect. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. 
- Returns: a :py:class:`boto.route53.connection` or None in case of error + Returns: a :py:class:`boto3.Route53.Client` or None in case of error """ auths = auth_data.getAuthInfo(self.type) if not auths: @@ -338,7 +338,7 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.describe_security_groups(Filters={'Name': 'group-name', 'Values': [sg_name]})[0] + return conn.describe_security_groups(Filters={'Name': 'group-name', 'Values': [sg_name]})['SecurityGroups'][0] except Exception: return None @@ -504,11 +504,11 @@ def get_vpc_cidr(self, radl, conn, inf): Get a common CIDR in all the RADL nets """ nets = [] - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()] + nets, + [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] + nets, inf, 127) nets.append(net_cidr) @@ -535,7 +535,7 @@ def create_networks(self, conn, radl, inf): provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet.cidr_block for subnet in conn.get_all_subnets()], + [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']], inf, 127) net.delValue('cidr') @@ -678,7 +678,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): for i in range(num_vm): res.append((False, "Error no instance type available for the requirements.")) - image = conn.describe_images(ImageIds=['ami'])['Images'] + image = conn.describe_images(ImageIds=[ami])['Images'] if not image: for i in range(num_vm): res.append((False, "Incorrect AMI selected")) @@ -761,7 +761,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): ] volumes = 
self.get_volumes(conn, vm) - for device, (size, snapshot_id, volume_id, disk_type) in volumes.items(): + for device, (size, snapshot_id, _, disk_type) in volumes.items(): bdm.append({ 'DeviceName': device, 'Ebs': { @@ -842,9 +842,9 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): NetworkInterfaces=interfaces, Placement={'AvailabilityZone': placement}, BlockDeviceMappings=bdm, UserData=user_data)['Instances'] - if len(instances) == 1: + if instances: time.sleep(1) - instance = conn.Instance(instances[0]) + instance = conn.Instance(instances[0]).load() im_username = "im_user" if auth_data.getAuthInfo('InfrastructureManager'): @@ -881,7 +881,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): for sgid in sg_ids: self.log_info("Remove the SG: %s" % sgid) try: - conn.delete_security_group(group_id=sgid) + conn.delete_security_group(GroupId=sgid) except Exception: self.log_exception("Error deleting SG.") @@ -892,20 +892,20 @@ def create_volume(self, conn, disk_size, placement=None, vol_type=None, timeout= Create an EBS volume Arguments: - - conn(:py:class:`boto.ec2.connection`): object to connect to EC2 API. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - disk_size(int): The size of the new volume, in GiB - placement(str): The availability zone in which the Volume will be created. - type(str): Type of the volume: standard | io1 | gp2. - timeout(int): Time needed to create the volume. 
- Returns: a :py:class:`boto.ec2.volume.Volume` of the new volume + Returns: a :py:dict:`boto3.EC2.Volume` of the new volume """ if placement is None: placement = conn.describe_zones()['Zones'][0]['ZoneName'] - volume = conn.create_volume(disk_size, placement, volume_type=vol_type) + volume = conn.create_volume(Size=disk_size, AvailabilityZone=placement, VolumeType=vol_type) cont = 0 err_states = ["error"] while str(volume['Status']) != 'available' and str(volume['Status']) not in err_states and cont < timeout: - self.log_info("State: " + str(volume.status)) + self.log_info("State: " + str(volume['Status'])) cont += 2 time.sleep(2) volume = conn.describe_volumes([volume['VolumeId']])['Volumes'][0] @@ -944,9 +944,13 @@ def get_volumes(conn, vm): if disk_url: _, elem_id = EC2CloudConnector.getAMIData(disk_url) if elem_id.startswith('snap-'): - snapshot_id = conn.describe_snapshots(SnapshotIds=[elem_id])['Snapshots'][0]['SnapshotId'] + snapshot = conn.describe_snapshots(SnapshotIds=[elem_id])['Snapshots'] + if snapshot: + snapshot_id = snapshot_id[0]['SnapshotId'] elif elem_id.startswith('vol-'): - volume_id = conn.describe_volumes(VolumeIds=[elem_id])['Volumes'][0]['VolumeId'] + volume = conn.describe_volumes(VolumeIds=[elem_id])['Volumes'] + if volume: + volume_id = volume[0]['VolumeId'] else: snapshot = conn.describe_snapshots(Filters=[{'Name': 'tag:Name', 'Values': [elem_id]}])['Snapshots'] if snapshot: @@ -979,13 +983,13 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): - id(str): ID of the EC2 instance. - region_name(str): Region name to search the instance. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. 
- Returns: a :py:class:`boto.ec2.instance` of found instance or None if it was not found + Returns: a :py:class:`boto3.EC2.Instance` of found instance or None if it was not found """ instance = None try: conn = self.get_connection(region_name, auth_data) - instance = conn.Instance(instance_id) + instance = conn.Instance(instance_id).load() except Exception: self.log_error("Error getting instance id: %s" % instance_id) @@ -997,9 +1001,10 @@ def add_elastic_ip(self, vm, instance, conn, fixed_ip=None): Arguments: - vm(:py:class:`IM.VirtualMachine`): VM information. - - instance(:py:class:`boto.ec2.instance`): object to connect to EC2 instance. + - instance(:py:class:`boto3.EC2.Instance`): object to connect to EC2 instance. + - conn(:py:class:`boto3.EC2.Client`): object to connect to EC2 API. - fixed_ip(str, optional): specifies a fixed IP to add to the instance. - Returns: a :py:class:`boto.ec2.address.Address` added or None if some problem occur. + Returns: a :py:dict:`boto3.EC2.Address` added or None if some problem occur. 
""" if vm.state == VirtualMachine.RUNNING and "elastic_ip" not in vm.__dict__.keys(): # Flag to set that this VM has created (or is creating) the elastic @@ -1174,13 +1179,13 @@ def addRouterInstance(self, vm, conn): if network.getValue('router'): if not route_table_id: vpc_id = None - for vpc in conn.get_all_vpcs(filters={"tag:IM-INFRA-ID": vm.inf.id}): - vpc_id = vpc.id + for vpc in conn.describe_vpcs(Filters={"tag:IM-INFRA-ID": vm.inf.id})['Vpcs']: + vpc_id = vpc['VpcId'] if not vpc_id: self.log_error("No VPC found.") return False - for rt in conn.get_all_route_tables(filters={"vpc-id": vpc_id}): - route_table_id = rt.id + for rt in conn.describe_route_tables(Filters={"vpc-id": vpc_id})['RouteTables']: + route_table_id = rt['RouteTableId'] if not route_table_id: self.log_error("No Route Table found with name.") @@ -1208,18 +1213,20 @@ def addRouterInstance(self, vm, conn): success = False break - reservations = conn.get_all_instances([vrouter]) - vrouter_instance = reservations[0].instances[0] + reservations = conn.describe_instances(InstanceIds=[vrouter])['Reservations'] + vrouter_instance = reservations[0]['Instances'][0] - if vrouter_instance.state != "running": + if vrouter_instance['State']['Name'] != "running": self.log_debug("VRouter instance %s is not running." % system_router) success = False break self.log_info("Adding route %s to instance ID: %s." % (router_cidr, vrouter)) - conn.create_route(route_table_id, router_cidr, instance_id=vrouter) + conn.create_route(RouteTableId=route_table_id, + DestinationCidrBlock=router_cidr, + InstanceId=vrouter) self.log_debug("Disabling sourceDestCheck to instance ID: %s." 
% vrouter) - conn.modify_instance_attribute(vrouter, attribute='sourceDestCheck', value=False) + conn.modify_instance_attribute(InstanceId=vrouter, SourceDestCheck={'Value': False}) # once set, delete it to not set it again network.delValue('router') @@ -1268,7 +1275,7 @@ def updateVMInfo(self, vm, auth_data): vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type) vm.info.systems[0].setValue("availability_zone", instance.placement['AvailabilityZone']) - vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN) + vm.state = self.VM_STATE_MAP.get(instance.state['Name'], VirtualMachine.UNKNOWN) instance_type = self.get_instance_type_by_name(instance.instance_type) self.update_system_info_from_instance(vm.info.systems[0], instance_type) @@ -1278,8 +1285,7 @@ def updateVMInfo(self, vm, auth_data): self.addRouterInstance(vm, conn) try: - vm.info.systems[0].setValue('launch_time', int(time.mktime( - time.strptime(instance.launch_time[:19], '%Y-%m-%dT%H:%M:%S')))) + vm.info.systems[0].setValue('launch_time', int(instance.launch_time.timestamp())) except Exception as ex: self.log_warn("Error setting the launch_time of the instance. 
" "Probably the instance is not running:" + str(ex)) @@ -1526,7 +1532,7 @@ def _get_security_groups(self, conn, vm): sgs = [] try: sg = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': sg_names}]) - sgs.extend(sg['SecurityGroups']) + sgs = sg['SecurityGroups'] except Exception: self.log_exception("Error getting SG %s" % sg_name) return sgs @@ -1544,7 +1550,7 @@ def delete_security_groups(self, conn, vm, timeout=90): if sgs: # Get the default SG to set in the instances def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, - {'Name': 'vpc-id', 'Values': [sgs[0].vpc_id]}] + {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] )['SecurityGroups'][0]['GroupId'] for sg in sgs: @@ -1565,7 +1571,6 @@ def delete_security_groups(self, conn, vm, timeout=90): self.log_info("Remove the SG: " + sg['Name']) try: - conn.revoke_security_group_ingress() conn.revoke_security_group_ingress( GroupId=sg['GroupId'], IpPermissions=[ @@ -1648,7 +1653,7 @@ def alterVM(self, vm, radl, auth_data): stopped = self.waitStop(instance) if stopped: success = instance.modify_attribute(Attribute='instanceType', - InstanceType=instance_type.name) + Value=instance_type.name) if success: self.update_system_info_from_instance(vm.info.systems[0], instance_type) instance.start() @@ -1744,9 +1749,6 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): infrastructure is destroyed. - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.volume - http://boto.readthedocs.io/en/latest/ref/ec2.html#module-boto.ec2.instance - Returns: a tuple (success, vm). - The first value is True if the operation finished successfully or False otherwise. 
- The second value is a str with the url of the new image if the operation finished successfully @@ -1768,9 +1770,10 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): self.log_info("Creating snapshot: " + image_name) snapshot_id = instance.create_image(Name=image_name, Description="AMI automatically generated by IM", - NoReboot=True) - # Add tags to the snapshot to be recognizable - conn.create_tags(Resources=[snapshot_id], Tags=[{'Key': 'instance_id', 'Value': instance_id}]) + NoReboot=True, + TagSpecifications=[{'ResourceType': 'snapshot', + 'Tags': [{'Key': 'instance_id', + 'Value': instance_id}]}]) else: return (False, "Error obtaining details of the instance") if snapshot_id != "": From 459ca2ede64d12ddadd2178e42ebbc211b25c238 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 09:59:11 +0200 Subject: [PATCH 05/53] Implements #301 --- IM/connectors/EC2.py | 117 +++++++++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 50 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 46fbfaad..23d4038c 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -189,9 +189,9 @@ def get_connection(self, region_name, auth_data): if region_name not in region_names: raise Exception("Incorrect region name: " + region_name) client = boto3.client('ec2', region_name=region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - aws_session_token=auth.get('token')) + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password'], + aws_session_token=auth.get('token')) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -338,7 +338,8 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.describe_security_groups(Filters={'Name': 'group-name', 'Values': [sg_name]})['SecurityGroups'][0] + return 
conn.describe_security_groups(Filters={'Name': 'group-name', + 'Values': [sg_name]})['SecurityGroups'][0] except Exception: return None @@ -364,13 +365,13 @@ def create_security_groups(self, conn, inf, radl, vpc): GroupId=sg['GroupId'], IpPermissions=[ {'IpProtocol': 'tcp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, {'IpProtocol': 'udp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} ]) except Exception as crex: # First check if the SG does exist @@ -507,8 +508,9 @@ def get_vpc_cidr(self, radl, conn, inf): for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] + nets, + subnets + nets, inf, 127) nets.append(net_cidr) @@ -531,11 +533,12 @@ def create_networks(self, conn, radl, inf): else: vpc_cird = str(common_cird) vpc_id = None - for i, net in enumerate(radl.networks): + for net in radl.networks: provider_id = net.getValue('provider_id') if net.getValue('create') == 'yes' and not net.isPublic() and not provider_id: + subnets = [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']] net_cidr = self.get_free_cidr(net.getValue('cidr'), - [subnet['CidrBlock'] for subnet in conn.describe_subnets()['Subnets']], + subnets, inf, 127) net.delValue('cidr') @@ -549,14 +552,16 @@ def create_networks(self, conn, radl, inf): else: # if not create it self.log_info("Creating VPC with cidr: %s." 
% vpc_cird) - vpc = conn.create_vpc(CidrBlock=vpc_cird, TagSpecifications=[{'ResourceType': 'vpc', - 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}]}]) + vpc = conn.create_vpc(vpc_cird, TagSpecifications=[{'ResourceType': 'vpc', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) vpc_id = vpc['Vpc']['VpcId'] self.log_info("VPC %s created." % vpc_id) self.log_info("Creating Internet Gateway.") ig = conn.create_internet_gateway(TagSpecifications=[{'ResourceType': 'internet-gateway', - 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}]}]) + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) ig_id = ig['InternetGateway']['InternetGatewayId'] self.log_info("Internet Gateway %s created." % ig_id) conn.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) @@ -569,8 +574,10 @@ def create_networks(self, conn, radl, inf): # Now create the subnet # Check if it already exists - subnets = conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf.id]}, - {'Name': 'tag:IM-SUBNET-ID', 'Values': [net.id]}])['Subnets'] + subnets = conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [inf.id]}, + {'Name': 'tag:IM-SUBNET-ID', + 'Values': [net.id]}])['Subnets'] if subnets: subnet = subnets[0] self.log_debug("Subnet %s exists. Do not create." % net.id) @@ -579,8 +586,10 @@ def create_networks(self, conn, radl, inf): self.log_info("Create subnet for net %s." % net.id) subnet = conn.create_subnet(VpcId=vpc_id, CidrBlock=net_cidr, TagSpecifications=[{'ResourceType': 'subnet', - 'Tags': [{'Key': 'IM-INFRA-ID', 'Value': inf.id}, - {'Key': 'IM-SUBNET-ID', 'Value': net.id}]}]) + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}, + {'Key': 'IM-SUBNET-ID', + 'Value': net.id}]}]) self.log_info("Subnet %s created." 
% subnet.id) net.setValue('cidr', net_cidr) # Set also the cidr in the inf RADL @@ -787,10 +796,11 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): availability_zone_list = conn.describe_availability_zones()['AvailabilityZones'] for zone in availability_zone_list: history = conn.describe_spot_price_history(InstanceTypes=[instance_type.name], - ProductDescriptions=[operative_system], - Filters=[{'Name': 'availability-zone', 'Values': [zone['ZoneName']]}], - MaxResults=1)['SpotPriceHistory'] - + ProductDescriptions=[operative_system], + Filters=[{'Name': 'availability-zone', + 'Values': [zone['ZoneName']]}], + MaxResults=1)['SpotPriceHistory'] + self.log_debug("Spot price history for the region " + zone['ZoneName']) self.log_debug(history) if history and float(history[0]['SpotPrice']) < historical_price: @@ -801,15 +811,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): request = conn.request_spot_instances(SpotPrice=str(price), InstanceCount=1, Type='one-time', LaunchSpecification={'ImageId': image['ImageId'], 'InstanceType': instance_type.name, - 'Placement': {'AvailabilityZone': availability_zone}, + 'Placement': { + 'AvailabilityZone': + availability_zone}, 'KeyName': keypair_name, 'SecurityGroupIds': sg_ids, 'BlockDeviceMappings': bdm, 'SubnetId': subnet, - 'UserData': user_data})['SpotInstanceRequests'] + 'UserData': user_data}) - if request: - ec2_vm_id = region_name + ";" + request[0]['SpotInstanceRequestId'] + if request['SpotInstanceRequests']: + ec2_vm_id = region_name + ";" + request['SpotInstanceRequests'][0]['SpotInstanceRequestId'] self.log_debug("RADL:") self.log_debug(system) @@ -839,14 +851,15 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): instances = conn.run_instances(ImageId=image['ImageId'], MinCount=1, MaxCount=1, KeyName=keypair_name, InstanceType=instance_type.name, - NetworkInterfaces=interfaces, Placement={'AvailabilityZone': placement}, + NetworkInterfaces=interfaces, + 
Placement={'AvailabilityZone': placement}, BlockDeviceMappings=bdm, UserData=user_data)['Instances'] if instances: time.sleep(1) instance = conn.Instance(instances[0]).load() - im_username = "im_user" + im_username = "im_user" if auth_data.getAuthInfo('InfrastructureManager'): im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, @@ -1025,7 +1038,7 @@ def add_elastic_ip(self, vm, instance, conn, fixed_ip=None): return None else: pub_address = conn.allocate_address(Domain='vpc') - + conn.associate_address(InstanceId=instance.id, AllocationId=pub_address['AllocationId']) self.log_debug(pub_address) @@ -1340,12 +1353,13 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): ChangeBatch={ 'Changes': [{ 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': fqdn, + 'ResourceRecordSet': { + 'Name': fqdn, 'Type': 'A', 'TTL': 300, - 'ResourceRecords': [{'Value': ip}]} - }] + 'ResourceRecords': [{'Value': ip}] + } + }] }) else: self.log_info("DNS record %s exists. Do not create." % fqdn) @@ -1384,19 +1398,21 @@ def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): ChangeBatch={ 'Changes': [{ 'Action': 'DELETE', - 'ResourceRecordSet': { - 'Name': fqdn, + 'ResourceRecordSet': { + 'Name': fqdn, 'Type': 'A', 'TTL': 300, - 'ResourceRecords': [{'Value': ip}]} - }] - }) + 'ResourceRecords': [{'Value': ip}] + } + }] + } + ) # if there are no A records # all_a_records = conn.list_resource_record_sets( # HostedZoneId=zone['Id'], # StartRecordType='A' - #)['ResourceRecordSets'] + # )['ResourceRecordSets'] # if not all_a_records: # self.log_info("Deleting DNS zone %s." 
% domain) # conn.delete_hosted_zone(zone['Id']) @@ -1445,7 +1461,8 @@ def delete_networks(self, conn, vm, timeout=240): for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['Vpcs']: vpc_id = vpc['VpcId'] ig_id = None - for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['InternetGateways']: + for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', + 'Values': [vm.inf.id]}])['InternetGateways']: ig_id = ig['InternetGatewayId'] if ig_id and vpc_id: @@ -1551,7 +1568,7 @@ def delete_security_groups(self, conn, vm, timeout=90): # Get the default SG to set in the instances def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] - )['SecurityGroups'][0]['GroupId'] + )['SecurityGroups'][0]['GroupId'] for sg in sgs: if sg['Description'] != "Security group created by the IM": @@ -1575,13 +1592,13 @@ def delete_security_groups(self, conn, vm, timeout=90): GroupId=sg['GroupId'], IpPermissions=[ {'IpProtocol': 'tcp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, {'IpProtocol': 'udp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} + 'FromPort': 0, + 'ToPort': 65535, + 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} ]) except Exception as ex: self.log_warn("Error revoking self rules: " + str(ex)) @@ -1596,7 +1613,7 @@ def start(self, vm, auth_data): def reboot(self, vm, auth_data): self._vm_operation(vm, "reboot", auth_data) - + def _vm_operation(self, op, vm, auth_data): region_name = vm.id.split(";")[0] instance_id = vm.id.split(";")[1] From ffc4ebdd9c8bb03423e74e1a0b2d48a31b8573d6 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 11:42:10 +0200 Subject: [PATCH 06/53] Implements #301 
--- IM/connectors/EC2.py | 9 +- test/unit/connectors/EC2.py | 694 ++---------------------------------- 2 files changed, 40 insertions(+), 663 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 23d4038c..39e06949 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -382,7 +382,7 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) while system.getValue("net_interface." + str(i) + ".connection"): network_name = system.getValue("net_interface." + str(i) + ".connection") @@ -410,7 +410,7 @@ def create_security_groups(self, conn, inf, radl, vpc): else: self.log_info("Security group: " + sg_name + " already created.") - res.append(sg.id) + res.append(sg['GroupId']) try: # open all the ports for the VMs in the security group @@ -857,7 +857,8 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if instances: time.sleep(1) - instance = conn.Instance(instances[0]).load() + instance = conn.Instance(instances[0]) + instance.load() im_username = "im_user" if auth_data.getAuthInfo('InfrastructureManager'): @@ -868,7 +869,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): instace_tags.append({'Key': key, 'Value': value}) instance.create_tags(Tags=instace_tags) - ec2_vm_id = region_name + ";" + instances[0] + ec2_vm_id = region_name + ";" + instances[0]['InstanceId'] self.log_debug("RADL:") self.log_debug(system) diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index f2e116d4..9f23d125 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -15,7 +15,6 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
- import sys import unittest @@ -33,9 +32,6 @@ class TestEC2Connector(TestCloudConnectorBase): - """ - Class to test the IM connectors - """ @staticmethod def get_ec2_cloud(): @@ -106,31 +102,20 @@ def test_15_get_all_instance_types(self): self.assertEqual(instance.cores_per_cpu, 1) self.assertEqual(instance.disk_space, 160) - def get_all_subnets(self, subnet_ids=None, filters=None): - subnet = MagicMock() - subnet.id = "subnet-id" - if filters: - return [] - elif subnet_ids: - subnet.cidr_block = "10.10.0.1/24" - return [subnet] - subnet.cidr_block = "10.0.1.0/24" - return [subnet] - - def _get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False): - vpc = MagicMock() - vpc.id = "vpc-id" - vpc.default = True - if vpc_ids: - return [vpc] + def describe_subnets(self, **kwargs): + subnet = {} + subnet['SubnetId'] = "subnet-id" + if 'Filters' in kwargs and kwargs['Filters']: + return {'Subnets': []} + elif 'SubnetIds' in kwargs and kwargs['SubnetIds']: + subnet['CidrBlock'] = "10.10.0.1/24" else: - return [] + subnet['CidrBlock'] = "10.0.1.0/24" + return {'Subnets': [subnet]} - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') + @patch('IM.connectors.EC2.boto3.client') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_region): + def test_20_launch(self, save_data, mock_boto_client): radl_data = """ network net1 (outbound = 'yes' and outports='8080,9000:9100' and sg_name = 'sgname') network net2 () @@ -143,7 +128,7 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio net_interface.0.dns_name = 'test' and net_interface.1.connection = 'net2' and disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and + disk.0.image.url = 'aws://us-east-1/ami-id' and disk.0.os.credentials.username = 'user' and disk.1.size=1GB and disk.1.device='hdb' and @@ -156,646 
+141,37 @@ def test_20_launch(self, save_data, blockdevicemapping, VPCConnection, get_regio {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() - region = MagicMock() - get_region.return_value = region - - conn = MagicMock() - VPCConnection.return_value = conn - - image = MagicMock() - device = MagicMock() - reservation = MagicMock() - instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value = True - instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - - inf = InfrastructureInfo() - inf.auth = auth - inf.radl = radl - res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - self.assertEqual(len(conn.create_security_group.call_args_list), 3) - self.assertEqual(conn.create_security_group.call_args_list[0][0][0], "im-%s" % inf.id) - self.assertEqual(conn.create_security_group.call_args_list[1][0][0], "sgname") - self.assertEqual(conn.create_security_group.call_args_list[2][0][0], "im-%s-net2" % inf.id) - - # Check the case that we do not use VPC - radl_data = """ - network net1 (outbound = 'yes' and outports='8080') - network net2 (create='yes' and cidr='10.0.128.0/24') - network net3 (create='yes' and cidr='10.0.*.0/24') - network 
net4 (create='yes') - system test ( - cpu.arch='x86_64' and - cpu.count>=1 and - memory.size>=1g and - net_interface.0.connection = 'net1' and - net_interface.0.dns_name = 'test' and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and - disk.0.os.credentials.username = 'user' and - #disk.0.os.credentials.private_key = 'private' and - #disk.0.os.credentials.public_key = 'public' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.create_vpc.return_value = vpc - conn.get_all_vpcs.side_effect = self._get_all_vpcs - - subnet = MagicMock() - subnet.id = "subnet-id" - subnet.cidr_block = "10.10.129.0/24" - conn.create_subnet.return_value = subnet - conn.get_all_subnets.side_effect = self.get_all_subnets - inf = InfrastructureInfo() inf.auth = auth inf.radl = radl - res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - # check the instance_type selected is correct - self.assertIn(".micro", conn.run_instances.call_args_list[1][1]["instance_type"]) - self.assertEqual(conn.create_vpc.call_args_list[0][0][0], "10.0.128.0/22") - self.assertEqual(conn.create_subnet.call_args_list[0][0], ('vpc-id', '10.0.128.0/24')) - self.assertEqual(conn.create_subnet.call_args_list[1][0], ('vpc-id', '10.0.129.0/24')) - self.assertEqual(conn.create_subnet.call_args_list[2][0], ('vpc-id', '10.0.130.0/24')) - - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('boto.ec2.get_region') - @patch('boto.vpc.VPCConnection') - @patch('boto.ec2.blockdevicemapping.BlockDeviceMapping') - @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_25_launch_spot(self, save_data, blockdevicemapping, VPCConnection, get_region): - radl_data = """ - network net1 
(outbound = 'yes' and provider_id = 'vpc-id.subnet-id') - network net2 () - system test ( - spot = 'yes' and - cpu.arch='x86_64' and - cpu.count>=1 and - memory.size>=512m and - net_interface.0.connection = 'net1' and - net_interface.0.dns_name = 'test' and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'aws://us-east-one/ami-id' and - disk.0.os.credentials.username = 'user' and - disk.0.os.credentials.private_key = 'private' and - disk.0.os.credentials.public_key = 'public' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - radl.check() - - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}, - {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - region = MagicMock() - get_region.return_value = region - conn = MagicMock() - VPCConnection.return_value = conn - - image = MagicMock() - device = MagicMock() - reservation = MagicMock() + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}, + {'RegionName': 'us-west-1'}]} + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'i-12345678'}]} instance = MagicMock() - device.snapshot_id = True - device.volume_id = True - image.block_device_mapping = {"device": device} - instance.add_tag.return_value 
= True - instance.id = "iid" - reservation.instances = [instance] - conn.run_instances.return_value = reservation - conn.get_image.return_value = image - - sg = MagicMock() - sg.id = "sgid" - sg.name = "sgname" - sg.authorize.return_value = True - conn.create_security_group.return_value = sg - - conn.get_all_security_groups.return_value = [] - - blockdevicemapping.return_value = {'device': ''} - - zone = MagicMock() - zone.name = 'us-east-1' - conn.get_all_zones.return_value = [zone] - history = MagicMock() - history.price = 0.1 - conn.get_spot_price_history.return_value = [history] - - request = MagicMock() - request.id = "id" - conn.request_spot_instances.return_value = [request] + mock_conn.Instance.return_value = instance - inf = InfrastructureInfo() - inf.auth = auth res = ec2_cloud.launch(inf, radl, radl, 1, auth) - success, _ = res[0] - self.assertTrue(success, msg="ERROR: launching a VM.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_30_updateVMInfo(self, record_sets, connect_to_region, get_connection): - radl_data = """ - network net (outbound = 'yes') - network net2 (router = '10.0.10.0/24,vrouter') - system test ( - cpu.arch='x86_64' and - cpu.count=1 and - memory.size=512m and - net_interface.0.connection = 'net' and - net_interface.0.ip = '158.42.1.1' and - net_interface.0.dns_name = 'test.domain.com' and - net_interface.0.additional_dns_names = ['some.test@domain.com'] and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'one://server.com/1' and - disk.0.os.credentials.username = 'user' and - disk.0.os.credentials.password = 'pass' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - radl.check() - - auth 
= Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm1 = MagicMock() - system1 = MagicMock() - system1.name = 'vrouter' - vm1.info.systems = [system1] - vm1.id = "region;int-id" - inf.vm_list = [vm1] - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.tags = [] - instance.virtualization_type = "vt" - instance.placement = "us-east-1" - instance.state = "running" - instance.instance_type = "t1.micro" - instance.launch_time = "2016-12-31T00:00:00" - instance.ip_address = "158.42.1.1" - instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - conn.get_all_addresses.return_value = [address] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - dns_conn.get_zone.return_value = None - zone = MagicMock() - zone.get_a.return_value = None - dns_conn.create_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - routet = MagicMock() - routet.id = "routet-id" - conn.get_all_route_tables.return_value = [routet] - - success, vm = ec2_cloud.updateVMInfo(vm, auth) - - self.assertTrue(success, msg="ERROR: updating VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - self.assertEqual(dns_conn.create_zone.call_count, 2) - self.assertEqual(dns_conn.get_zone.call_count, 2) 
- self.assertEqual(dns_conn.create_zone.call_args_list[0][0][0], "domain.com.") - self.assertEqual(changes.add_change.call_args_list[0], call('CREATE', 'test.domain.com.', 'A')) - self.assertEqual(changes.add_change.call_args_list[1], call('CREATE', 'some.test.domain.com.', 'A')) - self.assertEqual(change.add_value.call_args_list[0], call('158.42.1.1')) - self.assertEqual(conn.create_route.call_args_list[0], call('routet-id', '10.0.10.0/24', instance_id='int-id')) - - # Test using PRIVATE_NET_MASKS setting 10.0.0.0/8 as public net - old_priv = Config.PRIVATE_NET_MASKS - Config.PRIVATE_NET_MASKS = ["172.16.0.0/12", "192.168.0.0/16"] - instance.ip_address = None - instance.private_ip_address = "10.0.0.1" - conn.get_all_addresses.return_value = [] - success, vm = ec2_cloud.updateVMInfo(vm, auth) - Config.PRIVATE_NET_MASKS = old_priv - self.assertEqual(vm.getPublicIP(), "10.0.0.1") - self.assertEqual(vm.getPrivateIP(), None) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_30_updateVMInfo_spot(self, get_connection): - radl_data = """ - network net (outbound = 'yes') - system test ( - cpu.arch='x86_64' and - cpu.count=1 and - memory.size=512m and - net_interface.0.connection = 'net' and - net_interface.0.dns_name = 'test' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'one://server.com/1' and - disk.0.os.credentials.username = 'user' and - disk.0.os.credentials.password = 'pass' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - radl.check() - - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.tags = [] 
- instance.virtualization_type = "vt" - instance.placement = "us-east-1" - instance.state = "running" - instance.instance_type = "t1.micro" - instance.launch_time = "2016-12-31T00:00:00" - instance.ip_address = "158.42.1.1" - instance.private_ip_address = "10.0.0.1" - instance.connection = conn - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - conn.get_all_addresses.return_value = [] - - sir = MagicMock() - sir.state = "" - sir.id = "id" - conn.get_all_spot_instance_requests.return_value = [sir] - - volume = MagicMock() - volume.status = "available" - volume.id = "volid" - conn.create_volume.return_value = volume - conn.attach_volume.return_value = True - - success, vm = ec2_cloud.updateVMInfo(vm, auth) - - self.assertTrue(success, msg="ERROR: updating VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_40_stop(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.stop(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_50_start(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = 
MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.start(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_52_reboot(self, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.reboot.return_value = True - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.start(vm, auth) - - self.assertTrue(success, msg="ERROR: stopping VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - def test_55_alter(self, get_connection): - radl_data = """ - network net () - system test ( - cpu.arch='x86_64' and - cpu.count=1 and - memory.size=512m and - net_interface.0.connection = 'net' and - net_interface.0.dns_name = 'test' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'one://server.com/1' and - disk.0.os.credentials.username = 'user' and - disk.0.os.credentials.password = 'pass' - )""" - radl = radl_parse.parse_radl(radl_data) - - new_radl_data = """ - system test ( - cpu.count>=2 and - 
memory.size>=2048m - )""" - new_radl = radl_parse.parse_radl(new_radl_data) - - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.update.return_value = True - instance.stop.return_value = True - instance.state = "stopped" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, _ = ec2_cloud.alterVM(vm, new_radl, auth) - - self.assertTrue(success, msg="ERROR: modifying VM info.") - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('time.sleep') - @patch('boto.route53.connect_to_region') - @patch('boto.route53.record.ResourceRecordSets') - def test_60_finalize(self, record_sets, connect_to_region, sleep, get_connection): - radl_data = """ - network net (outbound = 'yes') - network net2 (outbound = 'yes') - system test ( - cpu.arch='x86_64' and - cpu.count=1 and - memory.size=512m and - net_interface.0.connection = 'net' and - net_interface.0.ip = '158.42.1.1' and - net_interface.0.dns_name = 'test.domain.com' and - net_interface.1.connection = 'net2' and - disk.0.os.name = 'linux' and - disk.0.image.url = 'one://server.com/1' and - disk.0.os.credentials.username = 'user' and - disk.0.os.credentials.password = 'pass' and - disk.1.size=1GB and - disk.1.device='hdb' and - disk.1.mount_path='/mnt/path' - )""" - radl = radl_parse.parse_radl(radl_data) - - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - inf.id = "1" - vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, 
ec2_cloud, 1) - vm.dns_entries = [('test', 'domain.com.', '158.42.1.1')] - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - device = MagicMock() - instance.update.return_value = True - instance.terminate.return_value = True - instance.block_device_mapping = {"device": device} - device.volume_id = "volid" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - address = MagicMock() - address.public_ip = "158.42.1.1" - address.instance_id = "id-1" - address.disassociate.return_value = True - address.release.return_value = True - conn.get_all_addresses.return_value = [address] - - conn.get_all_spot_instance_requests.return_value = [] - - sg = MagicMock() - sg.name = "im-1" - sg.description = "Security group created by the IM" - sg.instances.return_value = [] - sg.revoke.return_value = True - sg.delete.return_value = True - sg1 = MagicMock() - sg1.name = "im-1-net" - sg1.description = "" - sg1.instances.return_value = [] - sg1.revoke.return_value = True - sg1.delete.return_value = True - sg2 = MagicMock() - sg2.name = "im-1-net2" - sg2.description = "Security group created by the IM" - sg2.instances.return_value = [] - sg2.revoke.return_value = True - sg2.delete.return_value = True - conn.get_all_security_groups.return_value = [sg, sg1, sg2] - - dns_conn = MagicMock() - connect_to_region.return_value = dns_conn - - zone = MagicMock() - record = MagicMock() - zone.id = "zid" - zone.get_a.return_value = record - dns_conn.get_all_rrsets.return_value = [] - dns_conn.get_zone.return_value = zone - changes = MagicMock() - record_sets.return_value = changes - change = MagicMock() - changes.add_change.return_value = change - - subnet = MagicMock() - subnet.id = "subnet-id" - conn.get_all_subnets.return_value = [subnet] - - vpc = MagicMock() - vpc.id = "vpc-id" - conn.get_all_vpcs.return_value = [vpc] - - ig = MagicMock() - ig.id = "ig-id" - 
conn.get_all_internet_gateways.return_value = [ig] - - success, _ = ec2_cloud.finalize(vm, True, auth) - - self.assertTrue(success, msg="ERROR: finalizing VM info.") + success, msg = res[0] + self.assertTrue(success, msg="ERROR: launching a VM: %s" % msg) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - self.assertEqual(changes.add_change.call_args_list, [call('DELETE', 'test.domain.com.', 'A')]) - self.assertEqual(change.add_value.call_args_list, [call('158.42.1.1')]) - self.assertEqual(sg.delete.call_args_list, [call()]) - self.assertEqual(sg1.delete.call_args_list, []) - self.assertEqual(sg2.delete.call_args_list, [call()]) - self.assertEqual(conn.delete_subnet.call_args_list, [call('subnet-id')]) - self.assertEqual(conn.delete_vpc.call_args_list, [call('vpc-id')]) - self.assertEqual(conn.delete_internet_gateway.call_args_list, [call('ig-id')]) - self.assertEqual(conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('time.sleep') - def test_70_create_snapshot(self, sleep, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - inf = MagicMock() - vm = VirtualMachine(inf, "region;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) - - conn = MagicMock() - get_connection.return_value = conn - - reservation = MagicMock() - instance = MagicMock() - instance.create_image.return_value = "image-ami" - reservation.instances = [instance] - conn.get_all_instances.return_value = [reservation] - - success, new_image = ec2_cloud.create_snapshot(vm, 0, "image_name", True, auth) - - self.assertTrue(success, msg="ERROR: creating snapshot: %s" % new_image) - self.assertEqual(new_image, "aws://region/image-ami") - self.assertEqual(instance.create_image.call_args_list, [call('image_name', - description='AMI automatically generated by IM', - 
no_reboot=True)]) - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('time.sleep') - def test_80_delete_image(self, sleep, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - conn = MagicMock() - get_connection.return_value = conn - conn.deregister_image.return_value = True - - success, msg = ec2_cloud.delete_image('aws://region/image-ami', auth) - - self.assertTrue(success, msg="ERROR: deleting image. %s" % msg) - self.assertEqual(conn.deregister_image.call_args_list, [call('image-ami', delete_snapshot=True)]) - self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - - @patch('IM.connectors.EC2.EC2CloudConnector.get_connection') - @patch('time.sleep') - def test_90_list_images(self, sleep, get_connection): - auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) - ec2_cloud = self.get_ec2_cloud() - - conn = MagicMock() - get_connection.return_value = conn - image = MagicMock() - image.id = "ami-123456789012" - image.name = "image_name" - conn.get_all_images.return_value = [image] - - res = ec2_cloud.list_images(auth, filters={'region': 'us-east-1'}) - self.assertEqual(res, [{'uri': 'aws://us-east-1/ami-123456789012', 'name': 'us-east-1/image_name'}]) + self.assertEqual(len(mock_conn.create_security_group.call_args_list), 3) + self.assertEqual(mock_conn.create_security_group.call_args_list[0][1]['GroupName'], "im-%s" % inf.id) + self.assertEqual(mock_conn.create_security_group.call_args_list[1][1]['GroupName'], "sgname") + self.assertEqual(mock_conn.create_security_group.call_args_list[2][1]['GroupName'], "im-%s-net2" % inf.id) + mock_conn.run_instances.assert_called_once() if __name__ == '__main__': From b7bdc2e7bc214741d65f56b746bdb1004352f0d6 Mon Sep 17 
00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 12:09:25 +0200 Subject: [PATCH 07/53] Implements #301 --- IM/connectors/EC2.py | 4 ++- test/unit/connectors/EC2.py | 66 ++++++++++++++++++++++++++++++++++--- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 39e06949..14cf221d 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -786,6 +786,8 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): err_msg += " a spot instance " err_msg += " of type: %s " % instance_type.name price = system.getValue("price") + if price: + price = str(price) # Realizamos el request de spot instances if system.getValue('availability_zone'): @@ -808,7 +810,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): availability_zone = zone['ZoneName'] self.log_info("Launching the spot request in the zone " + availability_zone) - request = conn.request_spot_instances(SpotPrice=str(price), InstanceCount=1, Type='one-time', + request = conn.request_spot_instances(SpotPrice=price, InstanceCount=1, Type='one-time', LaunchSpecification={'ImageId': image['ImageId'], 'InstanceType': instance_type.name, 'Placement': { diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index 9f23d125..ef505654 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -24,11 +24,9 @@ from IM.CloudInfo import CloudInfo from IM.auth import Authentication from radl import radl_parse -from IM.VirtualMachine import VirtualMachine from IM.InfrastructureInfo import InfrastructureInfo from IM.connectors.EC2 import EC2CloudConnector -from IM.config import Config -from mock import patch, MagicMock, call +from mock import patch, MagicMock class TestEC2Connector(TestCloudConnectorBase): @@ -151,8 +149,7 @@ def test_20_launch(self, save_data, mock_boto_client): mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 
'vpc-id'}]} mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}, - {'RegionName': 'us-west-1'}]} + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', 'Ebs': { @@ -173,6 +170,65 @@ def test_20_launch(self, save_data, mock_boto_client): self.assertEqual(mock_conn.create_security_group.call_args_list[2][1]['GroupName'], "im-%s-net2" % inf.id) mock_conn.run_instances.assert_called_once() + @patch('IM.connectors.EC2.boto3.client') + @patch('IM.InfrastructureList.InfrastructureList.save_data') + def test_25_launch_spot(self, save_data, mock_boto_client): + radl_data = """ + network net1 (outbound = 'yes' and provider_id = 'vpc-id.subnet-id') + network net2 () + system test ( + spot = 'yes' and + cpu.arch='x86_64' and + cpu.count>=1 and + memory.size>=512m and + net_interface.0.connection = 'net1' and + net_interface.0.dns_name = 'test' and + net_interface.1.connection = 'net2' and + disk.0.os.name = 'linux' and + disk.0.image.url = 'aws://us-east-1/ami-id' and + disk.0.os.credentials.username = 'user' and + disk.0.os.credentials.private_key = 'private' and + disk.0.os.credentials.public_key = 'public' and + disk.1.size=1GB and + disk.1.device='hdb' and + disk.1.mount_path='/mnt/path' + )""" + radl = radl_parse.parse_radl(radl_data) + radl.check() + + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}, + {'type': 'InfrastructureManager', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + + mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'iid'}]} + instance 
= MagicMock() + mock_conn.Instance.return_value = instance + instance.id = "iid" + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', + 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': 'snap-12345678' + }}]} + ]} + mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} + mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} + mock_conn.describe_availability_zones.return_value = {'AvailabilityZones': [{'ZoneName': 'us-east-1'}]} + mock_conn.describe_spot_price_history.return_value = {'SpotPriceHistory': [{'SpotPrice': '0.1'}]} + mock_conn.request_spot_instances.return_value = {'SpotInstanceRequests': [{'SpotInstanceRequestId': 'sid'}]} + + inf = InfrastructureInfo() + inf.auth = auth + res = ec2_cloud.launch(inf, radl, radl, 1, auth) + success, _ = res[0] + self.assertTrue(success, msg="ERROR: launching a VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + if __name__ == '__main__': unittest.main() From c3f1fd9cf4a904770f0fcb9f08831b1965d696af Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 13:58:12 +0200 Subject: [PATCH 08/53] Implements #301 --- IM/connectors/CloudConnector.py | 4 -- IM/connectors/EC2.py | 3 +- test/unit/connectors/EC2.py | 98 +++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 5 deletions(-) diff --git a/IM/connectors/CloudConnector.py b/IM/connectors/CloudConnector.py index 8b836825..69dc94ec 100644 --- a/IM/connectors/CloudConnector.py +++ b/IM/connectors/CloudConnector.py @@ -734,10 +734,6 @@ def manage_dns_entries(self, op, vm, auth_data, extra_args=None): vm.dns_entries = [] if op == "add": dns_entries = [entry for entry in self.get_dns_entries(vm) if entry not in vm.dns_entries] - dns_entries = [] - for 
entry in self.get_dns_entries(vm): - if entry not in vm.dns_entries: - dns_entries.append(entry) else: dns_entries = list(vm.dns_entries) if dns_entries: diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 14cf221d..b5436b17 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1005,7 +1005,8 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): try: conn = self.get_connection(region_name, auth_data) - instance = conn.Instance(instance_id).load() + instance = conn.Instance(instance_id) + instance.load() except Exception: self.log_error("Error getting instance id: %s" % instance_id) diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index ef505654..f148f21f 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -17,12 +17,15 @@ # along with this program. If not, see . import sys import unittest +import datetime sys.path.append(".") sys.path.append("..") from .CloudConn import TestCloudConnectorBase from IM.CloudInfo import CloudInfo from IM.auth import Authentication +from IM.config import Config +from IM.VirtualMachine import VirtualMachine from radl import radl_parse from IM.InfrastructureInfo import InfrastructureInfo from IM.connectors.EC2 import EC2CloudConnector @@ -229,6 +232,101 @@ def test_25_launch_spot(self, save_data, mock_boto_client): self.assertTrue(success, msg="ERROR: launching a VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + @patch('IM.connectors.EC2.boto3.client') + def test_30_updateVMInfo(self, mock_boto_client): + radl_data = """ + network net (outbound = 'yes') + network net2 (router = '10.0.10.0/24,vrouter') + system test ( + cpu.arch='x86_64' and + cpu.count=1 and + memory.size=512m and + net_interface.0.connection = 'net' and + net_interface.0.ip = '158.42.1.1' and + net_interface.0.dns_name = 'test.domain.com' and + net_interface.0.additional_dns_names = ['some.test@domain.com'] and + 
net_interface.1.connection = 'net2' and + disk.0.os.name = 'linux' and + disk.0.image.url = 'one://server.com/1' and + disk.0.os.credentials.username = 'user' and + disk.0.os.credentials.password = 'pass' and + disk.1.size=1GB and + disk.1.device='hdb' and + disk.1.mount_path='/mnt/path' + )""" + radl = radl_parse.parse_radl(radl_data) + radl.check() + + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} + instance = MagicMock() + mock_conn.Instance.return_value = instance + instance.id = "iid" + instance.tags = [] + instance.virtualization_type = "vt" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} + instance.instance_type = "t1.micro" + instance.launch_time = datetime.datetime.now() + instance.public_ip_address = "158.42.1.1" + instance.private_ip_address = "10.0.0.1" + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'iid'}]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + mock_conn.describe_route_tables.return_value = {'RouteTables': [{'RouteTableId': 'routet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 'Id': 'zone-id'}]} + mock_conn.create_hosted_zone.return_value = {'HostedZone': {'Id': 'zone-idc'}} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'some.test.domain.com.'}]} + + inf = MagicMock() + vm1 = MagicMock() + system1 = MagicMock() + system1.name = 'vrouter' + vm1.info.systems 
= [system1] + vm1.id = "region;int-id" + inf.vm_list = [vm1] + vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) + + success, vm = ec2_cloud.updateVMInfo(vm, auth) + + self.assertTrue(success, msg="ERROR: updating VM info.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(mock_conn.list_hosted_zones_by_name.call_count, 2) + self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.create_route.call_args_list[0][1], {'RouteTableId': 'routet-id', + 'DestinationCidrBlock': '10.0.10.0/24', + 'InstanceId': 'int-id'}) + + # Test using PRIVATE_NET_MASKS setting 10.0.0.0/8 as public net + old_priv = Config.PRIVATE_NET_MASKS + Config.PRIVATE_NET_MASKS = ["172.16.0.0/12", "192.168.0.0/16"] + + instance.public_ip_address = None + instance.private_ip_address = "10.0.0.1" + mock_conn.describe_addresses.return_value = {'Addresses': []} + + success, vm = ec2_cloud.updateVMInfo(vm, auth) + Config.PRIVATE_NET_MASKS = old_priv + self.assertEqual(vm.getPublicIP(), "10.0.0.1") + self.assertEqual(vm.getPrivateIP(), None) + if __name__ == '__main__': unittest.main() From fd72ab5b24f42e39c2f4a6b7a83bcc59e2a77fcf Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 16:09:29 +0200 Subject: [PATCH 09/53] Implements #301 --- IM/connectors/EC2.py | 8 ++-- test/unit/connectors/EC2.py | 84 ++++++++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 5 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index b5436b17..bee91556 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1473,10 +1473,10 @@ def delete_networks(self, conn, vm, timeout=240): self.log_info("Detacching Internet Gateway: %s from 
VPC: %s" % (ig_id, vpc_id)) conn.detach_internet_gateway(ig_id, vpc_id) if ig_id: - self.log_info("Deleting Internet Gateway: %s" % ig.id) + self.log_info("Deleting Internet Gateway: %s" % ig_id) conn.delete_internet_gateway(ig_id) if vpc_id: - self.log_info("Deleting vpc: %s" % vpc.id) + self.log_info("Deleting vpc: %s" % vpc_id) conn.delete_vpc(vpc_id) def finalize(self, vm, last, auth_data): @@ -1558,7 +1558,7 @@ def _get_security_groups(self, conn, vm): self.log_exception("Error getting SG %s" % sg_name) return sgs - def delete_security_groups(self, conn, vm, timeout=90): + def delete_security_groups(self, conn, vm): """ Delete the SG of this infrastructure if this is the last VM @@ -1590,7 +1590,7 @@ def delete_security_groups(self, conn, vm, timeout=90): # try to wait some seconds to free the SGs time.sleep(5) - self.log_info("Remove the SG: " + sg['Name']) + self.log_info("Remove the SG: " + sg['GroupName']) try: conn.revoke_security_group_ingress( GroupId=sg['GroupId'], diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index f148f21f..ec8489b3 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -29,7 +29,7 @@ from radl import radl_parse from IM.InfrastructureInfo import InfrastructureInfo from IM.connectors.EC2 import EC2CloudConnector -from mock import patch, MagicMock +from mock import patch, MagicMock, call class TestEC2Connector(TestCloudConnectorBase): @@ -327,6 +327,88 @@ def test_30_updateVMInfo(self, mock_boto_client): self.assertEqual(vm.getPublicIP(), "10.0.0.1") self.assertEqual(vm.getPrivateIP(), None) + @patch('IM.connectors.EC2.boto3.client') + @patch('time.sleep') + def test_60_finalize(self, sleep, mock_boto_client): + radl_data = """ + network net (outbound = 'yes') + network net2 (outbound = 'yes') + system test ( + cpu.arch='x86_64' and + cpu.count=1 and + memory.size=512m and + net_interface.0.connection = 'net' and + net_interface.0.ip = '158.42.1.1' and + net_interface.0.dns_name = 
'test.domain.com' and + net_interface.1.connection = 'net2' and + disk.0.os.name = 'linux' and + disk.0.image.url = 'one://server.com/1' and + disk.0.os.credentials.username = 'user' and + disk.0.os.credentials.password = 'pass' and + disk.1.size=1GB and + disk.1.device='hdb' and + disk.1.mount_path='/mnt/path' + )""" + radl = radl_parse.parse_radl(radl_data) + + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + inf = MagicMock() + inf.id = "1" + vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) + vm.dns_entries = [('test', 'domain.com.', '158.42.1.1')] + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', + 'State': {'Name': 'running'}}]}]} + instance = MagicMock() + mock_conn.Instance.return_value = instance + instance.block_device_mappings = [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeId': 'volid'}}] + mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', + 'InstanceId': 'id-1'}]} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': []} + + mock_conn.describe_security_groups.return_value = {'SecurityGroups': [ + {'GroupId': 'sg1', 'GroupName': 'im-1', 'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg2', 'GroupName': 'im-1-net', 'Description': '', + 'VpcId': 'vpc-id'}, + {'GroupId': 'sg3', 'GroupName': 'im-1-net2', 'Description': 'Security group created by the IM', + 'VpcId': 'vpc-id'} + ]} + mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} + mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} + + mock_conn.list_hosted_zones_by_name.return_value = {'HostedZones': [{'Name': 'domain.com.', + 
'Id': 'zone-id'}]} + mock_conn.list_resource_record_sets.return_value = { + 'ResourceRecordSets': [{'Name': 'test.domain.com.'}]} + mock_conn.describe_internet_gateways.return_value = {'InternetGateways': [{'InternetGatewayId': 'ig-id'}]} + + success, _ = ec2_cloud.finalize(vm, True, auth) + + self.assertTrue(success, msg="ERROR: finalizing VM info.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + + self.assertEqual(mock_conn.change_resource_record_sets.call_args_list[0][1]['ChangeBatch']['Changes'], + [{'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'test.domain.com.', + 'Type': 'A', + 'TTL': 300, + 'ResourceRecords': [{'Value': '158.42.1.1'}]} + }]) + self.assertEqual(mock_conn.delete_security_group.call_args_list, [call(GroupId='sg1'), + call(GroupId='sg3')]) + self.assertEqual(instance.terminate.call_args_list, [call()]) + self.assertEqual(mock_conn.delete_subnet.call_args_list, [call('subnet-id')]) + self.assertEqual(mock_conn.delete_vpc.call_args_list, [call('vpc-id')]) + self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call('ig-id')]) + self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) + if __name__ == '__main__': unittest.main() From 0c013f1f47015bb52b8de8dcb93fda66f5563730 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 16:43:58 +0200 Subject: [PATCH 10/53] Implements #301 --- IM/connectors/EC2.py | 14 ++-- test/unit/connectors/EC2.py | 129 ++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 8 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index bee91556..cf31a9df 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1265,9 +1265,8 @@ def updateVMInfo(self, vm, auth_data): job_instance_id = None self.log_info("Check if the request has been fulfilled and the instance has been deployed") - job_sir_id = instance_id request_list = 
conn.describe_spot_instance_requests(Filters=[{'Name': 'spot-instance-request-id', - 'Values': ['job_sir_id']}]) + 'Values': [instance_id]}]) sir = [] if request_list['SpotInstanceRequests']: sir = request_list['SpotInstanceRequests'][0] @@ -1275,8 +1274,7 @@ def updateVMInfo(self, vm, auth_data): # another availability zone if sir['State'] == 'failed': vm.state = VirtualMachine.FAILED - if sir['SpotInstanceRequestId'] == job_sir_id: - job_instance_id = sir['InstanceId'] + job_instance_id = sir['InstanceId'] if job_instance_id: self.log_info("Request fulfilled, instance_id: " + str(job_instance_id)) @@ -1610,13 +1608,13 @@ def delete_security_groups(self, conn, vm): conn.delete_security_group(GroupId=sg['GroupId']) def stop(self, vm, auth_data): - self._vm_operation("stop", vm, auth_data) + return self._vm_operation("stop", vm, auth_data) def start(self, vm, auth_data): - self._vm_operation("start", vm, auth_data) + return self._vm_operation("start", vm, auth_data) def reboot(self, vm, auth_data): - self._vm_operation(vm, "reboot", auth_data) + return self._vm_operation("reboot", vm, auth_data) def _vm_operation(self, op, vm, auth_data): region_name = vm.id.split(";")[0] @@ -1646,7 +1644,7 @@ def waitStop(instance, timeout=120): powered_off = False while wait < timeout and not powered_off: instance.reload() - powered_off = instance.state == 'stopped' + powered_off = instance.state['Name'] == 'stopped' if not powered_off: time.sleep(2) wait += 2 diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index ec8489b3..b95ad66d 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -327,6 +327,135 @@ def test_30_updateVMInfo(self, mock_boto_client): self.assertEqual(vm.getPublicIP(), "10.0.0.1") self.assertEqual(vm.getPrivateIP(), None) + @patch('IM.connectors.EC2.boto3.client') + def test_40_updateVMInfo_spot(self, mock_boto_client): + radl_data = """ + network net (outbound = 'yes') + system test ( + cpu.arch='x86_64' and + 
cpu.count=1 and + memory.size=512m and + net_interface.0.connection = 'net' and + net_interface.0.dns_name = 'test' and + disk.0.os.name = 'linux' and + disk.0.image.url = 'one://server.com/1' and + disk.0.os.credentials.username = 'user' and + disk.0.os.credentials.password = 'pass' and + disk.1.size=1GB and + disk.1.device='hdb' and + disk.1.mount_path='/mnt/path' + )""" + radl = radl_parse.parse_radl(radl_data) + radl.check() + + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + inf = MagicMock() + vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + instance = MagicMock() + mock_conn.Instance.return_value = instance + instance.id = "iid" + instance.virtualization_type = "vt" + instance.placement = {'AvailabilityZone': 'us-east-1'} + instance.state = {'Name': 'running'} + instance.instance_type = "t1.micro" + instance.launch_time = "2016-12-31T00:00:00" + instance.ip_address = "158.42.1.1" + instance.private_ip_address = "10.0.0.1" + + mock_conn.describe_addresses.return_value = {'Addresses': []} + mock_conn.describe_spot_instance_requests.return_value = {'SpotInstanceRequests': [{'InstanceId': 'id', + 'State': ''}]} + mock_conn.create_volume.return_value = {'VolumeId': 'volid', 'State': 'available'} + + success, vm = ec2_cloud.updateVMInfo(vm, auth) + + self.assertTrue(success, msg="ERROR: updating VM info.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + + @patch('IM.connectors.EC2.boto3.client') + def test_50_vmop(self, mock_boto_client): + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + inf = MagicMock() + vm = VirtualMachine(inf, 
"us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + + instance = MagicMock() + mock_conn.Instance.return_value = instance + + success, _ = ec2_cloud.start(vm, auth) + self.assertTrue(success, msg="ERROR: starting VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.start.call_args_list, [call()]) + + success, _ = ec2_cloud.stop(vm, auth) + self.assertTrue(success, msg="ERROR: stopping VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) + + success, _ = ec2_cloud.reboot(vm, auth) + self.assertTrue(success, msg="ERROR: rebooting VM.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.reboot.call_args_list, [call()]) + + @patch('IM.connectors.EC2.boto3.client') + def test_55_alter(self, mock_boto_client): + radl_data = """ + network net () + system test ( + cpu.arch='x86_64' and + cpu.count=1 and + memory.size=512m and + net_interface.0.connection = 'net' and + net_interface.0.dns_name = 'test' and + disk.0.os.name = 'linux' and + disk.0.image.url = 'one://server.com/1' and + disk.0.os.credentials.username = 'user' and + disk.0.os.credentials.password = 'pass' + )""" + radl = radl_parse.parse_radl(radl_data) + + new_radl_data = """ + system test ( + cpu.count>=2 and + memory.size>=2048m + )""" + new_radl = radl_parse.parse_radl(new_radl_data) + + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + inf = MagicMock() + vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) + + mock_conn = MagicMock() + 
mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + + instance = MagicMock() + mock_conn.Instance.return_value = instance + instance.id = "iid" + instance.instance_type = "t1.micro" + instance.state = {'Name': 'stopped'} + + success, _ = ec2_cloud.alterVM(vm, new_radl, auth) + + self.assertTrue(success, msg="ERROR: modifying VM info.") + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + self.assertEqual(instance.stop.call_args_list, [call()]) + self.assertEqual(instance.start.call_args_list, [call()]) + self.assertEqual(instance.modify_attribute.call_args_list, [call(InstanceType={'Value': 't2.micro'})]) + @patch('IM.connectors.EC2.boto3.client') @patch('time.sleep') def test_60_finalize(self, sleep, mock_boto_client): From cf42fb7f17486c91102469f55f3f8a43f85f3873 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 12 Sep 2024 16:57:46 +0200 Subject: [PATCH 11/53] Implements #301 --- IM/connectors/EC2.py | 10 +++---- test/unit/connectors/EC2.py | 59 +++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index cf31a9df..d363ac1b 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1775,7 +1775,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): """ region_name = vm.id.split(";")[0] instance_id = vm.id.split(";")[1] - snapshot_id = "" + snapshot_id = None # Obtain the connection object to connect with EC2 conn = self.get_connection(region_name, auth_data) @@ -1795,8 +1795,8 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): 'Value': instance_id}]}]) else: return (False, "Error obtaining details of the instance") - if snapshot_id != "": - new_url = "aws://%s/%s" % (region_name, snapshot_id) + if snapshot_id: + new_url = "aws://%s/%s" % (region_name, snapshot_id['ImageId']) if 
auto_delete: vm.inf.snapshots.append(new_url) return (True, new_url) @@ -1841,8 +1841,8 @@ def list_images(self, auth_data, filters=None): try: for image in conn.describe_images(Owners=['self', 'aws-marketplace'], Filters=images_filter)['Images']: if len(image['ImageId']) > 12: # do not add old images - images.append({"uri": "aws://%s/%s" % (region, image.id), - "name": "%s/%s" % (region, image.name)}) + images.append({"uri": "aws://%s/%s" % (region, image['ImageId']), + "name": "%s/%s" % (region, image['Name'])}) except Exception: continue return self._filter_images(images, filters) diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index b95ad66d..d849f746 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -538,6 +538,65 @@ def test_60_finalize(self, sleep, mock_boto_client): self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call('ig-id')]) self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) + @patch('IM.connectors.EC2.boto3.client') + @patch('time.sleep') + def test_70_create_snapshot(self, sleep, mock_boto_client): + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + inf = MagicMock() + vm = VirtualMachine(inf, "us-east-1;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + + instance = MagicMock() + mock_conn.Instance.return_value = instance + instance.create_image.return_value = {'ImageId': 'image-ami'} + + success, new_image = ec2_cloud.create_snapshot(vm, 0, "image_name", True, auth) + + self.assertTrue(success, msg="ERROR: creating snapshot: %s" % new_image) + self.assertEqual(new_image, "aws://us-east-1/image-ami") + self.assertEqual(instance.create_image.call_args_list, [call(Name='image_name', + Description='AMI 
automatically generated by IM', + NoReboot=True, + TagSpecifications=[{'ResourceType': 'snapshot', + 'Tags': [{'Key': 'instance_id', + 'Value': 'id1'}]}])]) + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + + @patch('IM.connectors.EC2.boto3.client') + @patch('time.sleep') + def test_80_delete_image(self, sleep, mock_boto_client): + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + + success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) + + self.assertTrue(success, msg="ERROR: deleting image. %s" % msg) + self.assertEqual(mock_conn.deregister_image.call_args_list, [call('image-ami')]) + self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) + + @patch('IM.connectors.EC2.boto3.client') + @patch('time.sleep') + def test_90_list_images(self, sleep, mock_boto_client): + auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) + ec2_cloud = self.get_ec2_cloud() + + mock_conn = MagicMock() + mock_boto_client.return_value = mock_conn + mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-123456789012', + 'Name': 'image_name'}]} + res = ec2_cloud.list_images(auth, filters={'region': 'us-east-1'}) + self.assertEqual(res, [{'uri': 'aws://us-east-1/ami-123456789012', 'name': 'us-east-1/image_name'}]) + if __name__ == '__main__': unittest.main() From aebf47a3ecb820a083d4464ca4fe8a3c3411cd1a Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 08:22:22 +0200 Subject: [PATCH 12/53] Implements #301 --- docker-devel/Dockerfile | 16 ++++++---------- docker-py3/Dockerfile | 14 
+++++--------- docker-py3/Dockerfile.alp | 3 --- pyproject.toml | 2 +- requirements-tests.txt | 2 +- 5 files changed, 13 insertions(+), 24 deletions(-) diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index 4c6b80d5..48bd4759 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -1,5 +1,5 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ARG BRANCH=devel LABEL maintainer="Miguel Caballer " LABEL version="1.17.0" @@ -12,11 +12,10 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install git+https://github.com/micafer/libcloud@ost_nets_extra && \ - pip3 install apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages git+https://github.com/micafer/libcloud@ost_nets_extra && \ + pip3 install --break-system-packages apache-libcloud==3.8.0 git+https://github.com/grycap/im@$BRANCH && \ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ @@ -24,7 +23,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch 
/usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -38,8 +37,5 @@ RUN sed -i -e 's/VM_NUM_USE_CTXT_DIST = 30/VM_NUM_USE_CTXT_DIST = 3/g' /etc/im/i # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD /usr/local/bin/im_service \ No newline at end of file diff --git a/docker-py3/Dockerfile b/docker-py3/Dockerfile index d6a9843c..8eb7094f 100644 --- a/docker-py3/Dockerfile +++ b/docker-py3/Dockerfile @@ -1,5 +1,5 @@ # Dockerfile to create a container with the IM service -FROM ubuntu:22.04 +FROM ubuntu:24.04 ENV VERSION=1.17.0 @@ -14,10 +14,9 @@ RUN apt-get update && apt-get install --no-install-recommends -y patch wget pyth # Install IM RUN apt-get update && apt-get install --no-install-recommends -y python3-setuptools python3-pip git && \ - pip3 install -U pip && \ - pip3 install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ - pip3 install pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ - pip3 install apache-libcloud==3.8.0 && IM==${VERSION}\ + pip3 install --break-system-packages msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-compute azure-mgmt-network azure-mgmt-resource azure-mgmt-dns azure-identity==1.8.0 && \ + pip3 install --break-system-packages pyOpenSSL cheroot xmltodict pymongo ansible==8.7.0&& \ + pip3 install --break-system-packages apache-libcloud==3.8.0 IM==${VERSION} &&\ apt-get purge -y python3-pip git && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* 
/var/tmp/* && rm -rf ~/.cache/ @@ -25,7 +24,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y python3-setupto # untill this PR is merged and released # https://github.com/apache/libcloud/pull/2016 COPY ost.patch /tmp/ost.patch -RUN patch /usr/local/lib/python3.10/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch +RUN patch /usr/local/lib/python3.12/dist-packages/libcloud/compute/drivers/openstack.py < /tmp/ost.patch && rm /tmp/ost.patch # Copy im configuration files RUN mkdir /etc/im @@ -36,8 +35,5 @@ RUN wget https://raw.githubusercontent.com/grycap/im/v${VERSION}/etc/logging.con # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/local/lib/python3.10/dist-packages/boto/endpoints.json - # Start IM service CMD ["/usr/local/bin/im_service"] diff --git a/docker-py3/Dockerfile.alp b/docker-py3/Dockerfile.alp index e21363c7..97f502d0 100644 --- a/docker-py3/Dockerfile.alp +++ b/docker-py3/Dockerfile.alp @@ -40,8 +40,5 @@ RUN apk add --no-cache git &&\ # Copy a ansible.cfg with correct minimum values COPY ansible.cfg /etc/ansible/ansible.cfg -# Fix boto issue https://github.com/boto/boto/issues/3783 -COPY endpoints.json /usr/lib/python3.10/site-packages/boto/endpoints.json - # Start IM service CMD im_service.py diff --git a/pyproject.toml b/pyproject.toml index fd6487a1..18e1180c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "PyYAML", "suds-community", "cheroot", - "boto >= 2.29", + "boto3", "apache-libcloud >= 3.2.0", "RADL >= 1.3.3", "flask", diff --git a/requirements-tests.txt b/requirements-tests.txt index 48bdd4d1..456356e3 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -3,7 +3,7 @@ ansible-base paramiko >= 1.14 PyYAML cheroot -boto >= 2.29 +boto3 apache-libcloud >= 3.3.1 RADL >= 1.3.3 flask From 
b36fecdaff009a0644471a44d8d9ee6ee5c447ab Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 09:12:11 +0200 Subject: [PATCH 13/53] Implements #301 --- IM/connectors/EC2.py | 85 +++++++++++++++++++++++++------------ test/unit/connectors/EC2.py | 2 +- 2 files changed, 58 insertions(+), 29 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index d363ac1b..c874aff8 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1,5 +1,5 @@ # IM - Infrastructure Manager -# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia +# Copyright (C) 2024 - GRyCAP - Universitat Politecnica de Valencia # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -359,7 +359,7 @@ def create_security_groups(self, conn, inf, radl, vpc): try: sg = conn.create_security_group(GroupName=sg_name, Description="Security group created by the IM", - VpdId=vpc) + VpcId=vpc) # open all the ports for the VMs in the security group conn.authorize_security_group_ingress( GroupId=sg['GroupId'], @@ -400,7 +400,7 @@ def create_security_groups(self, conn, inf, radl, vpc): try: sg = conn.create_security_group(GroupName=sg_name, Description="Security group created by the IM", - VpdId=vpc) + VpcId=vpc) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -475,9 +475,17 @@ def get_default_subnet(conn): if vpcs: vpc_id = vpcs[0]['VpcId'] - subnets = conn.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])['Subnets'] - if subnets: - subnet_id = subnets[0]['SubnetId'] + # Just in case there is no default VPC, in some old accounts + # get the VPC named default + if not vpc_id: + vpcs = conn.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': ['default']}])['Vpcs'] + if vpcs: + vpc_id = vpcs[0]['VpcId'] + + if vpc_id: + subnets = conn.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': 
[vpc_id]}])['Subnets'] + if subnets: + subnet_id = subnets[0]['SubnetId'] return vpc_id, subnet_id @@ -763,23 +771,28 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): 'DeviceName': block_device_name, 'Ebs': { 'DeleteOnTermination': True, - 'VolumeSize': size, 'VolumeType': disk_type } } ] + if size: + bdm[0]['Ebs']['VolumeSize'] = size volumes = self.get_volumes(conn, vm) for device, (size, snapshot_id, _, disk_type) in volumes.items(): - bdm.append({ + bd = { 'DeviceName': device, 'Ebs': { - 'SnapshotId': snapshot_id, 'DeleteOnTermination': True, - 'VolumeSize': size, - 'VolumeType': disk_type } - }) + } + if size: + bd['Ebs']['VolumeSize'] = size + if snapshot_id: + bd['Ebs']['SnapshotId'] = snapshot_id + if disk_type: + bd['Ebs']['VolumeType'] = disk_type + bdm.append(bd) if spot: self.log_info("Launching a spot instance") @@ -810,17 +823,24 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): availability_zone = zone['ZoneName'] self.log_info("Launching the spot request in the zone " + availability_zone) - request = conn.request_spot_instances(SpotPrice=price, InstanceCount=1, Type='one-time', - LaunchSpecification={'ImageId': image['ImageId'], - 'InstanceType': instance_type.name, - 'Placement': { - 'AvailabilityZone': - availability_zone}, - 'KeyName': keypair_name, - 'SecurityGroupIds': sg_ids, - 'BlockDeviceMappings': bdm, - 'SubnetId': subnet, - 'UserData': user_data}) + launch_spec = {'ImageId': image['ImageId'], + 'InstanceType': instance_type.name, + 'SecurityGroupIds': sg_ids, + 'BlockDeviceMappings': bdm, + 'SubnetId': subnet, + 'UserData': user_data} + + if keypair_name: + launch_spec['KeyName'] = keypair_name + if availability_zone: + launch_spec['Placement'] = {'AvailabilityZone': availability_zone} + + params = {'InstanceCount': 1, + 'Type': 'one-time', + 'LaunchSpecification': launch_spec} + if price: + params['SpotPrice'] = price + request = conn.request_spot_instances(**params) if request['SpotInstanceRequests']: 
ec2_vm_id = region_name + ";" + request['SpotInstanceRequests'][0]['SpotInstanceRequestId'] @@ -851,11 +871,20 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): } ] - instances = conn.run_instances(ImageId=image['ImageId'], MinCount=1, MaxCount=1, - KeyName=keypair_name, InstanceType=instance_type.name, - NetworkInterfaces=interfaces, - Placement={'AvailabilityZone': placement}, - BlockDeviceMappings=bdm, UserData=user_data)['Instances'] + params = {'ImageId': image['ImageId'], + 'MinCount': 1, + 'MaxCount': 1, + 'InstanceType': instance_type.name, + 'NetworkInterfaces': interfaces, + 'BlockDeviceMappings': bdm, + 'UserData': user_data} + + if keypair_name: + params['KeyName'] = keypair_name + if placement: + params['Placement'] = {'AvailabilityZone': placement} + + instances = conn.run_instances(**params)['Instances'] if instances: time.sleep(1) diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index d849f746..883a1a47 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -454,7 +454,7 @@ def test_55_alter(self, mock_boto_client): self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(instance.stop.call_args_list, [call()]) self.assertEqual(instance.start.call_args_list, [call()]) - self.assertEqual(instance.modify_attribute.call_args_list, [call(InstanceType={'Value': 't2.micro'})]) + self.assertEqual(instance.modify_attribute.call_args_list, [call(Attribute='instanceType', Value='t3a.small')]) @patch('IM.connectors.EC2.boto3.client') @patch('time.sleep') From ecdd36dbef963772d0490744301af26c335a2060 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 12:46:16 +0200 Subject: [PATCH 14/53] Implements #301 --- IM/connectors/EC2.py | 167 +++++++++++++++--------------------- test/unit/connectors/EC2.py | 94 +++++++++++--------- 2 files changed, 125 insertions(+), 136 deletions(-) diff --git a/IM/connectors/EC2.py 
b/IM/connectors/EC2.py index c874aff8..86ad4844 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -102,7 +102,6 @@ class EC2CloudConnector(CloudConnector): def __init__(self, cloud_info, inf): self.connection = None - self.route53_connection = None self.auth = None CloudConnector.__init__(self, cloud_info, inf) @@ -161,7 +160,7 @@ def update_system_info_from_instance(system, instance_type): system.addFeature(Feature("gpu.model", "=", instance_type.gpu_model), conflict="other", missing="other") # Get the EC2 connection object - def get_connection(self, region_name, auth_data): + def get_connection(self, region_name, auth_data, service_name, object_type='client'): """ Get a :py:class:`boto.ec2.connection` to interact with. @@ -177,21 +176,31 @@ def get_connection(self, region_name, auth_data): auth = auths[0] if self.connection and self.auth.compare(auth_data, self.type): - return self.connection + if object_type == 'resource': + client = self.connection.resource('ec2') + else: + client = self.connection.client('ec2') else: self.auth = auth_data client = None try: if 'username' in auth and 'password' in auth: - client = boto3.client('ec2') - regions = client.describe_regions() - region_names = [region['RegionName'] for region in regions['Regions']] - if region_name not in region_names: - raise Exception("Incorrect region name: " + region_name) - client = boto3.client('ec2', region_name=region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - aws_session_token=auth.get('token')) + if region_name != 'universal': + client = boto3.client('ec2') + regions = client.describe_regions() + region_names = [region['RegionName'] for region in regions['Regions']] + if region_name not in region_names: + raise Exception("Incorrect region name: " + region_name) + + session = boto3.session.Session(region_name=region_name, + aws_access_key_id=auth['username'], + aws_secret_access_key=auth['password'], + 
aws_session_token=auth.get('token')) + self.connection = session + if object_type == 'resource': + client = session.resource(service_name) + else: + client = session.client(service_name) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -202,48 +211,7 @@ def get_connection(self, region_name, auth_data): self.log_exception("Error getting the region " + region_name) raise Exception("Error getting the region " + region_name + ": " + str(ex)) - self.connection = client - return client - - # Get the Route53 connection object - def get_route53_connection(self, region_name, auth_data): - """ - Get a :py:class:`boto.route53.connection` to interact with. - - Arguments: - - region_name(str): AWS region to connect. - - auth_data(:py:class:`dict` of str objects): Authentication data to access cloud provider. - Returns: a :py:class:`boto3.Route53.Client` or None in case of error - """ - auths = auth_data.getAuthInfo(self.type) - if not auths: - raise Exception("No auth data has been specified to EC2.") - else: - auth = auths[0] - - if self.route53_connection and self.auth.compare(auth_data, self.type): - return self.route53_connection - else: - self.auth = auth_data - conn = None - try: - if 'username' in auth and 'password' in auth: - conn = boto3.client('route53', region_name=region_name, - aws_access_key_id=auth['username'], - aws_secret_access_key=auth['password'], - aws_session_token=auth.get('token')) - else: - self.log_error("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - raise Exception("No correct auth data has been specified to EC2: " - "username (Access Key) and password (Secret Key)") - - except Exception as ex: - self.log_exception("Error conneting Route53 in region " + region_name) - raise Exception("Error conneting Route53 in region" + region_name + ": " + str(ex)) - - self.route53_connection = conn - return conn + return client # 
path format: aws://eu-west-1/ami-00685b74 @staticmethod @@ -338,8 +306,8 @@ def set_net_provider_id(radl, vpc, subnet): @staticmethod def _get_security_group(conn, sg_name): try: - return conn.describe_security_groups(Filters={'Name': 'group-name', - 'Values': [sg_name]})['SecurityGroups'][0] + return conn.describe_security_groups(Filters=[{'Name': 'group-name', + 'Values': [sg_name]}])['SecurityGroups'][0] except Exception: return None @@ -575,8 +543,9 @@ def create_networks(self, conn, radl, inf): conn.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) self.log_info("Adding route to the IG.") - for route_table in conn.describe_route_tables(Filters={"vpc-id": vpc_id})['RouteTables']: - conn.create_route(RouteTableId=route_table['RouteTableId'], + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: + conn.create_route(RouteTableId=rt['RouteTableId'], DestinationCidrBlock="0.0.0.0/0", GatewayId=ig_id) @@ -607,13 +576,15 @@ def create_networks(self, conn, radl, inf): except Exception as ex: self.log_exception("Error creating subnets or vpc.") try: - for subnet in conn.describe_subnets(Filters={"tag:IM-INFRA-ID": inf.id})['Subnets']: + for subnet in conn.describe_subnets(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['Subnets']: self.log_info("Deleting subnet: %s" % subnet.id) conn.delete_subnet(subnet.id) - for vpc in conn.describe_vpcs(Filters={"tag:IM-INFRA-ID": inf.id})['Vpcs']: + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['Vpcs']: self.log_info("Deleting vpc: %s" % vpc.id) conn.delete_vpc(vpc.id) - for ig in conn.describe_internet_gateways(Filters={"tag:IM-INFRA-ID": inf.id})['InternetGateways']: + for ig in conn.describe_internet_gateways(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [inf.id]}])['InternetGateways']: self.log_info("Deleting Internet Gateway: %s" % ig.id) conn.delete_internet_gateways(ig.id) except Exception: @@ -661,7 
+632,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (region_name, ami) = self.getAMIData(system.getValue("disk.0.image.url")) self.log_info("Connecting with the region: " + region_name) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') res = [] spot = False @@ -884,22 +855,18 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if placement: params['Placement'] = {'AvailabilityZone': placement} + im_username = "im_user" + if auth_data.getAuthInfo('InfrastructureManager'): + im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] + instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, + {'Key': 'IM-USER', 'Value': im_username}] + for key, value in tags.items(): + instace_tags.append({'Key': key, 'Value': value}) + params['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': instace_tags}] + instances = conn.run_instances(**params)['Instances'] if instances: - time.sleep(1) - instance = conn.Instance(instances[0]) - instance.load() - - im_username = "im_user" - if auth_data.getAuthInfo('InfrastructureManager'): - im_username = auth_data.getAuthInfo('InfrastructureManager')[0]['username'] - instace_tags = [{'Key': 'Name', 'Value': self.gen_instance_name(system)}, - {'Key': 'IM-USER', 'Value': im_username}] - for key, value in tags.items(): - instace_tags.append({'Key': key, 'Value': value}) - instance.create_tags(Tags=instace_tags) - ec2_vm_id = region_name + ";" + instances[0]['InstanceId'] self.log_debug("RADL:") @@ -1033,11 +1000,11 @@ def get_instance_by_id(self, instance_id, region_name, auth_data): instance = None try: - conn = self.get_connection(region_name, auth_data) - instance = conn.Instance(instance_id) + resource = self.get_connection(region_name, auth_data, 'ec2', 'resource') + instance = resource.Instance(instance_id) instance.load() except Exception: - self.log_error("Error getting instance id: %s" % 
instance_id) + self.log_exception("Error getting instance id: %s" % instance_id) return instance @@ -1118,7 +1085,8 @@ def delete_elastic_ips(self, conn, vm, timeout=240): if pub_ip in fixed_ips: self.log_info("%s is a fixed IP, it is not released" % pub_ip) else: - for address in conn.describe_addresses(Filters={"public-ip": pub_ip})['Addresses']: + for address in conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses']: self.log_info("This VM has a Elastic IP %s." % address['PublicIp']) cont = 0 while address['InstanceId'] and cont < timeout: @@ -1128,7 +1096,8 @@ def delete_elastic_ips(self, conn, vm, timeout=240): conn.disassociate_address(PublicIp=address['PublicIp']) except Exception: self.log_debug("Error disassociating the IP.") - address = conn.describe_addresses(Filters={"public-ip": pub_ip})['Addresses'][0] + address = conn.describe_addresses(Filters=[{"Name": "public-ip", + "Values": [pub_ip]}])['Addresses'][0] self.log_info("It is attached. 
Wait.") time.sleep(3) @@ -1225,12 +1194,14 @@ def addRouterInstance(self, vm, conn): if network.getValue('router'): if not route_table_id: vpc_id = None - for vpc in conn.describe_vpcs(Filters={"tag:IM-INFRA-ID": vm.inf.id})['Vpcs']: + for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", + "Values": [vm.inf.id]}])['Vpcs']: vpc_id = vpc['VpcId'] if not vpc_id: self.log_error("No VPC found.") return False - for rt in conn.describe_route_tables(Filters={"vpc-id": vpc_id})['RouteTables']: + for rt in conn.describe_route_tables(Filters=[{"Name": "vpc-id", + "Values": [vpc_id]}])['RouteTables']: route_table_id = rt['RouteTableId'] if not route_table_id: @@ -1285,7 +1256,7 @@ def addRouterInstance(self, vm, conn): def updateVMInfo(self, vm, auth_data): region = vm.id.split(";")[0] instance_id = vm.id.split(";")[1] - conn = self.get_connection(region, auth_data) + conn = self.get_connection(region, auth_data, 'ec2') # Check if the instance_id starts with "sir" -> spot request if (instance_id[0] == "s"): @@ -1350,7 +1321,7 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): try: # Workaround to use EC2 as the default case. if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: @@ -1402,7 +1373,7 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): # Workaround to use EC2 as the default case. 
if self.type == "EC2": - conn = self.get_route53_connection('universal', auth_data) + conn = self.get_connection('universal', auth_data, 'route53') else: auths = auth_data.getAuthInfo("EC2") if not auths: @@ -1520,7 +1491,7 @@ def finalize(self, vm, last, auth_data): region_name = vm.id.split(";")[0] instance_id = vm.id.split(";")[1] - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) @@ -1606,14 +1577,16 @@ def delete_security_groups(self, conn, vm): self.log_info("SG %s not created by the IM. Do not delete it." % sg['GroupName']) continue try: - instances = conn.describe_instances(Filters=[{'Name': 'instance.group-id', 'Values': [sg['GroupId']]}]) - for instance in instances['Reservations'][0]['Instances']: - conn.modify_instance_attribute( - InstanceId=instance['InstanceId'], - Groups=[{'GroupId': def_sg_id}] - ) + reservations = conn.describe_instances(Filters=[{'Name': 'instance.group-id', + 'Values': [sg['GroupId']]}])['Reservations'] + if reservations and reservations[0]['Instances']: + for instance in reservations[0]['Instances']: + conn.modify_instance_attribute( + InstanceId=instance['InstanceId'], + Groups=[{'GroupId': def_sg_id}] + ) except Exception as ex: - self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg.name, instance.id, ex)) + self.log_warn("Error removing the SG %s from the instance: %s. 
%s" % (sg["GroupName"], instance.id, ex)) # try to wait some seconds to free the SGs time.sleep(5) @@ -1807,7 +1780,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): snapshot_id = None # Obtain the connection object to connect with EC2 - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') if not conn: return (False, "Error connecting with EC2, check the credentials") @@ -1819,7 +1792,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): snapshot_id = instance.create_image(Name=image_name, Description="AMI automatically generated by IM", NoReboot=True, - TagSpecifications=[{'ResourceType': 'snapshot', + TagSpecifications=[{'ResourceType': 'image', 'Tags': [{'Key': 'instance_id', 'Value': instance_id}]}]) else: @@ -1836,7 +1809,7 @@ def delete_image(self, image_url, auth_data): (region_name, ami) = self.getAMIData(image_url) self.log_info("Deleting image: %s." % image_url) - conn = self.get_connection(region_name, auth_data) + conn = self.get_connection(region_name, auth_data, 'ec2') success = conn.deregister_image(ami) @@ -1866,7 +1839,7 @@ def list_images(self, auth_data, filters=None): images = [] for region in regions: - conn = self.get_connection(region, auth_data) + conn = self.get_connection(region, auth_data, 'ec2') try: for image in conn.describe_images(Owners=['self', 'aws-marketplace'], Filters=images_filter)['Images']: if len(image['ImageId']) > 12: # do not add old images diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index 883a1a47..43b6b98b 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -114,9 +114,9 @@ def describe_subnets(self, **kwargs): subnet['CidrBlock'] = "10.0.1.0/24" return {'Subnets': [subnet]} - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def 
test_20_launch(self, save_data, mock_boto_client): + def test_20_launch(self, save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and outports='8080,9000:9100' and sg_name = 'sgname') network net2 () @@ -147,7 +147,9 @@ def test_20_launch(self, save_data, mock_boto_client): inf.radl = radl mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_security_groups.return_value = {'SecurityGroups': []} mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} @@ -161,7 +163,7 @@ def test_20_launch(self, save_data, mock_boto_client): ]} mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'i-12345678'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance res = ec2_cloud.launch(inf, radl, radl, 1, auth) success, msg = res[0] @@ -173,9 +175,9 @@ def test_20_launch(self, save_data, mock_boto_client): self.assertEqual(mock_conn.create_security_group.call_args_list[2][1]['GroupName'], "im-%s-net2" % inf.id) mock_conn.run_instances.assert_called_once() - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('IM.InfrastructureList.InfrastructureList.save_data') - def test_25_launch_spot(self, save_data, mock_boto_client): + def test_25_launch_spot(self, save_data, mock_boto_session): radl_data = """ network net1 (outbound = 'yes' and provider_id = 'vpc-id.subnet-id') network net2 () @@ -204,12 +206,14 @@ def test_25_launch_spot(self, save_data, mock_boto_client): ec2_cloud = self.get_ec2_cloud() mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + 
mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'iid'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.id = "iid" mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} @@ -232,8 +236,8 @@ def test_25_launch_spot(self, save_data, mock_boto_client): self.assertTrue(success, msg="ERROR: launching a VM.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.boto3.client') - def test_30_updateVMInfo(self, mock_boto_client): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_30_updateVMInfo(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (router = '10.0.10.0/24,vrouter') @@ -261,12 +265,14 @@ def test_30_updateVMInfo(self, mock_boto_client): ec2_cloud = self.get_ec2_cloud() mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', 'State': {'Name': 'running'}}]}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.id = "iid" instance.tags = [] instance.virtualization_type = "vt" @@ -327,8 +333,8 @@ def test_30_updateVMInfo(self, mock_boto_client): self.assertEqual(vm.getPublicIP(), "10.0.0.1") self.assertEqual(vm.getPrivateIP(), None) - @patch('IM.connectors.EC2.boto3.client') - def 
test_40_updateVMInfo_spot(self, mock_boto_client): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_40_updateVMInfo_spot(self, mock_boto_session): radl_data = """ network net (outbound = 'yes') system test ( @@ -355,10 +361,12 @@ def test_40_updateVMInfo_spot(self, mock_boto_client): vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.id = "iid" instance.virtualization_type = "vt" instance.placement = {'AvailabilityZone': 'us-east-1'} @@ -378,8 +386,8 @@ def test_40_updateVMInfo_spot(self, mock_boto_client): self.assertTrue(success, msg="ERROR: updating VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.boto3.client') - def test_50_vmop(self, mock_boto_client): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_50_vmop(self, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() @@ -387,11 +395,13 @@ def test_50_vmop(self, mock_boto_client): vm = VirtualMachine(inf, "us-east-1;id-1", ec2_cloud.cloud, "", "", ec2_cloud, 1) mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + 
mock_res.Instance.return_value = instance success, _ = ec2_cloud.start(vm, auth) self.assertTrue(success, msg="ERROR: starting VM.") @@ -408,8 +418,8 @@ def test_50_vmop(self, mock_boto_client): self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(instance.reboot.call_args_list, [call()]) - @patch('IM.connectors.EC2.boto3.client') - def test_55_alter(self, mock_boto_client): + @patch('IM.connectors.EC2.boto3.session.Session') + def test_55_alter(self, mock_boto_session): radl_data = """ network net () system test ( @@ -439,11 +449,13 @@ def test_55_alter(self, mock_boto_client): vm = VirtualMachine(inf, "us-east-1;sid-1", ec2_cloud.cloud, radl, radl, ec2_cloud, 1) mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.id = "iid" instance.instance_type = "t1.micro" instance.state = {'Name': 'stopped'} @@ -456,9 +468,9 @@ def test_55_alter(self, mock_boto_client): self.assertEqual(instance.start.call_args_list, [call()]) self.assertEqual(instance.modify_attribute.call_args_list, [call(Attribute='instanceType', Value='t3a.small')]) - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_60_finalize(self, sleep, mock_boto_client): + def test_60_finalize(self, sleep, mock_boto_session): radl_data = """ network net (outbound = 'yes') network net2 (outbound = 'yes') @@ -489,12 +501,14 @@ def test_60_finalize(self, sleep, mock_boto_client): vm.dns_entries = [('test', 'domain.com.', '158.42.1.1')] mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res 
= MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', 'State': {'Name': 'running'}}]}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.block_device_mappings = [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeId': 'volid'}}] mock_conn.describe_addresses.return_value = {'Addresses': [{'PublicIp': '158.42.1.1', 'InstanceId': 'id-1'}]} @@ -538,9 +552,9 @@ def test_60_finalize(self, sleep, mock_boto_client): self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call('ig-id')]) self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_70_create_snapshot(self, sleep, mock_boto_client): + def test_70_create_snapshot(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() @@ -548,11 +562,13 @@ def test_70_create_snapshot(self, sleep, mock_boto_client): vm = VirtualMachine(inf, "us-east-1;id1", ec2_cloud.cloud, "", "", ec2_cloud, 1) mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_res = MagicMock() + mock_boto_session.return_value.client.return_value = mock_conn + mock_boto_session.return_value.resource.return_value = mock_res mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} instance = MagicMock() - mock_conn.Instance.return_value = instance + mock_res.Instance.return_value = instance instance.create_image.return_value = {'ImageId': 'image-ami'} success, new_image = ec2_cloud.create_snapshot(vm, 0, 
"image_name", True, auth) @@ -562,19 +578,19 @@ def test_70_create_snapshot(self, sleep, mock_boto_client): self.assertEqual(instance.create_image.call_args_list, [call(Name='image_name', Description='AMI automatically generated by IM', NoReboot=True, - TagSpecifications=[{'ResourceType': 'snapshot', + TagSpecifications=[{'ResourceType': 'image', 'Tags': [{'Key': 'instance_id', 'Value': 'id1'}]}])]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_80_delete_image(self, sleep, mock_boto_client): + def test_80_delete_image(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_boto_session.return_value.client.return_value = mock_conn mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) @@ -583,14 +599,14 @@ def test_80_delete_image(self, sleep, mock_boto_client): self.assertEqual(mock_conn.deregister_image.call_args_list, [call('image-ami')]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) - @patch('IM.connectors.EC2.boto3.client') + @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') - def test_90_list_images(self, sleep, mock_boto_client): + def test_90_list_images(self, sleep, mock_boto_session): auth = Authentication([{'id': 'ec2', 'type': 'EC2', 'username': 'user', 'password': 'pass'}]) ec2_cloud = self.get_ec2_cloud() mock_conn = MagicMock() - mock_boto_client.return_value = mock_conn + mock_boto_session.return_value.client.return_value = mock_conn mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} 
mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-123456789012', 'Name': 'image_name'}]} From 12d271b66cab33ba617e1db026bf32901494c4b9 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 12:48:47 +0200 Subject: [PATCH 15/53] Implements #301 --- IM/connectors/EC2.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 86ad4844..c1cc4946 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -177,12 +177,11 @@ def get_connection(self, region_name, auth_data, service_name, object_type='clie if self.connection and self.auth.compare(auth_data, self.type): if object_type == 'resource': - client = self.connection.resource('ec2') + return self.connection.resource(service_name) else: - client = self.connection.client('ec2') + return self.connection.client(service_name) else: self.auth = auth_data - client = None try: if 'username' in auth and 'password' in auth: if region_name != 'universal': @@ -198,9 +197,9 @@ def get_connection(self, region_name, auth_data, service_name, object_type='clie aws_session_token=auth.get('token')) self.connection = session if object_type == 'resource': - client = session.resource(service_name) + return session.resource(service_name) else: - client = session.client(service_name) + return session.client(service_name) else: self.log_error("No correct auth data has been specified to EC2: " "username (Access Key) and password (Secret Key)") @@ -211,8 +210,6 @@ def get_connection(self, region_name, auth_data, service_name, object_type='clie self.log_exception("Error getting the region " + region_name) raise Exception("Error getting the region " + region_name + ": " + str(ex)) - return client - # path format: aws://eu-west-1/ami-00685b74 @staticmethod def getAMIData(path): From ac12e23d23d153b7769a7ba0b8db5b3b0492f6eb Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 12:52:12 +0200 Subject: [PATCH 16/53] 
Implements #301 --- IM/connectors/EC2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index c1cc4946..c2d303e9 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1824,7 +1824,7 @@ def list_images(self, auth_data, filters=None): regions = [filters['region']] del filters['region'] if not regions: - region = [region['RegionName'] for region in conn.describe_regions()['Regions']] + region = [region['RegionName'] for region in boto3.client('ec2').describe_regions()['Regions']] images_filter = {'architecture': 'x86_64', 'image-type': 'machine', 'virtualization-type': 'hvm', 'state': 'available', From 64396ab854ff1638e57a4440b287071bad9a5d35 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 13:12:57 +0200 Subject: [PATCH 17/53] Implements #301 --- IM/connectors/EC2.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index c2d303e9..c9f26614 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1563,12 +1563,6 @@ def delete_security_groups(self, conn, vm): """ sgs = self._get_security_groups(conn, vm) - if sgs: - # Get the default SG to set in the instances - def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, - {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] - )['SecurityGroups'][0]['GroupId'] - for sg in sgs: if sg['Description'] != "Security group created by the IM": self.log_info("SG %s not created by the IM. Do not delete it." 
% sg['GroupName']) @@ -1576,14 +1570,12 @@ def delete_security_groups(self, conn, vm): try: reservations = conn.describe_instances(Filters=[{'Name': 'instance.group-id', 'Values': [sg['GroupId']]}])['Reservations'] - if reservations and reservations[0]['Instances']: + if reservations: for instance in reservations[0]['Instances']: - conn.modify_instance_attribute( - InstanceId=instance['InstanceId'], - Groups=[{'GroupId': def_sg_id}] - ) + conn.modify_instance_attribute(InstanceId=instance['InstanceId'], Groups=[]) except Exception as ex: - self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg["GroupName"], instance.id, ex)) + self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg["GroupName"], + instance['InstanceId'], ex)) # try to wait some seconds to free the SGs time.sleep(5) From f57996fd0b6c4c67ddbce44697dffbba6f6e26a5 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 13 Sep 2024 13:58:31 +0200 Subject: [PATCH 18/53] Implements #301 --- IM/connectors/EC2.py | 59 ++++++++++++++++++++++--------------- test/unit/connectors/EC2.py | 11 +++---- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index c9f26614..77b94f3f 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -525,9 +525,10 @@ def create_networks(self, conn, radl, inf): else: # if not create it self.log_info("Creating VPC with cidr: %s." % vpc_cird) - vpc = conn.create_vpc(vpc_cird, TagSpecifications=[{'ResourceType': 'vpc', - 'Tags': [{'Key': 'IM-INFRA-ID', - 'Value': inf.id}]}]) + vpc = conn.create_vpc(CidrBlock=vpc_cird, + TagSpecifications=[{'ResourceType': 'vpc', + 'Tags': [{'Key': 'IM-INFRA-ID', + 'Value': inf.id}]}]) vpc_id = vpc['Vpc']['VpcId'] self.log_info("VPC %s created." % vpc_id) @@ -564,26 +565,26 @@ def create_networks(self, conn, radl, inf): 'Value': inf.id}, {'Key': 'IM-SUBNET-ID', 'Value': net.id}]}]) - self.log_info("Subnet %s created." 
% subnet.id) + self.log_info("Subnet %s created." % subnet['Subnet']['SubnetId']) net.setValue('cidr', net_cidr) # Set also the cidr in the inf RADL inf.radl.get_network_by_id(net.id).setValue('cidr', net_cidr) - net.setValue('provider_id', "%s.%s" % (vpc_id, subnet.id)) + net.setValue('provider_id', "%s.%s" % (vpc_id, subnet['Subnet']['SubnetId'])) except Exception as ex: self.log_exception("Error creating subnets or vpc.") try: for subnet in conn.describe_subnets(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['Subnets']: - self.log_info("Deleting subnet: %s" % subnet.id) - conn.delete_subnet(subnet.id) + self.log_info("Deleting subnet: %s" % subnet['Subnets']['SubnetId']) + conn.delete_subnet(SubnetId=subnet['Subnets']['SubnetId']) for vpc in conn.describe_vpcs(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['Vpcs']: - self.log_info("Deleting vpc: %s" % vpc.id) - conn.delete_vpc(vpc.id) + self.log_info("Deleting vpc: %s" % vpc_id) + conn.delete_vpc(VpcId=vpc_id) for ig in conn.describe_internet_gateways(Filters=[{"Name": "tag:IM-INFRA-ID", "Values": [inf.id]}])['InternetGateways']: - self.log_info("Deleting Internet Gateway: %s" % ig.id) - conn.delete_internet_gateways(ig.id) + self.log_info("Deleting Internet Gateway: %s" % ig_id) + conn.delete_internet_gateways(InternetGatewayId=ig_id) except Exception: self.log_exception("Error deleting subnets or vpc.") raise ex @@ -884,8 +885,12 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): i += 1 - # if all the VMs have failed, remove the sgs + # if all the VMs have failed, remove the sgs and nets if all_failed: + try: + self.delete_networks(conn, inf.id) + except Exception as ex: + self.log_exception("Error deleting networks.") if sg_ids: for sgid in sg_ids: self.log_info("Remove the SG: %s" % sgid) @@ -1434,18 +1439,18 @@ def cancel_spot_requests(self, conn, vm): conn.cancel_spot_instance_requests(sir['SpotInstanceRequestId']) self.log_info("Spot instance request " + 
sir['SpotInstanceRequestId'] + " deleted") - def delete_networks(self, conn, vm, timeout=240): + def delete_networks(self, conn, inf_id, timeout=240): """ Delete the created networks """ - for subnet in conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['Subnets']: + for subnet in conn.describe_subnets(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Subnets']: self.log_info("Deleting subnet: %s" % subnet['SubnetId']) cont = 0 deleted = False while not deleted and cont < timeout: cont += 5 try: - conn.delete_subnet(subnet['SubnetId']) + conn.delete_subnet(SubnetId=subnet['SubnetId']) deleted = True except Exception as ex: self.log_warn("Error removing subnet: " + str(ex)) @@ -1454,25 +1459,25 @@ def delete_networks(self, conn, vm, timeout=240): time.sleep(5) if not deleted: - self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet.id)) + self.log_error("Timeout (%s) deleting the subnet %s" % (timeout, subnet['SubnetId'])) vpc_id = None - for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [vm.inf.id]}])['Vpcs']: + for vpc in conn.describe_vpcs(Filters=[{'Name': 'tag:IM-INFRA-ID', 'Values': [inf_id]}])['Vpcs']: vpc_id = vpc['VpcId'] ig_id = None for ig in conn.describe_internet_gateways(Filters=[{'Name': 'tag:IM-INFRA-ID', - 'Values': [vm.inf.id]}])['InternetGateways']: + 'Values': [inf_id]}])['InternetGateways']: ig_id = ig['InternetGatewayId'] if ig_id and vpc_id: self.log_info("Detacching Internet Gateway: %s from VPC: %s" % (ig_id, vpc_id)) - conn.detach_internet_gateway(ig_id, vpc_id) + conn.detach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id) if ig_id: self.log_info("Deleting Internet Gateway: %s" % ig_id) - conn.delete_internet_gateway(ig_id) + conn.delete_internet_gateway(InternetGatewayId=ig_id) if vpc_id: self.log_info("Deleting vpc: %s" % vpc_id) - conn.delete_vpc(vpc_id) + conn.delete_vpc(VpcId=vpc_id) def finalize(self, vm, last, auth_data): @@ -1527,7 +1532,7 
@@ def finalize(self, vm, last, auth_data): # And nets try: - self.delete_networks(conn, vm) + self.delete_networks(conn, vm.inf.id) except Exception as ex: self.log_exception("Error deleting networks.") error_msg += "Error deleting networks: %s. " % ex @@ -1563,6 +1568,12 @@ def delete_security_groups(self, conn, vm): """ sgs = self._get_security_groups(conn, vm) + if sgs: + # Get the default SG to set in the instances + def_sg_id = conn.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': ['default']}, + {'Name': 'vpc-id', 'Values': [sgs[0]['VpcId']]}] + )['SecurityGroups'][0]['GroupId'] + for sg in sgs: if sg['Description'] != "Security group created by the IM": self.log_info("SG %s not created by the IM. Do not delete it." % sg['GroupName']) @@ -1572,7 +1583,7 @@ def delete_security_groups(self, conn, vm): 'Values': [sg['GroupId']]}])['Reservations'] if reservations: for instance in reservations[0]['Instances']: - conn.modify_instance_attribute(InstanceId=instance['InstanceId'], Groups=[]) + conn.modify_instance_attribute(InstanceId=instance['InstanceId'], Groups=[def_sg_id]) except Exception as ex: self.log_warn("Error removing the SG %s from the instance: %s. %s" % (sg["GroupName"], instance['InstanceId'], ex)) @@ -1800,7 +1811,7 @@ def delete_image(self, image_url, auth_data): self.log_info("Deleting image: %s." 
% image_url) conn = self.get_connection(region_name, auth_data, 'ec2') - success = conn.deregister_image(ami) + success = conn.deregister_image(ImageId=ami) if success: return (True, "") diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index 43b6b98b..286f4433 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -547,10 +547,11 @@ def test_60_finalize(self, sleep, mock_boto_session): self.assertEqual(mock_conn.delete_security_group.call_args_list, [call(GroupId='sg1'), call(GroupId='sg3')]) self.assertEqual(instance.terminate.call_args_list, [call()]) - self.assertEqual(mock_conn.delete_subnet.call_args_list, [call('subnet-id')]) - self.assertEqual(mock_conn.delete_vpc.call_args_list, [call('vpc-id')]) - self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call('ig-id')]) - self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call('ig-id', 'vpc-id')]) + self.assertEqual(mock_conn.delete_subnet.call_args_list, [call(SubnetId='subnet-id')]) + self.assertEqual(mock_conn.delete_vpc.call_args_list, [call(VpcId='vpc-id')]) + self.assertEqual(mock_conn.delete_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id')]) + self.assertEqual(mock_conn.detach_internet_gateway.call_args_list, [call(InternetGatewayId='ig-id', + VpcId='vpc-id')]) @patch('IM.connectors.EC2.boto3.session.Session') @patch('time.sleep') @@ -596,7 +597,7 @@ def test_80_delete_image(self, sleep, mock_boto_session): success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) self.assertTrue(success, msg="ERROR: deleting image. 
%s" % msg) - self.assertEqual(mock_conn.deregister_image.call_args_list, [call('image-ami')]) + self.assertEqual(mock_conn.deregister_image.call_args_list, [call(ImageId='image-ami')]) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) @patch('IM.connectors.EC2.boto3.session.Session') From 61b1429c14d319dd73c9e78850a1c84cd8f58586 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Mon, 16 Sep 2024 10:14:19 +0200 Subject: [PATCH 19/53] Implements #301 --- IM/connectors/EC2.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 77b94f3f..417cbc6a 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -1314,7 +1314,7 @@ def updateVMInfo(self, vm, auth_data): return (True, vm) def _get_zone(self, conn, domain): - zones = conn.list_hosted_zones_by_name(DNSName=domain)['HostedZones'] + zones = conn.list_hosted_zones_by_name(DNSName=domain, MaxItems='1')['HostedZones'] if not zones or len(zones) == 0: return None return zones[0] @@ -1349,7 +1349,8 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): fqdn = hostname + "." + domain records = conn.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=fqdn, - StartRecordType='A')['ResourceRecordSets'] + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] if not records or records[0]['Name'] != fqdn: self.log_info("Creating DNS record %s." % fqdn) conn.change_resource_record_sets( @@ -1392,7 +1393,8 @@ def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): fqdn = hostname + "." + domain records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], StartRecordName=fqdn, - StartRecordType='A')['ResourceRecordSets'] + StartRecordType='A', + MaxItems='1')['ResourceRecordSets'] if not records or records[0]['Name'] != fqdn: self.log_info("DNS record %s does not exists. Do not delete." 
% fqdn) else: @@ -1413,10 +1415,8 @@ def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): ) # if there are no A records - # all_a_records = conn.list_resource_record_sets( - # HostedZoneId=zone['Id'], - # StartRecordType='A' - # )['ResourceRecordSets'] + # all_a_records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], + # StartRecordType='A')['ResourceRecordSets'] # if not all_a_records: # self.log_info("Deleting DNS zone %s." % domain) # conn.delete_hosted_zone(zone['Id']) From 0a2da33a81e655769f431623427f3157e9477cb1 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 20 Sep 2024 13:06:05 +0200 Subject: [PATCH 20/53] Minor changes --- IM/connectors/EC2.py | 109 +++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 62 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index 417cbc6a..bda0eb2e 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -308,6 +308,23 @@ def _get_security_group(conn, sg_name): except Exception: return None + @staticmethod + def _get_default_security_rules(sg): + return [ + { + "IpProtocol": "tcp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + { + "IpProtocol": "udp", + "FromPort": 0, + "ToPort": 65535, + "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], + }, + ] + def create_security_groups(self, conn, inf, radl, vpc): res = [] try: @@ -326,18 +343,8 @@ def create_security_groups(self, conn, inf, radl, vpc): Description="Security group created by the IM", VpcId=vpc) # open all the ports for the VMs in the security group - conn.authorize_security_group_ingress( - GroupId=sg['GroupId'], - IpPermissions=[ - {'IpProtocol': 'tcp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, - {'IpProtocol': 'udp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} - ]) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + 
IpPermissions=self._get_default_security_rules(sg)) except Exception as crex: # First check if the SG does exist sg = self._get_security_group(conn, sg_name) @@ -379,18 +386,8 @@ def create_security_groups(self, conn, inf, radl, vpc): try: # open all the ports for the VMs in the security group - conn.authorize_security_group_ingress( - GroupId=sg['GroupId'], - IpPermissions=[ - {'IpProtocol': 'tcp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]}, - {'IpProtocol': 'udp', - 'FromPort': 0, - 'ToPort': 65535, - 'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]} - ]) + conn.authorize_security_group_ingress(GroupId=sg['GroupId'], + IpPermissions=self._get_default_security_rules(sg)) except Exception as addex: self.log_warn("Exception adding SG rules. Probably the rules exists:" + str(addex)) @@ -889,7 +886,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if all_failed: try: self.delete_networks(conn, inf.id) - except Exception as ex: + except Exception: self.log_exception("Error deleting networks.") if sg_ids: for sgid in sg_ids: @@ -1256,8 +1253,7 @@ def addRouterInstance(self, vm, conn): return success def updateVMInfo(self, vm, auth_data): - region = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region, instance_id = vm.id.split(";") conn = self.get_connection(region, auth_data, 'ec2') # Check if the instance_id starts with "sir" -> spot request @@ -1319,6 +1315,22 @@ def _get_zone(self, conn, domain): return None return zones[0] + @staticmethod + def _get_change_batch(action, fqdn, ip): + return { + "Changes": [ + { + "Action": action, + "ResourceRecordSet": { + "Name": fqdn, + "Type": "A", + "TTL": 300, + "ResourceRecords": [{"Value": ip}], + }, + } + ] + } + def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): try: # Workaround to use EC2 as the default case. 
@@ -1353,19 +1365,8 @@ def add_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): MaxItems='1')['ResourceRecordSets'] if not records or records[0]['Name'] != fqdn: self.log_info("Creating DNS record %s." % fqdn) - conn.change_resource_record_sets( - HostedZoneId=zone_id, - ChangeBatch={ - 'Changes': [{ - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': fqdn, - 'Type': 'A', - 'TTL': 300, - 'ResourceRecords': [{'Value': ip}] - } - }] - }) + conn.change_resource_record_sets(HostedZoneId=zone_id, + ChangeBatch=self._get_change_batch('CREATE', fqdn, ip)) else: self.log_info("DNS record %s exists. Do not create." % fqdn) return True @@ -1399,20 +1400,8 @@ def del_dns_entry(self, hostname, domain, ip, auth_data, extra_args=None): self.log_info("DNS record %s does not exists. Do not delete." % fqdn) else: self.log_info("Deleting DNS record %s." % fqdn) - conn.change_resource_record_sets( - HostedZoneId=zone['Id'], - ChangeBatch={ - 'Changes': [{ - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'Name': fqdn, - 'Type': 'A', - 'TTL': 300, - 'ResourceRecords': [{'Value': ip}] - } - }] - } - ) + conn.change_resource_record_sets(HostedZoneId=zone['Id'], + ChangeBatch=self._get_change_batch('DELETE', fqdn, ip)) # if there are no A records # all_a_records = conn.list_resource_record_sets(HostedZoneId=zone['Id'], @@ -1490,8 +1479,7 @@ def finalize(self, vm, last, auth_data): self.log_info("VM with no ID. 
Ignore.") return True, "" - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") conn = self.get_connection(region_name, auth_data, 'ec2') @@ -1619,8 +1607,7 @@ def reboot(self, vm, auth_data): return self._vm_operation("reboot", vm, auth_data) def _vm_operation(self, op, vm, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") instance = self.get_instance_by_id(instance_id, region_name, auth_data) if (instance is not None): @@ -1654,8 +1641,7 @@ def waitStop(instance, timeout=120): return powered_off def alterVM(self, vm, radl, auth_data): - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") # Terminate the instance instance = self.get_instance_by_id(instance_id, region_name, auth_data) @@ -1775,8 +1761,7 @@ def create_snapshot(self, vm, disk_num, image_name, auto_delete, auth_data): - The second value is a str with the url of the new image if the operation finished successfully or an error message otherwise. 
""" - region_name = vm.id.split(";")[0] - instance_id = vm.id.split(";")[1] + region_name, instance_id = vm.id.split(";") snapshot_id = None # Obtain the connection object to connect with EC2 From b3e4bb682306e268e9b1632ea08178bba222577d Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 20 Sep 2024 13:24:22 +0200 Subject: [PATCH 21/53] Fix get region names --- IM/connectors/EC2.py | 4 +--- test/unit/connectors/EC2.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/IM/connectors/EC2.py b/IM/connectors/EC2.py index bda0eb2e..0daedc7a 100644 --- a/IM/connectors/EC2.py +++ b/IM/connectors/EC2.py @@ -185,9 +185,7 @@ def get_connection(self, region_name, auth_data, service_name, object_type='clie try: if 'username' in auth and 'password' in auth: if region_name != 'universal': - client = boto3.client('ec2') - regions = client.describe_regions() - region_names = [region['RegionName'] for region in regions['Regions']] + region_names = boto3.session.Session().get_available_regions('ec2') if region_name not in region_names: raise Exception("Incorrect region name: " + region_name) diff --git a/test/unit/connectors/EC2.py b/test/unit/connectors/EC2.py index 286f4433..29780728 100755 --- a/test/unit/connectors/EC2.py +++ b/test/unit/connectors/EC2.py @@ -154,7 +154,7 @@ def test_20_launch(self, save_data, mock_boto_session): mock_conn.create_security_group.return_value = {'GroupId': 'sg-id'} mock_conn.describe_vpcs.return_value = {'Vpcs': [{'VpcId': 'vpc-id'}]} mock_conn.describe_subnets.return_value = {'Subnets': [{'SubnetId': 'subnet-id'}]} - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-id', 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', 'Ebs': { @@ -209,7 +209,7 @@ def test_25_launch_spot(self, save_data, mock_boto_session): 
mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] mock_conn.run_instances.return_value = {'Instances': [{'InstanceId': 'iid'}]} instance = MagicMock() @@ -268,7 +268,7 @@ def test_30_updateVMInfo(self, mock_boto_session): mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', 'State': {'Name': 'running'}}]}]} instance = MagicMock() @@ -364,7 +364,7 @@ def test_40_updateVMInfo_spot(self, mock_boto_session): mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() mock_res.Instance.return_value = instance instance.id = "iid" @@ -398,7 +398,7 @@ def test_50_vmop(self, mock_boto_session): mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() mock_res.Instance.return_value = instance @@ -452,7 +452,7 @@ def test_55_alter(self, mock_boto_session): mock_res = MagicMock() 
mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() mock_res.Instance.return_value = instance @@ -504,7 +504,7 @@ def test_60_finalize(self, sleep, mock_boto_session): mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] mock_conn.describe_instances.return_value = {'Reservations': [{'Instances': [{'InstanceId': 'vrid', 'State': {'Name': 'running'}}]}]} instance = MagicMock() @@ -566,7 +566,7 @@ def test_70_create_snapshot(self, sleep, mock_boto_session): mock_res = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn mock_boto_session.return_value.resource.return_value = mock_res - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] instance = MagicMock() mock_res.Instance.return_value = instance @@ -592,7 +592,7 @@ def test_80_delete_image(self, sleep, mock_boto_session): mock_conn = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn - mock_conn.describe_regions.return_value = {'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] success, msg = ec2_cloud.delete_image('aws://us-east-1/image-ami', auth) @@ -608,7 +608,7 @@ def test_90_list_images(self, sleep, mock_boto_session): mock_conn = MagicMock() mock_boto_session.return_value.client.return_value = mock_conn - mock_conn.describe_regions.return_value = 
{'Regions': [{'RegionName': 'us-east-1'}]} + mock_boto_session.return_value.get_available_regions.return_value = ['us-east-1'] mock_conn.describe_images.return_value = {'Images': [{'ImageId': 'ami-123456789012', 'Name': 'image_name'}]} res = ec2_cloud.list_images(auth, filters={'region': 'us-east-1'}) From 2d92f36f4cb89f89e0a0062a9a85f9336b6c7b30 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 8 Nov 2024 09:27:38 +0100 Subject: [PATCH 22/53] Set ver 1.18.0 --- IM/__init__.py | 2 +- changelog | 4 ++++ codemeta.json | 2 +- doc/source/conf.py | 4 ++-- docker-devel/Dockerfile | 2 +- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/IM/__init__.py b/IM/__init__.py index ab315621..d03e90b7 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -19,7 +19,7 @@ 'InfrastructureInfo', 'InfrastructureManager', 'recipe', 'request', 'REST', 'retry', 'ServiceRequests', 'SSH', 'SSHRetry', 'timedcall', 'UnixHTTPAdapter', 'VirtualMachine', 'VMRC', 'xmlobject'] -__version__ = '1.17.1' +__version__ = '1.18.0' __author__ = 'Miguel Caballer' diff --git a/changelog b/changelog index dfaa04b2..ff80a770 100644 --- a/changelog +++ b/changelog @@ -777,3 +777,7 @@ IM 1.17.1: * Speed up Ansible installation using newer versions. * Fix problem with 0 disk flavors in OpenStack. * Flush Inf data to DB in case of service termination. + +IM 1.18.0: + * Enable to get IM stats. + * Migrate EC2 conn to boto3 library. 
diff --git a/codemeta.json b/codemeta.json index 1aab92a3..bfb302d9 100644 --- a/codemeta.json +++ b/codemeta.json @@ -6,7 +6,7 @@ "@type": "SoftwareSourceCode", "identifier": "im", "name": "Infrastructure Manager", - "version": "1.17.1", + "version": "1.18.0", "description": "IM is a tool that deploys complex and customized virtual infrastructures on IaaS Cloud deployments", "license": "GNU General Public License v3.0", "author": [ diff --git a/doc/source/conf.py b/doc/source/conf.py index 54bfd468..7ab6ab98 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -3,8 +3,8 @@ copyright = '2023, I3M-GRyCAP' author = 'micafer' -version = '1.17' -release = '1.17.1' +version = '1.18' +release = '1.18.0' master_doc = 'index' diff --git a/docker-devel/Dockerfile b/docker-devel/Dockerfile index ca00ac25..33d00187 100644 --- a/docker-devel/Dockerfile +++ b/docker-devel/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:24.04 ARG BRANCH=devel LABEL maintainer="Miguel Caballer " -LABEL version="1.17.1" +LABEL version="1.18.0" LABEL description="Container image to run the IM service. 
(http://www.grycap.upv.es/im)" EXPOSE 8899 8800 From 7dcbdfb5a71c230606d029b72e9cb9f2c01ab1cb Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 8 Nov 2024 09:37:41 +0100 Subject: [PATCH 23/53] Fix test --- test/unit/test_im_logic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index d9d154cc..2eea95f9 100644 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -1141,6 +1141,7 @@ def test_check_oidc_groups(self, openidclient): Config.OIDC_ISSUERS = ["https://iam-test.indigo-datacloud.eu/"] Config.OIDC_AUDIENCE = None Config.OIDC_GROUPS = ["urn:mace:egi.eu:group:demo.fedcloud.egi.eu:role=member#aai.egi.eu"] + Config.OIDC_GROUPS_CLAIM = "eduperson_entitlement" IM.check_oidc_token(im_auth) From 5758d2e2f0f56682fccb0a5fe05d7f6fb37f0b14 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 8 Nov 2024 11:50:12 +0100 Subject: [PATCH 24/53] Fix test --- test/integration/TestIM.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py index 1b937d36..94728e5c 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -261,7 +261,7 @@ def test_19_addresource(self): Test AddResource function """ (success, res) = self.server.AddResource( - self.inf_id, RADL_ADD_WIN, self.auth_data) + self.inf_id, RADL_ADD, self.auth_data) self.assertTrue(success, msg="ERROR calling AddResource: " + str(res)) (success, vm_ids) = self.server.GetInfrastructureInfo( @@ -272,7 +272,7 @@ def test_19_addresource(self): str(len(vm_ids)) + "). 
It must be 4")) all_configured = self.wait_inf_state( - self.inf_id, VirtualMachine.CONFIGURED, 2700) + self.inf_id, VirtualMachine.CONFIGURED, 2400) self.assertTrue( all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).") From 06f2180679bd1056e61b996c453dabd438997d97 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Mon, 11 Nov 2024 09:57:24 +0100 Subject: [PATCH 25/53] Fix error in ctxt dist --- contextualization/ctxt_agent_dist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contextualization/ctxt_agent_dist.py b/contextualization/ctxt_agent_dist.py index af981185..f96a8a01 100755 --- a/contextualization/ctxt_agent_dist.py +++ b/contextualization/ctxt_agent_dist.py @@ -211,7 +211,7 @@ def get_master_ssh(self, general_conf_data): return SSHRetry(vm_ip, ctxt_vm['user'], passwd, private_key, ctxt_vm['remote_port']) @staticmethod - def get_ssh(vm, pk_file, changed_pass=None): + def get_ssh(vm, pk_file, changed_pass=None, use_proxy=False): passwd = vm['passwd'] if 'new_passwd' in vm and vm['new_passwd'] and changed_pass: passwd = vm['new_passwd'] From ebc958b48e3f5774fbd0d036e0e7c649166d071c Mon Sep 17 00:00:00 2001 From: Miguel Caballer Fernandez Date: Wed, 13 Nov 2024 15:57:15 +0100 Subject: [PATCH 26/53] Fix typos --- doc/source/tosca.rst | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/doc/source/tosca.rst b/doc/source/tosca.rst index 2cdc6bcf..c079b8ae 100644 --- a/doc/source/tosca.rst +++ b/doc/source/tosca.rst @@ -5,9 +5,9 @@ TOSCA The Infrastructure Manager supports the definition of Cloud topologies using `OASIS TOSCA Simple Profile in YAML Version 1.0 `_. -The TOSCA support has been developed under de framework of the `INDIGO DataCloud EU project `_. +The TOSCA support was developed under the framework of the `INDIGO DataCloud EU project `_. You can see some input examples at -`https://github.com/indigo-dc/tosca-types/tree/master/examples `_. 
+`https://github.com/grycap/tosca/tree/main/templates `_. Basic example ^^^^^^^^^^^^^ @@ -55,7 +55,7 @@ the SSH credentials to access it:: Setting VMI URI ^^^^^^^^^^^^^^^^ -As in RADL you can set an specific URI identifying the VMI to use in the VM. +As in RADL, you can set a specific URI identifying the VMI to use in the VM. The URI format is the same used in RADL (:ref:`radl_system`). In this case the type must be changed to ``tosca.nodes.indigo.Compute`` (the Compute normative type does not support the ``os image`` property), and the image property must @@ -82,7 +82,7 @@ be added in the ``os`` capability:: Advanced Compute host properties ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``tosca.nodes.indigo.Compute`` custom type add a new set of advanced features to the +The ``tosca.nodes.indigo.Compute`` custom type adds a new set of advanced features to the host properties, enabling the request of GPUs and `Intel SGX `_ CPU support in the compute node:: @@ -109,8 +109,8 @@ Network properties Basic properties ----------------- -The easiest way to specify network requirements of the Compute node is sing the endpoint capability properties. -For example the following example the compute node requests for a public IP:: +The easiest way to specify network requirements of the Compute node is using the endpoint capability properties. +For example, the following example the compute node requests for a public IP:: ... simple_node: @@ -123,15 +123,15 @@ For example the following example the compute node requests for a public IP:: Possible values of the ``network_name`` endpoint property: - * PRIVATE: The Compute node does not requires a public IP. **This is the default behavior if no + * PRIVATE: The Compute node does not require a public IP. **This is the default behaviour if no endpoint capability is defined**. * PUBLIC: The Compute node requires a public IP. 
* Network provider ID: As the `provider_id` network property in RADL It defines the name of the network in a specific Cloud provider (see :ref:`_radl_network`): -Furthermore the endpoint capability has a set of additional properties -to set the DNS name of the node or the set of ports to be externally accesible:: +Furthermore, the endpoint capability has a set of additional properties +to set the DNS name of the node or the set of ports to be externally accessible:: ... @@ -151,7 +151,7 @@ to set the DNS name of the node or the set of ports to be externally accesible:: Advanced properties ------------------- -In case that you need a more detailed definition of the networks, you can use the +In case you need a more detailed definition of the networks, you can use the ``tosca.nodes.network.Network`` and ``tosca.nodes.network.Port`` TOSCA normative types. In this way you can define the set of networks needed in your topology using the ports to link the networks with the Compute nodes:: @@ -198,7 +198,7 @@ Custom defined Port type ``tosca.nodes.indigo.network.Port`` has a set of additi Software Components ^^^^^^^^^^^^^^^^^^^ -IM enable to use Ansible playbooks as implementation scripts. Furthermore it enables to specify +IM enable the use of Ansible playbooks as implementation scripts. Furthermore, it enables to specify Ansible roles (``tosca.artifacts.AnsibleGalaxy.role``) and collections (``tosca.artifacts.AnsibleGalaxy.collections``) to be installed and used in the playbooks:: @@ -255,8 +255,8 @@ some cloud providers, in general is better not to add it:: Policies & groups ^^^^^^^^^^^^^^^^^ -IM enables the definition of the specific cloud provider where the Compute nodes will be deployed in an hybrid deployment. -For example, in the following code we assume that we have defined three computes nodes (compute_one, compute_two and compute_three). +IM enables the definition of the specific cloud provider where the Compute nodes will be deployed in a hybrid deployment. 
+For example, in the following code we assume that we have defined three compute nodes (compute_one, compute_two and compute_three). We can create a placement group with two of them (compute_one and compute_two) and then set a placement policy with a cloud_id (that must be defined in the :ref:`auth-file`), and create a second placement policy where we can set a different cloud provider and, optionally, an availability zone:: @@ -285,10 +285,10 @@ Container Applications (Kubernetes connector) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ IM also enables the definition of container applications to be deployed in a Kubernetes cluster. -In the following example we can see how to define a container application (IM) that uses a +In the following example, we can see how to define a container application (IM) that uses a ConfigMap for a configuration file. The IM application is connected with a MySQL backend using the ``IM_DATA_DB`` environment variable. The MySQL container is defined with a Persistent -Volume Claim (PVC) of 10GB. Furthermore the IM application specifies an endpoint to be published +Volume Claim (PVC) of 10GB. Furthermore, the IM application specifies an endpoint to be published that will result in the creation of a Kubernetes Ingress. ... @@ -384,14 +384,14 @@ Advanced Output values The ``tosca.nodes.indigo.Compute`` node type adds a new attribute named: ``ansible_output``. It is a map that has one element per each IM configuration step, so you can access it by name. The steps have the keyword -``tasks`` that is also a map that has one element per ansible task. In this case -it can bes accessed using the task name as defined in the playbook. Finally +``tasks``, that is also a map that has one element per ansible task. In this case +it can be accessed using the task name as defined in the playbook. Finally there is an ``output`` keyword that returns the output of the task. 
In most of the cases the task is a ``debug`` ansible task that shows anything you want to return. -In the following example the specified task was a debug ansible task that shows the -value of a internal defined value:: +In the following example, the specified task was a debug ansible task that shows the +value of a internally defined value:: ... From 3db3189a76ed11709e9fd76d24aca22eafcda75a Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 14 Nov 2024 12:44:13 +0100 Subject: [PATCH 27/53] Fix SSH #1633 --- IM/SSH.py | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/IM/SSH.py b/IM/SSH.py index 966793fc..8090e5cd 100644 --- a/IM/SSH.py +++ b/IM/SSH.py @@ -118,8 +118,10 @@ def __init__(self, host, user, passwd=None, private_key=None, port=22, proxy_hos private_key_obj = StringIO() if os.path.isfile(private_key): pkfile = open(private_key) + self.private_key = "" for line in pkfile.readlines(): private_key_obj.write(line) + self.private_key += line pkfile.close() else: # Avoid windows line endings @@ -128,18 +130,31 @@ def __init__(self, host, user, passwd=None, private_key=None, port=22, proxy_hos if not private_key.endswith("\n"): private_key += "\n" private_key_obj.write(private_key) + self.private_key = private_key - self.private_key = private_key - private_key_obj.seek(0) + self.private_key_obj = self._load_private_key(private_key_obj) - if "BEGIN RSA PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.RSAKey.from_private_key(private_key_obj) - elif "BEGIN DSA PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.DSSKey.from_private_key(private_key_obj) - elif "BEGIN EC PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.ECDSAKey.from_private_key(private_key_obj) - elif "BEGIN OPENSSH PRIVATE KEY" in private_key: - self.private_key_obj = paramiko.Ed25519Key.from_private_key(private_key_obj) + @staticmethod + def _load_private_key(private_key_obj): + """ Load a private key 
from a file-like object""" + private_key_obj.seek(0) + try: + return paramiko.RSAKey.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) + try: + return paramiko.DSSKey.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) + try: + return paramiko.ECDSAKey.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) + try: + return paramiko.Ed25519Key.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) + raise Exception("Invalid private key") def __del__(self): self.close() From a35d58bc5bcf3ac04addec7fbee4691d3d3c479b Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 14 Nov 2024 14:51:10 +0100 Subject: [PATCH 28/53] Improve code #1633 --- IM/SSH.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/IM/SSH.py b/IM/SSH.py index 8090e5cd..bde5fbba 100644 --- a/IM/SSH.py +++ b/IM/SSH.py @@ -132,28 +132,17 @@ def __init__(self, host, user, passwd=None, private_key=None, port=22, proxy_hos private_key_obj.write(private_key) self.private_key = private_key + private_key_obj.seek(0) self.private_key_obj = self._load_private_key(private_key_obj) @staticmethod def _load_private_key(private_key_obj): """ Load a private key from a file-like object""" - private_key_obj.seek(0) - try: - return paramiko.RSAKey.from_private_key(private_key_obj) - except Exception: - private_key_obj.seek(0) - try: - return paramiko.DSSKey.from_private_key(private_key_obj) - except Exception: - private_key_obj.seek(0) - try: - return paramiko.ECDSAKey.from_private_key(private_key_obj) - except Exception: - private_key_obj.seek(0) - try: - return paramiko.Ed25519Key.from_private_key(private_key_obj) - except Exception: - private_key_obj.seek(0) + for kype in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey, paramiko.Ed25519Key]: + try: + return kype.from_private_key(private_key_obj) + except Exception: + private_key_obj.seek(0) raise 
Exception("Invalid private key") def __del__(self): From b38eea0fd8e49a8706b9bc145674be13067090fa Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 14 Nov 2024 15:24:46 +0100 Subject: [PATCH 29/53] Add default init date --- IM/InfrastructureManager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index ff3213a4..eab08d99 100644 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -2056,6 +2056,8 @@ def GetStats(init_date, end_date, auth): """ # First check the auth data auth = InfrastructureManager.check_auth_data(auth) + if not init_date: + init_date = "1970-01-01" stats = Stats.get_stats(init_date, end_date, auth) if stats is None: raise Exception("ERROR connecting with the database!.") From ca389bf68498ac2d826ef2ccf33a0e9b7cb65ebe Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 08:11:41 +0100 Subject: [PATCH 30/53] Fix test --- test/integration/TestIM.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py index 94728e5c..aa9dff8e 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -510,7 +510,7 @@ def test_40_export_import(self): success, msg="ERROR calling ImportInfrastructure: " + str(res)) def test_45_stats(self): - (success, res) = self.server.GetStats(None, None, self.auth_data) + (success, res) = self.server.GetStats('', '', self.auth_data) self.assertTrue( success, msg="ERROR calling GetStats: " + str(res)) self.assertEqual(len(res), 3, msg="ERROR getting stats: Incorrect number of infrastructures") From ebb123a453030b0d7ac607e2139fd899cc391ce4 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 11:17:02 +0100 Subject: [PATCH 31/53] Do not use none in stats --- IM/Stats.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/IM/Stats.py b/IM/Stats.py index f5a53b05..21de2658 100644 --- a/IM/Stats.py +++ 
b/IM/Stats.py @@ -35,7 +35,7 @@ class Stats(): @staticmethod def _get_data(str_data, init_date=None, end_date=None): dic = json.loads(str_data) - resp = {'creation_date': None} + resp = {'creation_date': ''} if 'creation_date' in dic and dic['creation_date']: creation_date = datetime.datetime.fromtimestamp(float(dic['creation_date'])) resp['creation_date'] = str(creation_date) @@ -44,7 +44,7 @@ def _get_data(str_data, init_date=None, end_date=None): if end_date and creation_date > end_date: return None - resp['tosca_name'] = None + resp['tosca_name'] = '' if 'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']: try: tosca = yaml.safe_load(dic['extra_info']['TOSCA']) @@ -56,8 +56,8 @@ def _get_data(str_data, init_date=None, end_date=None): resp['vm_count'] = 0 resp['cpu_count'] = 0 resp['memory_size'] = 0 - resp['cloud_type'] = None - resp['cloud_host'] = None + resp['cloud_type'] = '' + resp['cloud_host'] = '' resp['hybrid'] = False resp['deleted'] = True if 'deleted' in dic and dic['deleted'] else False for str_vm_data in dic['vm_list']: From 7116df367cf31e80f7200009ad0f0ced848aa2f5 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 11:17:02 +0100 Subject: [PATCH 32/53] Do not use none in stats --- IM/Stats.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/IM/Stats.py b/IM/Stats.py index f5a53b05..21de2658 100644 --- a/IM/Stats.py +++ b/IM/Stats.py @@ -35,7 +35,7 @@ class Stats(): @staticmethod def _get_data(str_data, init_date=None, end_date=None): dic = json.loads(str_data) - resp = {'creation_date': None} + resp = {'creation_date': ''} if 'creation_date' in dic and dic['creation_date']: creation_date = datetime.datetime.fromtimestamp(float(dic['creation_date'])) resp['creation_date'] = str(creation_date) @@ -44,7 +44,7 @@ def _get_data(str_data, init_date=None, end_date=None): if end_date and creation_date > end_date: return None - resp['tosca_name'] = None + resp['tosca_name'] = '' if 
'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']: try: tosca = yaml.safe_load(dic['extra_info']['TOSCA']) @@ -56,8 +56,8 @@ def _get_data(str_data, init_date=None, end_date=None): resp['vm_count'] = 0 resp['cpu_count'] = 0 resp['memory_size'] = 0 - resp['cloud_type'] = None - resp['cloud_host'] = None + resp['cloud_type'] = '' + resp['cloud_host'] = '' resp['hybrid'] = False resp['deleted'] = True if 'deleted' in dic and dic['deleted'] else False for str_vm_data in dic['vm_list']: From a8bc4fd765ffac7f7722406df060fc507d6ee35c Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 11:44:43 +0100 Subject: [PATCH 33/53] User virtualenv --- IM/ConfManager.py | 3 +- contextualization/conf-ansible.yml | 53 ++++++++++++++++++++---------- 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 6ce5d057..fd2997b4 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -397,7 +397,8 @@ def launch_ctxt_agent(self, vm, tasks): vault_password = vm.info.systems[0].getValue("vault.password") if vault_password: vault_export = "export VAULT_PASS='%s' && " % vault_password - (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + "python3 " + Config.REMOTE_CONF_DIR + + (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + "/var/tmp/.ansible/bin/python3 " + + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + ctxt_agent_command + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + "/general_info.cfg " + remote_dir + "/" + os.path.basename(conf_file) + diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index de3172eb..d586ac8a 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -6,6 +6,7 @@ vars: # Ansible specific Version or "latest" ANSIBLE_VERSION: 4.10.0 + VENV_PATH: /var/tmp/.ansible tasks: ############## To avoid some issues with cloud-init and unattended upgrades ############### - name: Avoid 
unattended upgrades @@ -99,15 +100,15 @@ ################### Install Ansible/pip requisites ######################### - name: Debian/Ubuntu install requisites with apt - apt: name=python3-pip,wget,python3-setuptools,python3-psutil,sshpass,openssh-client,unzip install_recommends=no + apt: name=python3-pip,wget,python3-setuptools,sshpass,openssh-client,unzip install_recommends=no when: ansible_os_family == "Debian" - name: Yum install requisites RH 7/8 or Fedora - command: yum install -y python3-pip python3-setuptools python3-psutil sshpass openssh-clients + command: yum install -y python3-pip python3-setuptools sshpass openssh-clients when: ansible_os_family == "RedHat" - name: Zypper install requirements Suse - zypper: name=python3-pip,python3-setuptools,python3-psutil,wget,python3-cryptography state=present + zypper: name=python3-pip,python3-setuptools,wget,python3-cryptography state=present when: ansible_os_family == "Suse" - name: Install python-setuptools @@ -116,11 +117,26 @@ ######################################### Use pip to enable to set the version ############################################# + - name: Set extra_args var + set_fact: + extra_args: --prefer-binary + + - name: Set extra_args var in py3.11 + set_fact: + extra_args: --prefer-binary --break-system-packages + when: ansible_python_version is version('3.11', '>=') + + - name: Install virtualenv with pip + pip: + name: virtualenv + executable: pip3 + extra_args: "{{ extra_args }}" + # Version over 21 does not work with python 3.6 or older - name: Upgrade pip in py3.6- pip: name: pip>18.0,<21.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" # in some old distros we need to trust in the pypi to avoid SSL errors extra_args: --trusted-host files.pythonhosted.org --trusted-host pypi.org --trusted-host pypi.python.org when: ansible_python_version is version('3.7', '<') @@ -128,37 +144,38 @@ - name: Upgrade pip in py3.7-py3.8 pip: name: pip>20.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" 
when: ansible_python_version is version('3.7', '>=') and ansible_python_version is version('3.9', '<') - name: Upgrade pip in py3.9-py3.10 pip: name: pip>=22.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" when: ansible_python_version is version('3.9', '>=') and ansible_python_version is version('3.11', '<') # Version 66 (#2497) fails - name: Upgrade setuptools with Pip in py3.11- pip: name: setuptools<66.0.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" when: ansible_python_version is version('3.11', '<') - name: Set extra_args var set_fact: extra_args: --prefer-binary - - name: Set extra_args var in py3.11 - set_fact: - extra_args: --prefer-binary --break-system-packages - when: ansible_python_version is version('3.11', '>=') + - name: Install psutil + pip: + name: psutil + virtualenv: "{{ VENV_PATH }}" + extra_args: "{{ extra_args }}" - name: Install cryptography & pyOpenSSL in py3.11- pip: name: - cryptography>36.0.0,<39.0.0 - pyOpenSSL>20.0,<22.1.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ansible_python_version is version('3.11', '<') @@ -167,7 +184,7 @@ name: - cryptography>36.0.0 - pyOpenSSL>20.0 - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ansible_python_version is version('3.11', '>=') @@ -178,7 +195,7 @@ - pyyaml - paramiko>=2.9.5 - packaging - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" - name: Set Ansible newer version for python 3.8+ @@ -201,14 +218,14 @@ pip: name: ansible version: "{{ ANSIBLE_VERSION }}" - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ANSIBLE_VERSION != "latest" - name: Install latest ansible version with Pip pip: name: ansible - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" when: ANSIBLE_VERSION == "latest" @@ -219,13 +236,13 @@ name: - jmespath - scp - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ 
extra_args }}" - name: Install pywinrm with Pip pip: name: pywinrm - executable: pip3 + virtualenv: "{{ VENV_PATH }}" extra_args: "{{ extra_args }}" ignore_errors: yes From 0b832dd09b422c2b53ec8e8a53b81c4eae27bd8f Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 12:27:33 +0100 Subject: [PATCH 34/53] Fix --- contextualization/conf-ansible.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index d586ac8a..0804668b 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -130,7 +130,6 @@ pip: name: virtualenv executable: pip3 - extra_args: "{{ extra_args }}" # Version over 21 does not work with python 3.6 or older - name: Upgrade pip in py3.6- From 1f6b85293ae9f014cbcc1646060b36a88858a878 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 15 Nov 2024 12:45:41 +0100 Subject: [PATCH 35/53] Fix --- contextualization/conf-ansible.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index 0804668b..a8544a95 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -119,17 +119,18 @@ - name: Set extra_args var set_fact: - extra_args: --prefer-binary + extra_args: '' - name: Set extra_args var in py3.11 set_fact: - extra_args: --prefer-binary --break-system-packages + extra_args: --break-system-packages when: ansible_python_version is version('3.11', '>=') - name: Install virtualenv with pip pip: name: virtualenv executable: pip3 + extra_args: "{{ extra_args }}" # Version over 21 does not work with python 3.6 or older - name: Upgrade pip in py3.6- From 132369dde56fceacc372c0abd098d39a6c7e5272 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Fernandez Date: Sun, 17 Nov 2024 20:06:59 +0100 Subject: [PATCH 36/53] Fix test --- test/integration/TestIM.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/test/integration/TestIM.py b/test/integration/TestIM.py index aa9dff8e..2cbba369 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -513,7 +513,7 @@ def test_45_stats(self): (success, res) = self.server.GetStats('', '', self.auth_data) self.assertTrue( success, msg="ERROR calling GetStats: " + str(res)) - self.assertEqual(len(res), 3, msg="ERROR getting stats: Incorrect number of infrastructures") + self.assertEqual(len(res), 4, msg="ERROR getting stats: Incorrect number of infrastructures") def test_50_destroy(self): """ From fe773586d97ae5317cb51458ccfcb10aac2c42a3 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Tue, 26 Nov 2024 10:17:22 +0100 Subject: [PATCH 37/53] Fix ansible-galaxy path --- IM/CtxtAgentBase.py | 4 ++-- test/unit/test_ctxt_agent.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/IM/CtxtAgentBase.py b/IM/CtxtAgentBase.py index 15457ccb..486789b1 100644 --- a/IM/CtxtAgentBase.py +++ b/IM/CtxtAgentBase.py @@ -508,7 +508,7 @@ def install_ansible_roles(self, general_conf_data, playbook): task["name"] = "Create YAML file to install the collections with ansible-galaxy" yaml_data[0]['tasks'].append(task) - task = {"command": "ansible-galaxy collection install -c -r %s" % filename} + task = {"command": "/var/tmp/.ansible/bin/ansible-galaxy collection install -c -r %s" % filename} task["name"] = "Install galaxy collections" task["become"] = "yes" task["register"] = "collections_install" @@ -562,7 +562,7 @@ def install_ansible_roles(self, general_conf_data, playbook): task["name"] = "Create YAML file to install the roles with ansible-galaxy" yaml_data[0]['tasks'].append(task) - task = {"command": "ansible-galaxy install -c -r %s" % filename} + task = {"command": "/var/tmp/.ansible/bin/ansible-galaxy install -c -r %s" % filename} task["name"] = "Install galaxy roles" task["become"] = "yes" task["register"] = "roles_install" diff --git a/test/unit/test_ctxt_agent.py b/test/unit/test_ctxt_agent.py 
index bc4894b2..7354646f 100755 --- a/test/unit/test_ctxt_agent.py +++ b/test/unit/test_ctxt_agent.py @@ -277,7 +277,8 @@ def test_95_install_ansible_roles(self): copy_content = yaml_data[0]['tasks'][1]['copy'][pos + 9:-2] self.assertEqual(copy_content, "[{src: ansible_role}, {name: hadoop, src: " "'git+https://github.com/micafer/ansible-role-hadoop'}]") - self.assertEqual(yaml_data[0]['tasks'][2]['command'][:47], "ansible-galaxy install -c -r /tmp/galaxy_roles_") + self.assertEqual(yaml_data[0]['tasks'][2]['command'][:69], + "/var/tmp/.ansible/bin/ansible-galaxy install -c -r /tmp/galaxy_roles_") os.unlink(res) @@ -300,8 +301,8 @@ def test_99_install_ansible_collections(self): pos = yaml_data[0]['tasks'][0]['copy'].find('content="') copy_content = yaml_data[0]['tasks'][0]['copy'][pos + 9:-2] self.assertEqual(copy_content, "{collections: [{name: ns.collection, version: '1.0'}]}") - self.assertEqual(yaml_data[0]['tasks'][1]['command'][:64], - "ansible-galaxy collection install -c -r /tmp/galaxy_collections_") + self.assertEqual(yaml_data[0]['tasks'][1]['command'][:86], + "/var/tmp/.ansible/bin/ansible-galaxy collection install -c -r /tmp/galaxy_collections_") os.unlink(res) From 48eec8b324f6deb78fdaa9fae6c9b2ffeb3cf7af Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Tue, 26 Nov 2024 13:01:45 +0100 Subject: [PATCH 38/53] Fix ctxt dist path --- IM/ConfManager.py | 5 +++-- IM/CtxtAgentBase.py | 5 +++-- contextualization/ctxt_agent_dist.py | 3 ++- test/unit/test_ctxt_agent.py | 4 ++-- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index fd2997b4..ff3125d1 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -56,6 +56,7 @@ from IM.recipe import Recipe from IM.config import Config from radl.radl import system, contextualize_item +from IM.CtxtAgentBase import CtxtAgentBase class ConfManager(LoggerMixin, threading.Thread): @@ -397,8 +398,8 @@ def launch_ctxt_agent(self, vm, tasks): vault_password = 
vm.info.systems[0].getValue("vault.password") if vault_password: vault_export = "export VAULT_PASS='%s' && " % vault_password - (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + "/var/tmp/.ansible/bin/python3 " + - Config.REMOTE_CONF_DIR + + (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + CtxtAgentBase.VENV_DIR + + "/bin/python3 " + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + ctxt_agent_command + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + "/general_info.cfg " + remote_dir + "/" + os.path.basename(conf_file) + diff --git a/IM/CtxtAgentBase.py b/IM/CtxtAgentBase.py index 486789b1..a88b0953 100644 --- a/IM/CtxtAgentBase.py +++ b/IM/CtxtAgentBase.py @@ -37,6 +37,7 @@ class CtxtAgentBase: # the ConfManager PLAYBOOK_RETRIES = 1 INTERNAL_PLAYBOOK_RETRIES = 1 + VENV_DIR = "/var/tmp/.ansible" def __init__(self, conf_data_filename): self.logger = None @@ -508,7 +509,7 @@ def install_ansible_roles(self, general_conf_data, playbook): task["name"] = "Create YAML file to install the collections with ansible-galaxy" yaml_data[0]['tasks'].append(task) - task = {"command": "/var/tmp/.ansible/bin/ansible-galaxy collection install -c -r %s" % filename} + task = {"command": self.VENV_DIR + "/bin/ansible-galaxy collection install -c -r %s" % filename} task["name"] = "Install galaxy collections" task["become"] = "yes" task["register"] = "collections_install" @@ -562,7 +563,7 @@ def install_ansible_roles(self, general_conf_data, playbook): task["name"] = "Create YAML file to install the roles with ansible-galaxy" yaml_data[0]['tasks'].append(task) - task = {"command": "/var/tmp/.ansible/bin/ansible-galaxy install -c -r %s" % filename} + task = {"command": self.VENV_DIR + "/bin/ansible-galaxy install -c -r %s" % filename} task["name"] = "Install galaxy roles" task["become"] = "yes" task["register"] = "roles_install" diff --git a/contextualization/ctxt_agent_dist.py b/contextualization/ctxt_agent_dist.py index f96a8a01..ac5c1e77 100755 --- 
a/contextualization/ctxt_agent_dist.py +++ b/contextualization/ctxt_agent_dist.py @@ -162,7 +162,8 @@ def LaunchRemoteAgent(self, vm, vault_pass, pk_file, changed_pass_ok): vm_dir = os.path.abspath(os.path.dirname(self.vm_conf_data_filename)) remote_dir = os.path.abspath(os.path.dirname(self.conf_data_filename)) try: - (pid, _, _) = ssh_client.execute(vault_export + "nohup python3 " + remote_dir + "/ctxt_agent_dist.py " + + (pid, _, _) = ssh_client.execute(vault_export + "nohup " + CtxtAgentBase.VENV_DIR + "/bin/python3 " + + remote_dir + "/ctxt_agent_dist.py " + self.conf_data_filename + " " + self.vm_conf_data_filename + " 1 > " + vm_dir + "/stdout 2> " + vm_dir + "/stderr < /dev/null & echo -n $!") diff --git a/test/unit/test_ctxt_agent.py b/test/unit/test_ctxt_agent.py index 7354646f..3f9dac14 100755 --- a/test/unit/test_ctxt_agent.py +++ b/test/unit/test_ctxt_agent.py @@ -278,7 +278,7 @@ def test_95_install_ansible_roles(self): self.assertEqual(copy_content, "[{src: ansible_role}, {name: hadoop, src: " "'git+https://github.com/micafer/ansible-role-hadoop'}]") self.assertEqual(yaml_data[0]['tasks'][2]['command'][:69], - "/var/tmp/.ansible/bin/ansible-galaxy install -c -r /tmp/galaxy_roles_") + ctxt_agent.VENV_DIR + "/bin/ansible-galaxy install -c -r /tmp/galaxy_roles_") os.unlink(res) @@ -302,7 +302,7 @@ def test_99_install_ansible_collections(self): copy_content = yaml_data[0]['tasks'][0]['copy'][pos + 9:-2] self.assertEqual(copy_content, "{collections: [{name: ns.collection, version: '1.0'}]}") self.assertEqual(yaml_data[0]['tasks'][1]['command'][:86], - "/var/tmp/.ansible/bin/ansible-galaxy collection install -c -r /tmp/galaxy_collections_") + ctxt_agent.VENV_DIR + "/bin/ansible-galaxy collection install -c -r /tmp/galaxy_collections_") os.unlink(res) From b596d223fd8d68c00e94cf2287af20abd23ddaac Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Tue, 26 Nov 2024 16:30:51 +0100 Subject: [PATCH 39/53] Fix ctxt dist path --- IM/ConfManager.py | 5 +++-- 
contextualization/ctxt_agent_dist.py | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index ff3125d1..438619e7 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -391,15 +391,16 @@ def launch_ctxt_agent(self, vm, tasks): if len(self.inf.get_vm_list()) > Config.VM_NUM_USE_CTXT_DIST: self.log_info("Using ctxt_agent_dist") ctxt_agent_command = "/ctxt_agent_dist.py " + python_path = "python3 " else: self.log_info("Using ctxt_agent") ctxt_agent_command = "/ctxt_agent.py " + python_path = CtxtAgentBase.VENV_DIR + "/bin/python3 " vault_export = "" vault_password = vm.info.systems[0].getValue("vault.password") if vault_password: vault_export = "export VAULT_PASS='%s' && " % vault_password - (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + CtxtAgentBase.VENV_DIR + - "/bin/python3 " + Config.REMOTE_CONF_DIR + + (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + python_path + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + ctxt_agent_command + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + "/general_info.cfg " + remote_dir + "/" + os.path.basename(conf_file) + diff --git a/contextualization/ctxt_agent_dist.py b/contextualization/ctxt_agent_dist.py index ac5c1e77..f96a8a01 100755 --- a/contextualization/ctxt_agent_dist.py +++ b/contextualization/ctxt_agent_dist.py @@ -162,8 +162,7 @@ def LaunchRemoteAgent(self, vm, vault_pass, pk_file, changed_pass_ok): vm_dir = os.path.abspath(os.path.dirname(self.vm_conf_data_filename)) remote_dir = os.path.abspath(os.path.dirname(self.conf_data_filename)) try: - (pid, _, _) = ssh_client.execute(vault_export + "nohup " + CtxtAgentBase.VENV_DIR + "/bin/python3 " + - remote_dir + "/ctxt_agent_dist.py " + + (pid, _, _) = ssh_client.execute(vault_export + "nohup python3 " + remote_dir + "/ctxt_agent_dist.py " + self.conf_data_filename + " " + self.vm_conf_data_filename + " 1 > " + vm_dir + "/stdout 2> " + vm_dir + "/stderr < /dev/null & echo -n 
$!") From d34b9cb1762af41b9ea123610f8e55084219238e Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Tue, 26 Nov 2024 16:53:53 +0100 Subject: [PATCH 40/53] Fix ctxt dist path --- IM/ConfManager.py | 5 ++--- contextualization/ansible_install.sh | 4 ++++ contextualization/ctxt_agent_dist.py | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 438619e7..ff3125d1 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -391,16 +391,15 @@ def launch_ctxt_agent(self, vm, tasks): if len(self.inf.get_vm_list()) > Config.VM_NUM_USE_CTXT_DIST: self.log_info("Using ctxt_agent_dist") ctxt_agent_command = "/ctxt_agent_dist.py " - python_path = "python3 " else: self.log_info("Using ctxt_agent") ctxt_agent_command = "/ctxt_agent.py " - python_path = CtxtAgentBase.VENV_DIR + "/bin/python3 " vault_export = "" vault_password = vm.info.systems[0].getValue("vault.password") if vault_password: vault_export = "export VAULT_PASS='%s' && " % vault_password - (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + python_path + Config.REMOTE_CONF_DIR + + (pid, _, _) = ssh.execute("nohup sh -c \"" + vault_export + CtxtAgentBase.VENV_DIR + + "/bin/python3 " + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + ctxt_agent_command + Config.REMOTE_CONF_DIR + "/" + str(self.inf.id) + "/" + "/general_info.cfg " + remote_dir + "/" + os.path.basename(conf_file) + diff --git a/contextualization/ansible_install.sh b/contextualization/ansible_install.sh index fcdcbb3f..062dd240 100755 --- a/contextualization/ansible_install.sh +++ b/contextualization/ansible_install.sh @@ -39,6 +39,10 @@ distribution_id() { echo ${RETVAL} } +# Create a symbolic link to python3 in case of not venv created +ls /var/tmp/.ansible/bin/ || mkdir -p /var/tmp/.ansible/bin/ +ls /var/tmp/.ansible/bin/python3 || ln -s /usr/bin/python3 /var/tmp/.ansible/bin/python3 + if [ $(which ansible-playbook) ]; then echo "Ansible installed. Do not install." 
else diff --git a/contextualization/ctxt_agent_dist.py b/contextualization/ctxt_agent_dist.py index f96a8a01..ac5c1e77 100755 --- a/contextualization/ctxt_agent_dist.py +++ b/contextualization/ctxt_agent_dist.py @@ -162,7 +162,8 @@ def LaunchRemoteAgent(self, vm, vault_pass, pk_file, changed_pass_ok): vm_dir = os.path.abspath(os.path.dirname(self.vm_conf_data_filename)) remote_dir = os.path.abspath(os.path.dirname(self.conf_data_filename)) try: - (pid, _, _) = ssh_client.execute(vault_export + "nohup python3 " + remote_dir + "/ctxt_agent_dist.py " + + (pid, _, _) = ssh_client.execute(vault_export + "nohup " + CtxtAgentBase.VENV_DIR + "/bin/python3 " + + remote_dir + "/ctxt_agent_dist.py " + self.conf_data_filename + " " + self.vm_conf_data_filename + " 1 > " + vm_dir + "/stdout 2> " + vm_dir + "/stderr < /dev/null & echo -n $!") From 3fc36d73a21ea7695f7d36e2bdae69132c9f5db2 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Wed, 27 Nov 2024 09:46:01 +0100 Subject: [PATCH 41/53] Increase timeout --- test/integration/TestIM.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py index 2cbba369..3e666a81 100755 --- a/test/integration/TestIM.py +++ b/test/integration/TestIM.py @@ -154,7 +154,7 @@ def test_11_create(self): self.__class__.inf_id = inf_id all_configured = self.wait_inf_state( - inf_id, VirtualMachine.CONFIGURED, 2400) + inf_id, VirtualMachine.CONFIGURED, 2700) self.assertTrue( all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).") From 98de1878586c3b8fb6b06ea8342c3aa8180d9260 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 28 Nov 2024 08:19:39 +0100 Subject: [PATCH 42/53] Fix XiB units --- IM/connectors/OSCAR.py | 4 ++-- requirements-tests.txt | 2 +- test/unit/connectors/Fogbow.py | 2 +- test/unit/connectors/GCE.py | 2 +- test/unit/connectors/Kubernetes.py | 6 +++--- test/unit/connectors/OCCI.py | 2 +- test/unit/connectors/OSCAR.py | 6 +++--- 
test/unit/connectors/OpenNebula.py | 2 +- test/unit/test_im_logic.py | 4 ++-- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/IM/connectors/OSCAR.py b/IM/connectors/OSCAR.py index d7dfc3ba..f5d17603 100644 --- a/IM/connectors/OSCAR.py +++ b/IM/connectors/OSCAR.py @@ -134,7 +134,7 @@ def _get_service_json(radl_system): if radl_system.getValue("name"): service["name"] = radl_system.getValue("name") if radl_system.getValue("memory.size"): - service["memory"] = "%dMi" % radl_system.getFeature('memory.size').getValue('M') + service["memory"] = "%dMi" % radl_system.getFeature('memory.size').getValue('Mi') if radl_system.getValue("cpu.count"): service["cpu"] = "%g" % radl_system.getValue("cpu.count") if radl_system.getValue("gpu.count"): @@ -260,7 +260,7 @@ def update_system_info_from_service_info(self, system, service_info): conflict="other", missing="other") if "memory" in service_info and service_info["memory"]: memory = self.convert_memory_unit(service_info["memory"], "Mi") - system.addFeature(Feature("memory.size", "=", memory, "M"), + system.addFeature(Feature("memory.size", "=", memory, "Mi"), conflict="other", missing="other") if "script" in service_info and service_info["script"]: system.addFeature(Feature("script", "=", service_info["script"]), diff --git a/requirements-tests.txt b/requirements-tests.txt index 456356e3..aaddb89f 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -5,7 +5,7 @@ PyYAML cheroot boto3 apache-libcloud >= 3.3.1 -RADL >= 1.3.3 +RADL >= 1.3.4 flask werkzeug netaddr diff --git a/test/unit/connectors/Fogbow.py b/test/unit/connectors/Fogbow.py index ad950027..0625d2ea 100755 --- a/test/unit/connectors/Fogbow.py +++ b/test/unit/connectors/Fogbow.py @@ -300,7 +300,7 @@ def test_30_updateVMInfo(self, sleep, requests): self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(vm.info.systems[0].getValue("net_interface.1.ip"), "10.0.0.1") 
self.assertEqual(vm.info.systems[0].getValue("net_interface.0.ip"), "8.8.8.8") - self.assertEqual(vm.info.systems[0].getValue("memory.size"), 1073741824) + self.assertEqual(vm.info.systems[0].getValue("memory.size"), 1024000000) self.assertEqual(vm.info.systems[0].getValue("disk.1.device"), "/dev/sdb") data = json.loads(requests.call_args_list[1][1]["data"]) diff --git a/test/unit/connectors/GCE.py b/test/unit/connectors/GCE.py index fd8524ea..a9d976aa 100755 --- a/test/unit/connectors/GCE.py +++ b/test/unit/connectors/GCE.py @@ -129,7 +129,7 @@ def test_10_concrete(self, get_driver): concrete = gce_cloud.concreteSystem(radl_system, auth) self.assertEqual(len(concrete), 1) - self.assertEqual(concrete[0].getValue("memory.size"), 2147483648) + self.assertEqual(concrete[0].getValue("memory.size"), 2048000000) self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) @patch('libcloud.compute.drivers.gce.GCENodeDriver') diff --git a/test/unit/connectors/Kubernetes.py b/test/unit/connectors/Kubernetes.py index 3e8667d7..742c7836 100755 --- a/test/unit/connectors/Kubernetes.py +++ b/test/unit/connectors/Kubernetes.py @@ -188,7 +188,7 @@ def test_20_launch(self, save_data, requests): 'labels': {'name': 'test-1'}}, "spec": { "accessModes": ["ReadWriteOnce"], - "resources": {"requests": {"storage": 10737418240}}, + "resources": {"requests": {"storage": 10000000000}}, }, } self.assertEqual(requests.call_args_list[1][0][1], @@ -237,8 +237,8 @@ def test_20_launch(self, save_data, requests): "imagePullPolicy": "Always", "ports": [{"containerPort": 8080, "protocol": "TCP"}], "resources": { - "limits": {"cpu": "1", "memory": "536870912"}, - "requests": {"cpu": "1", "memory": "536870912"}, + "limits": {"cpu": "1", "memory": "512000000"}, + "requests": {"cpu": "1", "memory": "512000000"}, }, "env": [{"name": "var", "value": "some_val"}, {"name": "var2", "value": "some,val2"}], diff --git a/test/unit/connectors/OCCI.py 
b/test/unit/connectors/OCCI.py index 5a967aed..e07e10d3 100755 --- a/test/unit/connectors/OCCI.py +++ b/test/unit/connectors/OCCI.py @@ -327,7 +327,7 @@ def test_30_updateVMInfo(self, get_keystone_uri, requests): self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) memory = vm.info.systems[0].getValue("memory.size") - self.assertEqual(memory, 1824522240) + self.assertEqual(memory, 1740000000) @patch('requests.request') @patch('IM.connectors.OCCI.KeyStoneAuth.get_keystone_uri') diff --git a/test/unit/connectors/OSCAR.py b/test/unit/connectors/OSCAR.py index 9c4a4a83..fa4715a6 100755 --- a/test/unit/connectors/OSCAR.py +++ b/test/unit/connectors/OSCAR.py @@ -124,7 +124,7 @@ def test_20_launch(self, save_data, requests): radl_data = """ system test ( name = 'plants' and - memory.size = 2G and + memory.size = 2GI and cpu.count = 1.0 and cpu.sgx = 1 and gpu.count = 1 and @@ -288,7 +288,7 @@ def test_55_alter(self, requests): new_radl_data = """ system test ( cpu.count>=2 and - memory.size>=4G + memory.size>=4GI )""" new_radl = radl_parse.parse_radl(new_radl_data) @@ -306,7 +306,7 @@ def test_55_alter(self, requests): self.assertTrue(success, msg="ERROR: modifying VM info.") self.assertNotIn("ERROR", self.log.getvalue(), msg="ERROR found in log: %s" % self.log.getvalue()) self.assertEqual(new_vm.info.systems[0].getValue("cpu.count"), 2) - self.assertEqual(new_vm.info.systems[0].getFeature("memory.size").getValue("M"), 4096) + self.assertEqual(new_vm.info.systems[0].getFeature("memory.size").getValue("M"), 4295) self.assertEqual(requests.call_args_list[0][0][0], "PUT") self.assertEqual(requests.call_args_list[0][0][1], "http://oscar.com:80/system/services/fname") self.assertEqual(json.loads(requests.call_args_list[0][1]['data']), {'memory': '4096Mi', 'cpu': '2'}) diff --git a/test/unit/connectors/OpenNebula.py b/test/unit/connectors/OpenNebula.py index 9c897651..d73fad01 100755 --- a/test/unit/connectors/OpenNebula.py +++ 
b/test/unit/connectors/OpenNebula.py @@ -144,7 +144,7 @@ def test_20_launch(self, save_data, getONEVersion, server_proxy): OS = [ ARCH = "x86_64" ] DISK = [ IMAGE_ID = "1" ] - DISK = [ SAVE = no, TYPE = fs , FORMAT = qcow2, SIZE = 1024, TARGET = hdb ] + DISK = [ SAVE = no, TYPE = fs , FORMAT = qcow2, SIZE = 1000, TARGET = hdb ] SCHED_REQUIREMENTS = "CLUSTER_ID=\\"0\\""\n""" diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py index 2eea95f9..68ce6315 100644 --- a/test/unit/test_im_logic.py +++ b/test/unit/test_im_logic.py @@ -1552,8 +1552,8 @@ def test_estimate_resources(self): 'cloud0': { 'cloudType': 'Dummy', 'cloudEndpoint': 'http://server.com:80/path', - 'compute': [{'cpuCores': 2, 'memoryInMegabytes': 4096, 'diskSizeInGigabytes': 40}, - {'cpuCores': 1, 'memoryInMegabytes': 2048, 'diskSizeInGigabytes': 10}], + 'compute': [{'cpuCores': 2, 'memoryInMegabytes': 4000, 'diskSizeInGigabytes': 40}, + {'cpuCores': 1, 'memoryInMegabytes': 2000, 'diskSizeInGigabytes': 10}], 'storage': [{'sizeInGigabytes': 100}] }}) From 6bf3386792b0f8a502eaf807de777771b79d18b3 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 28 Nov 2024 08:34:53 +0100 Subject: [PATCH 43/53] Fix test --- test/unit/REST.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/unit/REST.py b/test/unit/REST.py index 3b723a9b..f7c8f9ed 100755 --- a/test/unit/REST.py +++ b/test/unit/REST.py @@ -228,8 +228,8 @@ def test_CreateInfrastructure(self, get_infrastructure, CreateInfrastructure): data=read_file_as_bytes("../files/test_simple.json")) self.assertEqual(res.json, {"one": {"cloudType": "OpenNebula", "cloudEndpoint": "http://ramses.i3m.upv.es:2633", - "compute": [{"cpuCores": 1, "memoryInMegabytes": 1024}, - {"cpuCores": 1, "memoryInMegabytes": 1024}], "storage": []}}) + "compute": [{"cpuCores": 1, "memoryInMegabytes": 1074}, + {"cpuCores": 1, "memoryInMegabytes": 1074}], "storage": []}}) headers["Content-Type"] = "application/json" 
CreateInfrastructure.side_effect = InvaliddUserException() From 5a980938b44b4542b398d946e51869472dd16246 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 28 Nov 2024 09:01:09 +0100 Subject: [PATCH 44/53] Change python ver in tests --- .github/workflows/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index dfb7eade..b0b202d4 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -16,7 +16,7 @@ jobs: - name: Set up Python 3. uses: actions/setup-python@v5 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: python -m pip install tox From 348badfc57704d7afaf7245a34a11ac1014579c2 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 28 Nov 2024 09:23:02 +0100 Subject: [PATCH 45/53] Change ansible ver in tests --- requirements-tests.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements-tests.txt b/requirements-tests.txt index aaddb89f..2c8c9ae9 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,5 +1,4 @@ -ansible >= 2.4 -ansible-base +ansible == 8.7.0 paramiko >= 1.14 PyYAML cheroot From 3ce577df4250b7cc83586a4e7c1c057840a6ecdc Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Thu, 28 Nov 2024 09:26:23 +0100 Subject: [PATCH 46/53] Add nosec --- IM/CtxtAgentBase.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/CtxtAgentBase.py b/IM/CtxtAgentBase.py index a88b0953..a38d4487 100644 --- a/IM/CtxtAgentBase.py +++ b/IM/CtxtAgentBase.py @@ -37,7 +37,7 @@ class CtxtAgentBase: # the ConfManager PLAYBOOK_RETRIES = 1 INTERNAL_PLAYBOOK_RETRIES = 1 - VENV_DIR = "/var/tmp/.ansible" + VENV_DIR = "/var/tmp/.ansible" # nosec def __init__(self, conf_data_filename): self.logger = None From 96793c91959607ddcd5e2b2c12340d9f9376b59c Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:21:15 +0100 Subject: [PATCH 47/53] Add bandit tests --- 
.github/workflows/main.yaml | 3 +++ IM/SSH.py | 4 ++-- IM/connectors/Docker.py | 6 +++--- IM/connectors/OCCI.py | 2 +- tox.ini | 2 +- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index b0b202d4..d1e1c950 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -24,6 +24,9 @@ jobs: - name: Check code style run: tox -e style + - name: Check security + run: tox -e bandit + - name: Unit tests run: tox -e coverage diff --git a/IM/SSH.py b/IM/SSH.py index bde5fbba..de142214 100644 --- a/IM/SSH.py +++ b/IM/SSH.py @@ -77,7 +77,7 @@ def run(self): channel = self.client.get_transport().open_session() if self.ssh.tty: channel.get_pty() - channel.exec_command(self.command + "\n") + channel.exec_command(self.command + "\n") # nosec stdout = channel.makefile() stderr = channel.makefile_stderr() exit_status = channel.recv_exit_status() @@ -264,7 +264,7 @@ def execute(self, command, timeout=None): if self.tty: channel.get_pty() - channel.exec_command(command + "\n") + channel.exec_command(command + "\n") # nosec stdout = channel.makefile() stderr = channel.makefile_stderr() exit_status = channel.recv_exit_status() diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index a3855550..e8bd19fb 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -182,7 +182,7 @@ def _generate_create_svc_request_data(self, image_name, outports, vm, ssh_port, command += " ; " command += "mkdir /var/run/sshd" command += " ; " - command += "sed -i '/PermitRootLogin/c\PermitRootLogin yes' /etc/ssh/sshd_config" + command += "sed -i '/PermitRootLogin/c\\PermitRootLogin yes' /etc/ssh/sshd_config" command += " ; " command += "rm -f /etc/ssh/ssh_host_rsa_key*" command += " ; " @@ -190,7 +190,7 @@ def _generate_create_svc_request_data(self, image_name, outports, vm, ssh_port, command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 
's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += "sed 's@session\\s*required\\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" command += " ; " command += " /usr/sbin/sshd -D" @@ -264,7 +264,7 @@ def _generate_create_cont_request_data(self, image_name, outports, vm, ssh_port) command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += "sed 's@session\\s*required\\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" command += " ; " command += " /usr/sbin/sshd -D" diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py index c2125e9c..f7716430 100644 --- a/IM/connectors/OCCI.py +++ b/IM/connectors/OCCI.py @@ -229,7 +229,7 @@ def get_net_info(occi_res): mask) for mask in Config.PRIVATE_NET_MASKS]) elif kv[0].strip() == "occi.networkinterface.interface": net_interface = kv[1].strip('"') - num_interface = re.findall('\d+', net_interface)[0] + num_interface = re.findall(r'\d+', net_interface)[0] elif kv[0].strip() == "self": link = kv[1].strip('"') if num_interface and ip_address: diff --git a/tox.ini b/tox.ini index 7a1f1e46..934616b2 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m coverage run --source=. 
-m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -f html -o bandit.html -s B108,B601,B608,B507,B104 -ll +commands = bandit IM -r -f html -o bandit.html -ll [flake8] ignore = E402,E265,W605,W504,F811 From 74ef9465203aa854fa4e10e5a5e87d34789db9c5 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:25:06 +0100 Subject: [PATCH 48/53] Fix style --- IM/connectors/Docker.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/IM/connectors/Docker.py b/IM/connectors/Docker.py index e8bd19fb..d93a708c 100644 --- a/IM/connectors/Docker.py +++ b/IM/connectors/Docker.py @@ -190,7 +190,8 @@ def _generate_create_svc_request_data(self, image_name, outports, vm, ssh_port, command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 's@session\\s*required\\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += ("sed 's@session\\s*required\\s*pam_loginuid.so@session " + + "optional pam_loginuid.so@g' -i /etc/pam.d/sshd") command += " ; " command += " /usr/sbin/sshd -D" @@ -264,7 +265,8 @@ def _generate_create_cont_request_data(self, image_name, outports, vm, ssh_port) command += " ; " command += "echo 'root:" + self._root_password + "' | chpasswd" command += " ; " - command += "sed 's@session\\s*required\\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd" + command += ("sed 's@session\\s*required\\s*pam_loginuid.so@session" + + " optional pam_loginuid.so@g' -i /etc/pam.d/sshd") command += " ; " command += " /usr/sbin/sshd -D" From 149d9b3991f28dcc866d9038036a973595a9e9bd Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:27:44 +0100 Subject: [PATCH 49/53] change bandit command --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 934616b2..05c22d08 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m 
coverage run --source=. -m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -f html -o bandit.html -ll +commands = bandit IM -r -ll [flake8] ignore = E402,E265,W605,W504,F811 From ac6a6a4b422b3c9a5d47ebc776b5358ee1258a97 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:29:39 +0100 Subject: [PATCH 50/53] change bandit command --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 05c22d08..44a44f25 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m coverage run --source=. -m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -ll +commands = bandit IM -r -s B108,B601,B608,B507 -ll [flake8] ignore = E402,E265,W605,W504,F811 From a535f38a6966e9efd5c56521058355c30596621c Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:31:07 +0100 Subject: [PATCH 51/53] change bandit command --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 44a44f25..baf5c7d8 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m coverage run --source=. 
-m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -s B108,B601,B608,B507 -ll +commands = bandit IM -r -s B108,B601,B608,B507,B104 -ll [flake8] ignore = E402,E265,W605,W504,F811 From f044f99d9943b3b27de3a93ee8a5e23e3f75d3dd Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Fri, 29 Nov 2024 09:43:42 +0100 Subject: [PATCH 52/53] Improve bandit sec --- IM/ConfManager.py | 2 +- IM/CtxtAgentBase.py | 8 ++++---- IM/InfrastructureList.py | 7 ++++--- IM/SSH.py | 4 ++-- IM/Stats.py | 2 +- IM/VirtualMachine.py | 2 +- IM/config.py | 6 +++--- IM/tosca/Tosca.py | 2 +- tox.ini | 2 +- 9 files changed, 18 insertions(+), 17 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index ff3125d1..d82f7b9f 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -1384,7 +1384,7 @@ def configure_ansible(self, ssh, tmp_dir, ansible_version=None): if ssh.proxy_host.private_key: priv_key_filename = "/var/tmp/%s_%s_%s.pem" % (ssh.proxy_host.username, ssh.username, - ssh.host) + ssh.host) # nosec # copy it to the proxy host to enable im_client to use it # ssh.proxy_host.sftp_put_content(ssh.proxy_host.private_key, priv_key_filename) # ssh.proxy_host.sftp_chmod(priv_key_filename, 0o600) diff --git a/IM/CtxtAgentBase.py b/IM/CtxtAgentBase.py index a38d4487..3a1a5226 100644 --- a/IM/CtxtAgentBase.py +++ b/IM/CtxtAgentBase.py @@ -289,7 +289,7 @@ def add_proxy_host_line(self, vm_data): # we must create it in the localhost to use it later with ansible priv_key_filename = "/var/tmp/%s_%s_%s.pem" % (proxy['user'], vm_data['user'], - vm_data['ip']) + vm_data['ip']) # nosec with open(priv_key_filename, 'w') as f: f.write(proxy['private_key']) os.chmod(priv_key_filename, 0o600) @@ -502,7 +502,7 @@ def install_ansible_roles(self, general_conf_data, playbook): if galaxy_collections: now = str(int(time.time() * 100)) - filename = "/tmp/galaxy_collections_%s.yml" % now + filename = "/tmp/galaxy_collections_%s.yml" % now # nosec yaml_deps = 
yaml.safe_dump({"collections": galaxy_collections}, default_flow_style=True) self.logger.debug("Galaxy collections file: %s" % yaml_deps) task = {"copy": 'dest=%s content="%s"' % (filename, yaml_deps)} @@ -556,7 +556,7 @@ def install_ansible_roles(self, general_conf_data, playbook): if galaxy_dependencies: now = str(int(time.time() * 100)) - filename = "/tmp/galaxy_roles_%s.yml" % now + filename = "/tmp/galaxy_roles_%s.yml" % now # nosec yaml_deps = yaml.safe_dump(galaxy_dependencies, default_flow_style=True) self.logger.debug("Galaxy depencies file: %s" % yaml_deps) task = {"copy": 'dest=%s content="%s"' % (filename, yaml_deps)} @@ -598,7 +598,7 @@ def LaunchAnsiblePlaybook(self, output, remote_dir, playbook_file, vm, threads, gen_pk_file = pk_file else: if vm['private_key'] and not vm['passwd']: - gen_pk_file = "/tmp/pk_" + vm['ip'] + ".pem" + gen_pk_file = "/tmp/pk_" + vm['ip'] + ".pem" # nosec pk_out = open(gen_pk_file, 'w') pk_out.write(vm['private_key']) pk_out.close() diff --git a/IM/InfrastructureList.py b/IM/InfrastructureList.py index c9b1d99c..b743fce2 100644 --- a/IM/InfrastructureList.py +++ b/IM/InfrastructureList.py @@ -182,12 +182,13 @@ def _get_data_from_db(db_url, inf_id=None, auth=None): if db.db_type == DataBase.MONGO: res = db.find("inf_list", {"id": inf_id}, {data_field: True, "deleted": True}) else: - res = db.select("select " + data_field + ",deleted from inf_list where id = %s", (inf_id,)) + res = db.select("select " + data_field + ",deleted from inf_list where id = %s", # nosec + (inf_id,)) else: if db.db_type == DataBase.MONGO: res = db.find("inf_list", {"deleted": 0}, {data_field: True, "deleted": True}, [('_id', -1)]) else: - res = db.select("select " + data_field + ",deleted from inf_list where deleted = 0" + res = db.select("select " + data_field + ",deleted from inf_list where deleted = 0" # nosec " order by rowid desc") if len(res) > 0: for elem in res: @@ -296,7 +297,7 @@ def _get_inf_ids_from_db(auth=None): where = "where deleted 
= 0 and (%s)" % like else: where = "where deleted = 0" - res = db.select("select id from inf_list %s order by rowid desc" % where) + res = db.select("select id from inf_list %s order by rowid desc" % where) # nosec for elem in res: if db.db_type == DataBase.MONGO: inf_list.append(elem['id']) diff --git a/IM/SSH.py b/IM/SSH.py index de142214..1fda8227 100644 --- a/IM/SSH.py +++ b/IM/SSH.py @@ -182,13 +182,13 @@ def connect(self, time_out=None): return self.client, self.proxy client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # nosec proxy = None proxy_channel = None if self.proxy_host: proxy = paramiko.SSHClient() - proxy.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + proxy.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # nosec proxy.connect(self.proxy_host.host, self.proxy_host.port, username=self.proxy_host.username, password=self.proxy_host.password, pkey=self.proxy_host.private_key_obj) proxy_transport = proxy.get_transport() diff --git a/IM/Stats.py b/IM/Stats.py index 21de2658..b0f13f62 100644 --- a/IM/Stats.py +++ b/IM/Stats.py @@ -125,7 +125,7 @@ def get_stats(init_date="1970-01-01", end_date=None, auth=None): if like: where += " and" where += " date <= '%s'" % end_date - res = db.select("select data, date, id from inf_list %s order by rowid desc" % where) + res = db.select("select data, date, id from inf_list %s order by rowid desc" % where) # nosec for elem in res: if db.db_type == DataBase.MONGO: diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 3210365f..9b6ebc34 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -1135,7 +1135,7 @@ def get_ssh_command(self): reverse_opt = "-R %d:localhost:22" % (self.SSH_REVERSE_BASE_PORT + self.creation_im_id) if ssh.private_key: - filename = "/tmp/%s_%s.pem" % (self.inf.id, self.im_id) + filename = "/tmp/%s_%s.pem" % (self.inf.id, self.im_id) # nosec command = 'echo "%s" > 
%s && chmod 400 %s ' % (ssh.private_key, filename, filename) command += ('&& ssh -N %s -p %s -i %s -o "UserKnownHostsFile=/dev/null"' ' -o "StrictHostKeyChecking=no" %s@%s &' % (reverse_opt, diff --git a/IM/config.py b/IM/config.py index 80478fa2..b71bc179 100644 --- a/IM/config.py +++ b/IM/config.py @@ -58,10 +58,10 @@ class Config: WAIT_SSH_ACCCESS_TIMEOUT = 300 WAIT_PUBLIC_IP_TIMEOUT = 90 XMLRCP_PORT = 8899 - XMLRCP_ADDRESS = "0.0.0.0" + XMLRCP_ADDRESS = "0.0.0.0" # nosec ACTIVATE_REST = True REST_PORT = 8800 - REST_ADDRESS = "0.0.0.0" + REST_ADDRESS = "0.0.0.0" # nosec USER_DB = "" IM_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_FILE = '/var/log/im/inf.log' @@ -85,7 +85,7 @@ class Config: VM_INFO_UPDATE_FREQUENCY = 10 # This value must be always higher than VM_INFO_UPDATE_FREQUENCY VM_INFO_UPDATE_ERROR_GRACE_PERIOD = 120 - REMOTE_CONF_DIR = "/var/tmp/.im" + REMOTE_CONF_DIR = "/var/tmp/.im" # nosec MAX_SSH_ERRORS = 5 PRIVATE_NET_MASKS = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "198.18.0.0/15"] diff --git a/IM/tosca/Tosca.py b/IM/tosca/Tosca.py index 86820239..10d733b5 100644 --- a/IM/tosca/Tosca.py +++ b/IM/tosca/Tosca.py @@ -800,7 +800,7 @@ def _gen_configure_from_interfaces(self, node, compute, interfaces): variables = "" tasks = "" recipe_list = [] - remote_artifacts_path = "/tmp" + remote_artifacts_path = "/tmp" # nosec # Take the interfaces in correct order for name in ['create', 'pre_configure_source', 'pre_configure_target', 'configure_rel', 'configure', 'post_configure_source', 'post_configure_target', 'start', diff --git a/tox.ini b/tox.ini index baf5c7d8..05c22d08 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ commands = python -m coverage run --source=. 
-m unittest discover -v -s test/uni [testenv:bandit] deps = bandit -commands = bandit IM -r -s B108,B601,B608,B507,B104 -ll +commands = bandit IM -r -ll [flake8] ignore = E402,E265,W605,W504,F811 From 185d3813b6fbe2e970507ef7d5d0b076078cdf03 Mon Sep 17 00:00:00 2001 From: Miguel Caballer Date: Mon, 2 Dec 2024 08:50:03 +0100 Subject: [PATCH 53/53] Fix error in RHE --- contextualization/conf-ansible.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/contextualization/conf-ansible.yml b/contextualization/conf-ansible.yml index a8544a95..128e32d7 100644 --- a/contextualization/conf-ansible.yml +++ b/contextualization/conf-ansible.yml @@ -132,6 +132,14 @@ executable: pip3 extra_args: "{{ extra_args }}" + - name: Create virtualenv link in PATH + file: + state: link + src: /usr/local/bin/virtualenv + dest: /usr/local/sbin/virtualenv + when: ansible_os_family == "RedHat" + ignore_errors: yes + # Version over 21 does not work with python 3.6 or older - name: Upgrade pip in py3.6- pip: