From d573d0aea39ad8b2da69043450709db1b49661bf Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 5 Aug 2018 17:37:52 +0200 Subject: [PATCH 1/9] Rewritten the openstack utils to used the openstacksdk, doesn't yet properly replace the old code yet --- common/lib/openstackUtils.py | 63 +++-- common/lib/openstackUtils2.py | 419 ++++++++++++++++++++++++++++++++++ wistar/configuration.py | 11 + 3 files changed, 478 insertions(+), 15 deletions(-) create mode 100644 common/lib/openstackUtils2.py diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index 5c5fe7b..f78f13b 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -26,6 +26,12 @@ from wistar import configuration +import openstack +from openstack.config import loader +from openstack import utils + +import openstackUtils2 + # OpenStack component URLs # _glance_url = ':9292/v1' _analytics_url = ':8081' @@ -63,6 +69,8 @@ def connect_to_openstack(): logger.debug("--- connect_to_openstack ---") + return openstackUtils2.connect_to_openstack() + logger.debug('verify configuration') if not hasattr(configuration, 'openstack_host'): @@ -102,7 +110,7 @@ def connect_to_openstack(): "password": { "user": { "name": "%s", - "domain": { "id": "default" }, + "domain": { "id": "%s" }, "password": "%s" } } @@ -110,18 +118,22 @@ def connect_to_openstack(): "scope": { "project": { "domain": { - "id": "default" + "id": "%s" }, - "name": "admin" + "name": "%s" } } } } - """ % (configuration.openstack_user, configuration.openstack_password) + """ % (configuration.openstack_user, + configuration.openstack_domain, + configuration.openstack_password, + configuration.openstack_domain, + configuration.openstack_project) try: _auth_token = "" - request = urllib2.Request("http://" + configuration.openstack_host + _auth_url) + request = urllib2.Request(http_or_https() + configuration.openstack_host + _auth_url) request.add_header("Content-Type", "application/json") request.add_header("charset", "UTF-8") request.add_header("Content-Length", len(_auth_json)) @@ -164,7 +176,7 @@ def get_project_auth_token(project): "password": { "user": { "name": "%s", - "domain": { "id": "default" }, + "domain": { "id": "%s" }, "password": "%s" } } @@ -172,17 +184,21 @@ def get_project_auth_token(project): "scope": { "project": { "domain": { - "id": "default" + "id": "%s" }, "name": "%s" } } } } - """ % (configuration.openstack_user, configuration.openstack_password, project) + """ % (configuration.openstack_user, + configuration.openstack_domain, + configuration.openstack_password, + configuration.openstack_domain, + configuration.openstack_project) try: - request = urllib2.Request("http://" + configuration.openstack_host + _auth_url) + request = urllib2.Request(http_or_https() + configuration.openstack_host + _auth_url) request.add_header("Content-Type", "application/json") request.add_header("charset", "UTF-8") request.add_header("Content-Length", len(_auth_json)) @@ -203,7 +219,8 @@ def get_project_id(project_name): :return: string UUID or None """ - logger.debug("--- get_project_id ---") + logger.debug("--- get_project_id ---" + project_name) + return openstackUtils2.get_project_id(project_name) projects_url = create_os_url('/projects') projects_string = do_get(projects_url) @@ -365,6 +382,10 @@ def get_consumed_management_ips(): get all reserved management ips :return: list of dicts """ + + + return open + Utils2.get_consumed_management_ips() consumed_ips = list() ports_string = 
get_neutron_ports_for_network(configuration.openstack_mgmt_network) if ports_string is None: @@ -395,6 +416,8 @@ def get_glance_image_list(): """ logger.debug("--- get_glance_image_list ---") + return openstackUtils2.get_glance_image_list() + url = create_glance_url("/images") image_list_string = do_get(url) @@ -432,6 +455,7 @@ def get_glance_image_detail(glance_id): :return: json response from glance /images/glance_id URL """ logger.debug("--- get_glance_image_detail ---") + return openstackUtils2.get_glance_image_detail(glance_id) url = create_glance_url("/images/%s" % glance_id) image_string = do_get(url) @@ -447,6 +471,7 @@ def get_glance_image_detail_by_name(image_name): :return: json response from glance /images?name=image_name URL or None """ logger.debug("--- get_glance_image_detail ---") + return openstackUtils2.get_glance_image_detail_by_name(image_name) url = create_glance_url("/images?name=%s" % image_name) image_string = do_get(url) @@ -478,6 +503,7 @@ def get_image_id_for_name(image_name): :return: glance id or None on failure """ logger.debug("--- get_image_id_for_name ---") + return openstackUtils2.get_image_id_for_name(image_name) image_detail = get_glance_image_detail_by_name(image_name) if 'name' in image_detail and image_detail['name'] == image_name: @@ -494,6 +520,7 @@ def get_stack_details(stack_name): :return: stack object or None if not found! """ logger.debug("--- get_stack_details ---") + return openstackUtils2.get_stack_details(stack_name) url = create_heat_url("/%s/stacks" % _tenant_id) @@ -532,6 +559,8 @@ def delete_stack(stack_name): """ logger.debug("--- delete_stack ---") + return openstackUtils2.delete_stack(stack_name) + stack_details = get_stack_details(stack_name) if stack_details is None: return None @@ -547,6 +576,7 @@ def get_nova_flavors(project_name): :return: JSON encoded string """ logger.debug("--- get_nova_flavors ---") + return openstackUtils2.get_nova_flavors(project_name) project_id = get_project_id(project_name) url = create_nova_url("/" + project_id + '/flavors/detail') return do_get(url) @@ -563,6 +593,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): """ logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk)) + return openstackUtils2.get_minimum_flavor_for_specs(project_name, cpu, ram, disk) # create an emergency flavor so we have something to return in case we can't connect to openstack # or some other issue prevents us from determining the right thing to do @@ -661,6 +692,8 @@ def create_stack(stack_name, template_string): :return: JSON response from HEAT-API or None on failure """ logger.debug("--- create_stack ---") + return openstackUtils2.create_stack(stack_name, template_string) + url = create_heat_url("/" + str(_tenant_id) + "/stacks") data = '''{ @@ -728,23 +761,23 @@ def get_nova_serial_console(instance_name): # URL Utility functions def create_glance_url(url): - return "http://" + configuration.openstack_host + _glance_url + url + return http_or_https() + configuration.openstack_glance + _glance_url + url def create_neutron_url(url): - return "http://" + configuration.openstack_host + _neutron_url + url + return http_or_https() + configuration.openstack_host + _neutron_url + url def create_os_url(url): - return "http://" + configuration.openstack_host + _os_url + url + return http_or_https() + configuration.openstack_host + _os_url + url def create_heat_url(url): - return "http://" + configuration.openstack_host + _heat_url + url + return http_or_https() + configuration.openstack_host + 
_heat_url + url def create_nova_url(url): - return "http://" + configuration.openstack_host + _nova_url + url + return http_or_https + configuration.openstack_host + _nova_url + url # Utility REST functions below diff --git a/common/lib/openstackUtils2.py b/common/lib/openstackUtils2.py new file mode 100644 index 0000000..2909fb9 --- /dev/null +++ b/common/lib/openstackUtils2.py @@ -0,0 +1,419 @@ +import json +import logging +import mmap +import time + +from wistar import configuration + +import openstack +from openstack.config import loader +from openstack import utils +from openstack.cloud import OpenStackCloud +from keystoneauth1.exceptions.http import Unauthorized as ErrorUnauthorized + + +""" +ajax/views.py: stack_details = openstackUtils.get_stack_details(stack_name) +ajax/views.py: stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) +ajax/views.py: if not openstackUtils.connect_to_openstack(): +ajax/views.py: tenant_id = openstackUtils.get_project_id(configuration.openstack_project) +ajax/views.py: logger.debug(openstackUtils.create_stack(stack_name, heat_template)) +ajax/views.py: if openstackUtils.connect_to_openstack(): +ajax/views.py: logger.debug(openstackUtils.delete_stack(stack_name)) +api/views.py:from common.lib import openstackUtils +api/views.py: if openstackUtils.connect_to_openstack(): +common/lib/consoleUtils.py:import openstackUtils +common/lib/consoleUtils.py: if openstackUtils.connect_to_openstack(): +common/lib/consoleUtils.py: ws_url = openstackUtils.get_nova_serial_console(name) +common/lib/imageUtils.py:import openstackUtils +common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): +common/lib/imageUtils.py: glance_detail = openstackUtils.get_glance_image_detail(image_id) +common/lib/imageUtils.py: return openstackUtils.get_glance_image_detail_by_name(image_detail['name']) +common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): +common/lib/imageUtils.py: images = openstackUtils.get_glance_image_list() +common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): +common/lib/imageUtils.py: images = openstackUtils.get_glance_image_list() +common/lib/wistarUtils.py:import openstackUtils +common/lib/wistarUtils.py: flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, +common/lib/wistarUtils.py: if openstackUtils.connect_to_openstack(): +common/lib/wistarUtils.py: dhcp_leases = openstackUtils.get_consumed_management_ips() +images/views.py:from common.lib import openstackUtils +images/views.py: openstackUtils.connect_to_openstack() +images/views.py: glance_id = openstackUtils.get_image_id_for_name(image.name) +images/views.py: if openstackUtils.connect_to_openstack(): +images/views.py: glance_id = openstackUtils.get_image_id_for_name(image.name) +images/views.py: glance_json = openstackUtils.get_glance_image_detail(glance_id) +images/views.py: if openstackUtils.connect_to_openstack(): +images/views.py: image_list = openstackUtils.list_glance_images() +images/views.py: if openstackUtils.connect_to_openstack(): +images/views.py: openstackUtils.upload_image_to_glance(image.name, image.filePath.path) +images/views.py: if openstackUtils.connect_to_openstack(): +images/views.py: image_details = openstackUtils.get_glance_image_detail(glance_id) +topologies/views.py:from common.lib import openstackUtils +topologies/views.py: if openstackUtils.connect_to_openstack(): +topologies/views.py: logger.debug(openstackUtils.delete_stack(stack_name)) +""" + +logger = 
logging.getLogger(__name__) + + +def create_connection(): + """ + Creates an connection object based on the configuration mode + Either uses the openstacksdk mode which searches for clouds.yaml + Or uses the configuration options + """ + if configuration.openstack_mode == "auto": + return openstack.connect(cloud=configuration.openstack_cloud) + else: + return openstack.connect( + auth_url=configuration.openstack_host, + project_name=configuration.openstack_project, + username=configuration.openstack_user, + password=configuration.openstack_password, + region_name=configuration.openstack_region + ) + + +def connect_to_openstack(): + """ + Tries to connect to the selected openstack cloud + """ + logger.debug("--- connect_to_openstack ---") + + connection = create_connection() + try: + connection.authorize() + return True + except ErrorUnauthorized: + return False + +def get_glance_image_list(): + # logger.debug("--- get_glance_image_list ---") + + connection = create_connection() + + images = connection.image.images() + + return [image.to_dict() for image in images if image.status == "active"] + + +def get_glance_image_detail(image_id): + logger.debug("---get_glance_image-detail_by_id") + connection = create_connection() + + result = connection.image.get_image(image_id) + if result is None: + return None + return result.to_dict() + +def get_glance_image_detail_by_name(image_name): + logger.debug("-- get glance image detail by name") + connection = create_connection() + + result = connection.image.find_image(image_name) + if result is None: + return None + else: + return result.to_dict() + + + +def get_image_id_for_name(image_name): + connection = create_connection() + + result = connection.image.find_image(image_name) + if result is None: + return None + else: + return result.to_dict()["id"] + +def upload_image_to_glance(name, image_file_path): + """ + :param name: name of the image to be created + :param image_file_path: path of the file to upload + :return: json encoded results string + """ + #FIXME this is not properly checked yet + connection = create_connection() + + image_attrs = dict() + image_attrs['disk_format'] = 'qcow2' + image_attrs['container_format'] = 'bare' + image_attrs['name'] = name + + f = open(image_file_path, 'rb') + fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + + image_attrs['data'] = fio + + connection.images.upload_image(**image_attrs) + + +def get_nova_flavors(project_name): + connection = create_connection() + + all_flavors = connection.compute.flavors() + + flavor_dicts = [flavor.to_dict() for flavor in all_flavors] + + logger.debug("FLAVORS") + logger.debug(str(flavor_dicts)) + + return json.dumps(flavor_dicts) + # return [flavor.to_dict() for flavor in connection.compute.flavors()] + +def get_nova_serial_console(instance_name): + """ + Get the websocket URL for the serial proxy for a given nova server (instance) + :param instance_name: name of the instance + :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx + """ + #FIXME no proper openstacksdk implementation yet + connection = create_connection() + server = connection.compute.find_server(instance_name) + + if server == None: + return None + # Trying to get the console via a manual query + + # First build the cor + + cloud = OpenStackCloud() + project_id = cloud.current_project_id + + data = '{"os-getVNCConsole": {"type": "novnc"}}' + url = create_nova_url('/%s/servers/%s/action' % (project_id, server.id)) + logger.debug("nova console: trying: " + str(url)) + try: + project_auth_token = 
connection.authorize() + request = urllib2.Request(url) + request.add_header("Content-Type", "application/json") + request.add_header("charset", "UTF-8") + request.add_header("X-Auth-Token", project_auth_token) + request.get_method = lambda: 'POST' + result = urllib2.urlopen(request, data) + console_json_data = json.loads(result.read()) + logger.debug(json.dumps(console_json_data, indent=2)) + return console_json_data["console"]["url"] + except URLError as e: + logger.error("Could not get serial console to instance: %s" % instance_name) + logger.error("error was %s" % str(e)) + return None + + + +def create_nova_url(url): + """ + Creates a nova url based on the service and endpoint in the sdk + """ + conn = create_connection() + + nova_id = conn.identity.find_service("nova").id + + endpoint_query == { + "service_id": nova_id, + "interface": "public" + } + + # This should only give one result + endpoint = conn.identity.endpoints(**endpoint_query) + + return endpoint[0].url + url + + + + +def get_project_id(project_name): + """ + :param project_name: name of the project to search for + """ + + connection = create_connection() + cloud = OpenStackCloud() + logger.debug("--get project id") + return cloud.current_project_id + logger.debug("--- all projects--") + logger.debug(str(connection.__dict__)) + logger.debug("--properties") + for project in connection.identity.projects(user_id=cloud.current_user_id): + logger.debug(str(project)) + logger.debug("Find project") + result = connection.identity.find_project(project_name, user_id=cloud.current_user_id) + if result is None: + return None + else: + return result.to_dict()["id"] + + +def get_consumed_management_ips(): + """ + Return a list of dicts of the format + [ + { "ip-address": "xxx.xxx.xxx.xxx"} + ] + This mimics the libvirt dnsmasq format for dhcp reservations + This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to + get all reserved management ips + :return: list of dicts + """ + ips = [] + connection = create_connection() + + mgmt_network = connection.network.find_network(configuration.openstack_mgmt_network) + if mgmt_network is None: + return ips + + for port in connection.network.ports(network_id=mgmt_network.id): + for fixed_ip in port.fixed_ips: + fip = {} + logger.debug(fixed_ip) + fip["ip-address"] = fixed_ip["ip_address"] + ips.append(fip) + + logger.debug(str(ips)) + return ips + + +def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): + """ + Query nova to get all flavors and return the flavor that best matches our desired constraints + :param project_name: name of the project to check for flavors + :param cpu: number of cores desired + :param ram: amount of ram desired in MB + :param disk: amount of disk required in GB + :return: flavor object {"name": "m1.xlarge"} + """ + + emergency_flavor = dict() + emergency_flavor['name'] = "m1.large" + + connection = create_connection() + logger.debug("Trying to determine minumum flavor") + flavors = connection.compute.flavors() + flavors = [flavor.to_dict() for flavor in flavors] + + cpu_candidates = list() + ram_candidates = list() + disk_candidates = list() + logger.debug("checking flavors") + + # first, let's see if we have an exact match! + for f in flavors: + logger.debug("checking flavor: " + f["name"]) + if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk: + return f + + logger.debug("not exact match yet") + # we don't have an exact match yet! 
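# Aside (illustrative, not part of the original patch): the exact-match test
# above compares f["vcpus"] == cpu without any type conversion, while the
# fallback filtering below uses int(cpu). If callers pass cpu/ram/disk in as
# strings, the exact match can never fire even when a matching flavor exists.
# A minimal normalisation at the top of the function, assuming the inputs may
# arrive as strings, would keep both paths consistent:
#
#     cpu, ram, disk = int(cpu), int(ram), int(disk)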
+ for f in flavors: + logger.debug(str(f["vcpus"]) + " " + str(cpu)) + if "vcpus" in f and f["vcpus"] >= int(cpu): + cpu_candidates.append(f) + + logger.debug("got cpu candidates: " + str(len(cpu_candidates))) + + for f in cpu_candidates: + if "ram" in f and f["ram"] >= ram: + ram_candidates.append(f) + + logger.debug("got ram candidates: " + str(len(ram_candidates))) + + for f in ram_candidates: + if "disk" in f and f["disk"] >= disk: + disk_candidates.append(f) + + logger.debug("got disk candidates: " + str(len(disk_candidates))) + + if len(disk_candidates) == 0: + # uh-oh, just return the largest and hope for the best! + return emergency_flavor + elif len(disk_candidates) == 1: + return disk_candidates[0] + else: + # we have more than one candidate left + # let's find the smallest flavor left! + cpu_low = 99 + disk_low = 999 + ram_low = 99999 + for f in disk_candidates: + if f["vcpus"] < cpu_low: + cpu_low = f["vcpus"] + if f["ram"] < ram_low: + ram_low = f["ram"] + if f["disk"] < disk_low: + disk_low = f["disk"] + + for f in disk_candidates: + if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low: + # found the lowest available + logger.debug("return lowest across all axis") + return f + for f in disk_candidates: + if f["vcpus"] == cpu_low and f["ram"] == ram_low: + # lowest available along ram and cpu axis + logger.debug("return lowest across cpu and ram") + return f + for f in disk_candidates: + if f["vcpus"] == cpu: + logger.debug("return lowest cpu only") + logger.debug(f) + return f + + # should not arrive here :-/ + logger.debug("got to the impossible") + return disk_candidates[0] + +def create_stack(stack_name, template_string): + """ + Creates a Stack via a HEAT template + :param stack_name: name of the stack to create + :param template_string: HEAT template to be used + :return: JSON response from HEAT-API or None on failure + """ + + connection = create_connection() + + template = json.loads(template_string) + + heat_data = {} + heat_data["name"] = stack_name + heat_data["template"] = template + + # result = connection.orchestration.create_stack({"name"}) + + result = connection.orchestration.create_stack(preview=False, **heat_data) + logger.debug(result) + return result + +def delete_stack(stack_name): + """ + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API + """ + connection = create_connection() + + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + connection.orchestration.delete_stack(stack_details["id"]) + +def get_stack_details(stack_name): + """ + Returns python object representing Stack details + :param stack_name: name of the stack to find + :return: stack object or None if not found! 
+ """ + logger.debug("--- get_stack_details ---") + + connection = create_connection() + + result = connection.orchestration.find_stack(stack_name) + if result is None: + logger.debug("stack doesn't exist yet") + return None + + else: + return result.to_dict() \ No newline at end of file diff --git a/wistar/configuration.py b/wistar/configuration.py index 592e538..18004cc 100644 --- a/wistar/configuration.py +++ b/wistar/configuration.py @@ -66,6 +66,15 @@ # some version of openstack use '/dashboard', '/horizon', or '/' openstack_horizon_url = "http://10.10.10.10" +# Selects the method to authenticate against openstack +# can be "manual" or "auto" +# Manual uses the configuration data in this configuration file +# Auto uses the openstacksdk to search for authenication details in clouds.yaml files +# On "auto", it uses the openstack_cloud variable to select to cloud in the clouds.yaml +# https://docs.openstack.org/openstacksdk/latest/user/guides/connect_from_config.html +openstack_mode = "auto" +openstack_cloud = "openstack" + # authentication parameters openstack_host = '10.10.10.10' openstack_user = 'admin' @@ -73,6 +82,8 @@ # project under which to place all topologies/stacks openstack_project = 'admin' +# The region for the cloud +openstack_region = "RegionOne" openstack_mgmt_network = 'wistar_mgmt' openstack_external_network = 'public-br-eth0' From 91d0b062c84f58fcd890411c0d32813f8d702605 Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 5 Aug 2018 17:51:16 +0200 Subject: [PATCH 2/9] Complete replacement with openstacksdk --- common/lib/openstackUtils.py | 1039 ++++++++------------------------- common/lib/openstackUtils2.py | 419 ------------- 2 files changed, 246 insertions(+), 1212 deletions(-) delete mode 100644 common/lib/openstackUtils2.py diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index f78f13b..f45b87d 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -1,585 +1,236 @@ -# -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER -# -# Copyright (c) 2015 Juniper Networks, Inc. -# All rights reserved. -# -# Use is subject to license terms. -# -# Licensed under the Apache License, Version 2.0 (the ?License?); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at http://www.apache.org/licenses/LICENSE-2.0. -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - import json import logging import mmap import time -import urllib2 -from urllib2 import URLError from wistar import configuration import openstack from openstack.config import loader -from openstack import utils - -import openstackUtils2 - -# OpenStack component URLs -# _glance_url = ':9292/v1' -_analytics_url = ':8081' -_api_url = ':8082' -_os_url = ':5000/v3' -_nova_url = ':8774/v2' -_neutron_url = ':9696/v2.0' -_glance_url = ':9292/v2' -_heat_url = ':8004/v1' -_auth_url = _os_url + "/auth/tokens" -_data_url = ':8143/api/tenant/networking/' - -# auth token will get populated by connect on each instantiation -# and referenced by each subsequent call -_auth_token = "" -_project_auth_token = "" -_tenant_id = "" - -_token_cache_time = time.time() -_project_token_cache_time = time.time() - -# cache auth tokens for 1 hour -_max_cache_time = 3600 +from openstack import utils +from openstack.cloud import OpenStackCloud +from keystoneauth1.exceptions.http import Unauthorized as ErrorUnauthorized + logger = logging.getLogger(__name__) -def connect_to_openstack(): +def create_connection(): """ - authenticates to keystone at configuration.openstack_host with OPENSTACK_USER, OPENSTACK_PASS - will set the _auth_token property on success, which is then used for all subsequent - calls from this module - :return: True on successful authentication to keystone, False otherwise + Creates an connection object based on the configuration mode + Either uses the openstacksdk mode which searches for clouds.yaml + Or uses the configuration options """ + if configuration.openstack_mode == "auto": + return openstack.connect(cloud=configuration.openstack_cloud) + else: + return openstack.connect( + auth_url=configuration.openstack_host, + project_name=configuration.openstack_project, + username=configuration.openstack_user, + password=configuration.openstack_password, + region_name=configuration.openstack_region + ) - logger.debug("--- connect_to_openstack ---") - - return openstackUtils2.connect_to_openstack() - - logger.debug('verify configuration') - - if not hasattr(configuration, 'openstack_host'): - logger.error('Openstack Host is not configured') - return False - - if not hasattr(configuration, 'openstack_user'): - logger.error('Openstack User is not configured') - return False - - if not hasattr(configuration, 'openstack_password'): - logger.error('Openstack Password is not configured') - return False - global _auth_token - global _tenant_id - global _token_cache_time - - # simple cache calculation - # _token_cache_time will get updated when we refresh the token - # so let's find out how long ago that was - # and if we should refresh again - now = time.time() - diff = now - _token_cache_time - - if diff < _max_cache_time and _auth_token != "": - return _auth_token - - logger.debug("refreshing auth token") - _token_cache_time = now - _auth_token = "" - - _auth_json = """ - { "auth": { - "identity": { - "methods": ["password"], - "password": { - "user": { - "name": "%s", - "domain": { "id": "%s" }, - "password": "%s" - } - } - }, - "scope": { - "project": { - "domain": { - "id": "%s" - }, - "name": "%s" - } - } - } - } - """ % (configuration.openstack_user, - configuration.openstack_domain, - configuration.openstack_password, - configuration.openstack_domain, - configuration.openstack_project) +def connect_to_openstack(): + """ + Tries to connect to the selected openstack cloud + """ + logger.debug("--- connect_to_openstack ---") + connection = create_connection() try: - _auth_token = "" - request = 
urllib2.Request(http_or_https() + configuration.openstack_host + _auth_url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("Content-Length", len(_auth_json)) - result = urllib2.urlopen(request, _auth_json) - _auth_token = result.info().getheader('X-Subject-Token') - # now get the tenant_id for the chosen project - _tenant_id = get_project_id(configuration.openstack_project) - # logger.debug(_auth_token) + connection.authorize() return True - except URLError as e: - logger.error("Could not authenticate to openstack!") - logger.error("error was %s" % str(e)) + except ErrorUnauthorized: return False +def get_glance_image_list(): + # logger.debug("--- get_glance_image_list ---") -def get_project_auth_token(project): - """ - :param project: project name string - :return: auth_token specific to this project, None on error - """ - logger.debug("--- get_project_auth_token ---") - - global _project_auth_token - global _project_token_cache_time - - now = time.time() - diff = now - _project_token_cache_time - - if diff < _max_cache_time and _project_auth_token != "": - return _project_auth_token - - logger.debug("refreshing project auth token") - _project_token_cache_time = now - _project_auth_token = "" - - _auth_json = """ - { "auth": { - "identity": { - "methods": ["password"], - "password": { - "user": { - "name": "%s", - "domain": { "id": "%s" }, - "password": "%s" - } - } - }, - "scope": { - "project": { - "domain": { - "id": "%s" - }, - "name": "%s" - } - } - } - } - """ % (configuration.openstack_user, - configuration.openstack_domain, - configuration.openstack_password, - configuration.openstack_domain, - configuration.openstack_project) - - try: - request = urllib2.Request(http_or_https() + configuration.openstack_host + _auth_url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("Content-Length", len(_auth_json)) - result = urllib2.urlopen(request, _auth_json) - _project_auth_token = result.info().getheader('X-Subject-Token') - return _project_auth_token + connection = create_connection() - except URLError as e: - logger.error("Could not get project auth token") - logger.error("error was %s" % str(e)) - return None + images = connection.image.images() + return [image.to_dict() for image in images if image.status == "active"] -def get_project_id(project_name): - """ - Gets the UUID of the project by project_name - :param project_name: Name of the Project - :return: string UUID or None - """ - logger.debug("--- get_project_id ---" + project_name) - return openstackUtils2.get_project_id(project_name) +def get_glance_image_detail(image_id): + logger.debug("---get_glance_image-detail_by_id") + connection = create_connection() - projects_url = create_os_url('/projects') - projects_string = do_get(projects_url) - if projects_string is None: + result = connection.image.get_image(image_id) + if result is None: return None + return result.to_dict() - projects = json.loads(projects_string) - for project in projects["projects"]: - if project["name"] == project_name: - return str(project["id"]) - - return None - - -def get_network_id(network_name): - """ - Gets the UUID of the network by network_name - :param network_name: Name of the network - :return: string UUID or None - """ - - logger.debug("--- get_network_id ---") - - networks_url = create_neutron_url('/networks?name=%s' % network_name) - logger.info(networks_url) - networks_string = do_get(networks_url) - 
logger.info(networks_string) - if networks_string is None: - logger.error('Did not find a network for that name!') - return None +def get_glance_image_detail_by_name(image_name): + logger.debug("-- get glance image detail by name") + connection = create_connection() - try: - networks = json.loads(networks_string) - except ValueError: - logger.error('Could not parse json response in get_network_id') + result = connection.image.find_image(image_name) + if result is None: return None + else: + return result.to_dict() - for network in networks["networks"]: - if network["name"] == network_name: - logger.info('Found id!') - return str(network["id"]) - - return None - - -def upload_image_to_glance_old(name, image_file_path): - """ - :param name: name of the image to be uploaded - :param image_file_path: full filesystem path as string to the image - :return: json encoded results string from glance REST api - """ - logger.debug("--- upload_image_to_glance ---") - url = create_glance_url('/images') - - try: - f = open(image_file_path, 'rb') - fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - - request = urllib2.Request(url, fio) - request.add_header("X-Auth-Token", _auth_token) - request.add_header("Content-Type", "application/octet-stream") - request.add_header("x-image-meta-name", name) - request.add_header("x-image-meta-disk_format", "qcow2") - request.add_header("x-image-meta-container_format", "bare") - request.add_header("x-image-meta-is_public", "true") - request.add_header("x-image-meta-min_ram", "1024") - request.add_header("x-image-meta-min_disk", "1") - result = urllib2.urlopen(request) - return result.read() - except Exception as e: - logger.error("Could not upload image to glance") - logger.error("error was %s" % str(e)) +def get_image_id_for_name(image_name): + connection = create_connection() - finally: - fio.close() - f.close() + result = connection.image.find_image(image_name) + if result is None: return None - + else: + return result.to_dict()["id"] def upload_image_to_glance(name, image_file_path): """ - :param name: name of the image to be created :param image_file_path: path of the file to upload - :return: json encoded results string from glance REST api - """ - logger.debug("--- create_image_in_glance ---") - - url = create_glance_url('/images') - - try: - - d = dict() - d['disk_format'] = 'qcow2' - d['container_format'] = 'bare' - d['name'] = name - - r_data = do_post(url, json.dumps(d)) - - except Exception as e: - logger.error("Could not upload image to glance") - logger.error("error was %s" % str(e)) - return None - - try: - r_json = json.loads(r_data) - if 'id' in r_json: - image_id = r_json['id'] - - logger.info('Preparing to push image data to glance!') - f = open(image_file_path, 'rb') - fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - upload_url = create_glance_url('/images/%s/file' % image_id) - request = urllib2.Request(upload_url, fio) - request.add_header("Content-Type", "application/octet-stream") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'PUT' - return urllib2.urlopen(request) - else: - logger.error('Could not find an ID key in returned json from glance image create') - logger.error(r_data) - logger.error('returning None') - return None - - except ValueError: - logger.error('Could not parse JSON return data from glance image create') - return None - - -def get_neutron_ports_for_network(network_name): - """ - :return: json response from /ports URL - """ - logger.debug("--- get_neutron_port_list ---") - - 
network_id = get_network_id(network_name) - if network_id is None: - logger.warn("couldn't find the correct network_id") - return None - - url = create_neutron_url("/ports.json?network_id=%s&fields=id&fields=fixed_ips" % network_id) - logger.debug(url) - port_list_string = do_get(url) - logger.debug(port_list_string) - - return port_list_string - - -def get_consumed_management_ips(): - """ - Return a list of dicts of the format - [ - { "ip-address": "xxx.xxx.xxx.xxx"} - ] - This mimics the libvirt dnsmasq format for dhcp reservations - This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to - get all reserved management ips - :return: list of dicts + :return: json encoded results string """ + #FIXME this is not properly checked yet + connection = create_connection() + image_attrs = dict() + image_attrs['disk_format'] = 'qcow2' + image_attrs['container_format'] = 'bare' + image_attrs['name'] = name - return open - Utils2.get_consumed_management_ips() - consumed_ips = list() - ports_string = get_neutron_ports_for_network(configuration.openstack_mgmt_network) - if ports_string is None: - return consumed_ips - try: - ports = json.loads(ports_string) - except ValueError: - logger.error('Could not parse json response in get_consumed_management_ips') - return consumed_ips - - if 'ports' not in ports: - logger.error('unexpected keys in json response!') - return consumed_ips + f = open(image_file_path, 'rb') + fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - for port in ports['ports']: - for fixed_ip in port['fixed_ips']: - if configuration.management_prefix in fixed_ip['ip_address']: - fip = dict() - fip['ip-address'] = fixed_ip['ip_address'] - consumed_ips.append(fip) + image_attrs['data'] = fio - return consumed_ips + connection.images.upload_image(**image_attrs) -def get_glance_image_list(): - """ - :return: list of json objects from glance /images URL filtered with only shared or public images - """ - logger.debug("--- get_glance_image_list ---") - - return openstackUtils2.get_glance_image_list() - - url = create_glance_url("/images") - image_list_string = do_get(url) - - image_list = list() - - if image_list_string is None: - return image_list - - try: - glance_return = json.loads(image_list_string) - except ValueError: - logger.warn('Could not parse json response from glance /images') - return image_list - - if 'images' not in glance_return: - logger.warn('did not find images key in glance return data') - logger.debug(glance_return) - return image_list - - for im in glance_return['images']: - - if 'status' in im and im['status'] != 'active': - logger.debug('Skipping non-active image %s' % im['name']) - continue +def get_nova_flavors(project_name): + connection = create_connection() - if 'visibility' in im and im['visibility'] in ['shared', 'public']: - image_list.append(im) + all_flavors = connection.compute.flavors() - return image_list + flavor_dicts = [flavor.to_dict() for flavor in all_flavors] + logger.debug("FLAVORS") + logger.debug(str(flavor_dicts)) + + return json.dumps(flavor_dicts) + # return [flavor.to_dict() for flavor in connection.compute.flavors()] -def get_glance_image_detail(glance_id): +def get_nova_serial_console(instance_name): """ - :param glance_id: id of the glance image to retrieve - :return: json response from glance /images/glance_id URL + Get the websocket URL for the serial proxy for a given nova server (instance) + :param instance_name: name of the instance + :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx """ - 
logger.debug("--- get_glance_image_detail ---") - return openstackUtils2.get_glance_image_detail(glance_id) + #FIXME no proper openstacksdk implementation yet + connection = create_connection() + server = connection.compute.find_server(instance_name) - url = create_glance_url("/images/%s" % glance_id) - image_string = do_get(url) - if image_string is None: + if server == None: return None + # Trying to get the console via a manual query - return json.loads(image_string) - - -def get_glance_image_detail_by_name(image_name): - """ - :param image_name: name of the glance image to retrieve - :return: json response from glance /images?name=image_name URL or None - """ - logger.debug("--- get_glance_image_detail ---") - return openstackUtils2.get_glance_image_detail_by_name(image_name) + # First build the cor - url = create_glance_url("/images?name=%s" % image_name) - image_string = do_get(url) - if image_string is None: - logger.error('Error calling glance api, no data') - return None + cloud = OpenStackCloud() + project_id = cloud.current_project_id + data = '{"os-getVNCConsole": {"type": "novnc"}}' + url = create_nova_url('/%s/servers/%s/action' % (project_id, server.id)) + logger.debug("nova console: trying: " + str(url)) try: - images_dict = json.loads(image_string) - - if 'images' not in images_dict: - logger.error('Unexpected output from glance api') - return None - - for image in images_dict['images']: - if 'name' in image and image['name'] == image_name: - logger.debug('returning image with id: %s' % image.get('id', '0')) - return image - - except ValueError: - logger.error('Could not parse json return from glance api') + project_auth_token = connection.authorize() + request = urllib2.Request(url) + request.add_header("Content-Type", "application/json") + request.add_header("charset", "UTF-8") + request.add_header("X-Auth-Token", project_auth_token) + request.get_method = lambda: 'POST' + result = urllib2.urlopen(request, data) + console_json_data = json.loads(result.read()) + logger.debug(json.dumps(console_json_data, indent=2)) + return console_json_data["console"]["url"] + except URLError as e: + logger.error("Could not get serial console to instance: %s" % instance_name) + logger.error("error was %s" % str(e)) return None -def get_image_id_for_name(image_name): + +def create_nova_url(url): """ - Returns the glance Id for the given image_name - :param image_name: name of image to search for - :return: glance id or None on failure + Creates a nova url based on the service and endpoint in the sdk """ - logger.debug("--- get_image_id_for_name ---") - return openstackUtils2.get_image_id_for_name(image_name) - - image_detail = get_glance_image_detail_by_name(image_name) - if 'name' in image_detail and image_detail['name'] == image_name: - # all is well, return the id from here - return image_detail.get('id', None) + conn = create_connection() - return None + nova_id = conn.identity.find_service("nova").id + endpoint_query == { + "service_id": nova_id, + "interface": "public" + } -def get_stack_details(stack_name): - """ - Returns python object representing Stack details - :param stack_name: name of the stack to find - :return: stack object or None if not found! 
- """ - logger.debug("--- get_stack_details ---") - return openstackUtils2.get_stack_details(stack_name) + # This should only give one result + endpoint = conn.identity.endpoints(**endpoint_query) - url = create_heat_url("/%s/stacks" % _tenant_id) + return endpoint[0].url + url - stacks_list_string = do_get(url) - stacks_list = json.loads(stacks_list_string) - for stack in stacks_list["stacks"]: - if stack["stack_name"] == stack_name: - return stack - logger.info("stack name %s was not found!" % stack_name) - return None -def get_stack_resources(stack_name, stack_id): - """ - Get all the resources for this Stack - :param stack_name: name of stack - :param stack_id: id of stack - use get_stack_details to retrieve this - :return: json response from HEAT API +def get_project_id(project_name): """ - logger.debug("--- get_stack_resources ---") - - url = create_heat_url("/%s/stacks/%s/%s/resources" % (_tenant_id, stack_name, stack_id)) - stack_resources_string = do_get(url) - if stack_resources_string is None: + :param project_name: name of the project to search for + """ + + connection = create_connection() + cloud = OpenStackCloud() + logger.debug("--get project id") + return cloud.current_project_id + logger.debug("--- all projects--") + logger.debug(str(connection.__dict__)) + logger.debug("--properties") + for project in connection.identity.projects(user_id=cloud.current_user_id): + logger.debug(str(project)) + logger.debug("Find project") + result = connection.identity.find_project(project_name, user_id=cloud.current_user_id) + if result is None: return None else: - return json.loads(stack_resources_string) + return result.to_dict()["id"] -def delete_stack(stack_name): +def get_consumed_management_ips(): """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API + Return a list of dicts of the format + [ + { "ip-address": "xxx.xxx.xxx.xxx"} + ] + This mimics the libvirt dnsmasq format for dhcp reservations + This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to + get all reserved management ips + :return: list of dicts """ - logger.debug("--- delete_stack ---") + ips = [] + connection = create_connection() - return openstackUtils2.delete_stack(stack_name) + mgmt_network = connection.network.find_network(configuration.openstack_mgmt_network) + if mgmt_network is None: + return ips - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - url = create_heat_url("/%s/stacks/%s/%s" % (_tenant_id, stack_name, stack_id)) - return do_delete(url) + for port in connection.network.ports(network_id=mgmt_network.id): + for fixed_ip in port.fixed_ips: + fip = {} + logger.debug(fixed_ip) + fip["ip-address"] = fixed_ip["ip_address"] + ips.append(fip) - -def get_nova_flavors(project_name): - """ - Returns flavors for a specific project from Nova in JSON encoded string - :return: JSON encoded string - """ - logger.debug("--- get_nova_flavors ---") - return openstackUtils2.get_nova_flavors(project_name) - project_id = get_project_id(project_name) - url = create_nova_url("/" + project_id + '/flavors/detail') - return do_get(url) + logger.debug(str(ips)) + return ips def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): @@ -592,97 +243,84 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): :return: flavor object {"name": "m1.xlarge"} """ - logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk)) - return 
openstackUtils2.get_minimum_flavor_for_specs(project_name, cpu, ram, disk) - - # create an emergency flavor so we have something to return in case we can't connect to openstack - # or some other issue prevents us from determining the right thing to do emergency_flavor = dict() - emergency_flavor['name'] = "m1.xlarge" + emergency_flavor['name'] = "m1.large" - if not connect_to_openstack(): - return emergency_flavor - - flavors = get_nova_flavors(project_name) - try: - flavors_object = json.loads(flavors) - except ValueError: - logger.error('Could not parse nova return data') - return emergency_flavor + connection = create_connection() + logger.debug("Trying to determine minumum flavor") + flavors = connection.compute.flavors() + flavors = [flavor.to_dict() for flavor in flavors] cpu_candidates = list() ram_candidates = list() disk_candidates = list() + logger.debug("checking flavors") - if "flavors" in flavors_object: - logger.debug("checking flavors") + # first, let's see if we have an exact match! + for f in flavors: + logger.debug("checking flavor: " + f["name"]) + if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk: + return f - # first, let's see if we have an exact match! - for f in flavors_object["flavors"]: - logger.debug("checking flavor: " + f["name"]) - if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk: - return f + logger.debug("not exact match yet") + # we don't have an exact match yet! + for f in flavors: + logger.debug(str(f["vcpus"]) + " " + str(cpu)) + if "vcpus" in f and f["vcpus"] >= int(cpu): + cpu_candidates.append(f) + + logger.debug("got cpu candidates: " + str(len(cpu_candidates))) + + for f in cpu_candidates: + if "ram" in f and f["ram"] >= ram: + ram_candidates.append(f) - logger.debug("not exact match yet") - # we don't have an exact match yet! - for f in flavors_object["flavors"]: - logger.debug(str(f["vcpus"]) + " " + str(cpu)) - if "vcpus" in f and f["vcpus"] >= int(cpu): - cpu_candidates.append(f) - - logger.debug("got cpu candidates: " + str(len(cpu_candidates))) - - for f in cpu_candidates: - if "ram" in f and f["ram"] >= ram: - ram_candidates.append(f) - - logger.debug("got ram candidates: " + str(len(ram_candidates))) - - for f in ram_candidates: - if "disk" in f and f["disk"] >= disk: - disk_candidates.append(f) - - logger.debug("got disk candidates: " + str(len(disk_candidates))) - - if len(disk_candidates) == 0: - # uh-oh, just return the largest and hope for the best! - return emergency_flavor - elif len(disk_candidates) == 1: - return disk_candidates[0] - else: - # we have more than one candidate left - # let's find the smallest flavor left! 
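# Aside (sketch, not part of the original patch): the block below derives
# per-axis minima from sentinel start values (99, 999, 99999) and then looks
# for a candidate matching all three, falling back to cpu+ram and then cpu
# alone. A more compact, though not perfectly equivalent, way to pick the
# smallest remaining candidate is:
#
#     best = min(disk_candidates, key=lambda f: (f["vcpus"], f["ram"], f["disk"]))
#
# which also sidesteps the sentinels, since they stop acting as minima once
# every candidate exceeds them (for example flavors with more than 99 vCPUs).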
- cpu_low = 99 - disk_low = 999 - ram_low = 99999 - for f in disk_candidates: - if f["vcpus"] < cpu_low: - cpu_low = f["vcpus"] - if f["ram"] < ram_low: - ram_low = f["ram"] - if f["disk"] < disk_low: - disk_low = f["disk"] - - for f in disk_candidates: - if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low: - # found the lowest available - logger.debug("return lowest across all axis") - return f - for f in disk_candidates: - if f["vcpus"] == cpu_low and f["ram"] == ram_low: - # lowest available along ram and cpu axis - logger.debug("return lowest across cpu and ram") - return f - for f in disk_candidates: - if f["vcpus"] == cpu: - logger.debug("return lowest cpu only") - logger.debug(f) - return f - - # should not arrive here :-/ - logger.debug("got to the impossible") - return disk_candidates[0] + logger.debug("got ram candidates: " + str(len(ram_candidates))) + + for f in ram_candidates: + if "disk" in f and f["disk"] >= disk: + disk_candidates.append(f) + + logger.debug("got disk candidates: " + str(len(disk_candidates))) + + if len(disk_candidates) == 0: + # uh-oh, just return the largest and hope for the best! + return emergency_flavor + elif len(disk_candidates) == 1: + return disk_candidates[0] + else: + # we have more than one candidate left + # let's find the smallest flavor left! + cpu_low = 99 + disk_low = 999 + ram_low = 99999 + for f in disk_candidates: + if f["vcpus"] < cpu_low: + cpu_low = f["vcpus"] + if f["ram"] < ram_low: + ram_low = f["ram"] + if f["disk"] < disk_low: + disk_low = f["disk"] + + for f in disk_candidates: + if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low: + # found the lowest available + logger.debug("return lowest across all axis") + return f + for f in disk_candidates: + if f["vcpus"] == cpu_low and f["ram"] == ram_low: + # lowest available along ram and cpu axis + logger.debug("return lowest across cpu and ram") + return f + for f in disk_candidates: + if f["vcpus"] == cpu: + logger.debug("return lowest cpu only") + logger.debug(f) + return f + # should not arrive here :-/ + logger.debug("got to the impossible") + return disk_candidates[0] def create_stack(stack_name, template_string): """ @@ -691,234 +329,49 @@ def create_stack(stack_name, template_string): :param template_string: HEAT template to be used :return: JSON response from HEAT-API or None on failure """ - logger.debug("--- create_stack ---") - return openstackUtils2.create_stack(stack_name, template_string) - - - url = create_heat_url("/" + str(_tenant_id) + "/stacks") - data = '''{ - "disable_rollback": true, - "parameters": {}, - "stack_name": "%s", - "template": %s - }''' % (stack_name, template_string) - logger.debug("Creating stack with data:") - logger.debug(data) - return do_post(url, data) - - -def get_nova_serial_console(instance_name): - """ - Get the websocket URL for the serial proxy for a given nova server (instance) - :param instance_name: name of the instance - :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx - """ - logger.debug("--- get_nova_serial_console ---") - logger.debug("Looking for instance: %s" % instance_name) - server_detail_url = create_nova_url('/%s/servers?name=%s' % (_tenant_id, instance_name)) - server_detail = do_nova_get(server_detail_url) + connection = create_connection() - # logger.debug("got details: %s" % server_detail) + template = json.loads(template_string) - if server_detail is None: - return None - - json_data = json.loads(server_detail) - if len(json_data["servers"]) == 0: - return None + 
heat_data = {} + heat_data["name"] = stack_name + heat_data["template"] = template - server_uuid = "" - for s in json_data["servers"]: - if s["name"] == instance_name: - server_uuid = s["id"] - break - - if server_uuid == "": - logger.error("Console not found with server name %s" % instance_name) - return None - - # logger.debug(server_uuid) - data = '{"os-getSerialConsole": {"type": "serial"}}' - url = create_nova_url('/%s/servers/%s/action' % (_tenant_id, server_uuid)) - - try: - project_auth_token = get_project_auth_token(configuration.openstack_project) - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", project_auth_token) - request.get_method = lambda: 'POST' - result = urllib2.urlopen(request, data) - console_json_data = json.loads(result.read()) - logger.debug(json.dumps(console_json_data, indent=2)) - return console_json_data["console"]["url"] - except URLError as e: - logger.error("Could not get serial console to instance: %s" % instance_name) - logger.error("error was %s" % str(e)) - return None - - -# URL Utility functions -def create_glance_url(url): - return http_or_https() + configuration.openstack_glance + _glance_url + url - - -def create_neutron_url(url): - return http_or_https() + configuration.openstack_host + _neutron_url + url - - -def create_os_url(url): - return http_or_https() + configuration.openstack_host + _os_url + url - - -def create_heat_url(url): - return http_or_https() + configuration.openstack_host + _heat_url + url - - -def create_nova_url(url): - return http_or_https + configuration.openstack_host + _nova_url + url + # result = connection.orchestration.create_stack({"name"}) + result = connection.orchestration.create_stack(preview=False, **heat_data) + logger.debug(result) + return result -# Utility REST functions below -def do_get(url): - """ - Performs a simple REST GET - :param url: full URL for GET request - :return: response from urllib2.urlopen(r).read() or None - """ - try: - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'GET' - result = urllib2.urlopen(request) - return result.read() - except Exception as e: - logger.error("Could not perform GET to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - - -def do_post(url, data): - """ - Performs a simple REST POST - :param url: full url to use for POST - :param data: url encoded data - :return: string response from urllib2.urlopen(r,data).read() or None - """ - try: - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("Content-Length", len(data)) - request.add_header("X-Auth-Token", _auth_token) - result = urllib2.urlopen(request, data) - return result.read() - except URLError as e: - logger.error("Could not perform POST to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - - -def do_put(url, data=""): +def delete_stack(stack_name): """ - Performs a simple REST PUT - :param url: full URL to use for PUT - :param data: url encoded data - :return: string response from urllib2.urlopen(r, data).read() or None + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API """ - try: - request = urllib2.Request(url) - 
request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'PUT' - - if data == "": - result = urllib2.urlopen(request) - else: - result = urllib2.urlopen(request, data) + connection = create_connection() - return result.read() - except URLError as e: - logger.error("Could not perform PUT to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - - -def do_nova_get(url): - """ - Performs a simple REST GET - :param url: full URL for GET request - :return: response from urllib2.urlopen(r).read() or None - """ - try: - project_auth_token = get_project_auth_token(configuration.openstack_project) - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", project_auth_token) - request.get_method = lambda: 'GET' - result = urllib2.urlopen(request) - return result.read() - except Exception as e: - logger.error("Could not perform GET to url: %s" % url) - logger.error("error was %s" % str(e)) + stack_details = get_stack_details(stack_name) + if stack_details is None: return None + else: + connection.orchestration.delete_stack(stack_details["id"]) - -def do_nova_delete(url, project_name, data=""): +def get_stack_details(stack_name): """ - Performs a DELETE request with the specified project auth token - :param url: full url to use for DELETE - :param project_name: name of the project - :param data: (optional) url encoded data - :return: string response from urllib2.urlopen(r, data).read() or None + Returns python object representing Stack details + :param stack_name: name of the stack to find + :return: stack object or None if not found! 
""" - logger.debug("--- connect_to_openstack ---") - try: - project_token = get_project_auth_token(project_name) - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", project_token) - request.get_method = lambda: 'DELETE' + logger.debug("--- get_stack_details ---") - if data == "": - result = urllib2.urlopen(request) - else: - result = urllib2.urlopen(request, data) + connection = create_connection() - return result.read() - except URLError as e: - logger.error("Could not perform DELETE to url: %s" % url) - logger.error("error was %s" % str(e)) + result = connection.orchestration.find_stack(stack_name) + if result is None: + logger.debug("stack doesn't exist yet") return None - -def do_delete(url, data=""): - """ - Performs a simple REST DELETE call - :param url: full url to use for Delete - :param data: (optional) url encoded data - :return: string response from urllib2.urlopen(r, data).read() or None - """ - try: - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'DELETE' - - if data == "": - result = urllib2.urlopen(request) - else: - result = urllib2.urlopen(request, data) - - return result.read() - except URLError as e: - logger.error("Could not perform DELETE to url: %s" % url) - logger.error("error was %s" % str(e)) - return None + else: + return result.to_dict() \ No newline at end of file diff --git a/common/lib/openstackUtils2.py b/common/lib/openstackUtils2.py deleted file mode 100644 index 2909fb9..0000000 --- a/common/lib/openstackUtils2.py +++ /dev/null @@ -1,419 +0,0 @@ -import json -import logging -import mmap -import time - -from wistar import configuration - -import openstack -from openstack.config import loader -from openstack import utils -from openstack.cloud import OpenStackCloud -from keystoneauth1.exceptions.http import Unauthorized as ErrorUnauthorized - - -""" -ajax/views.py: stack_details = openstackUtils.get_stack_details(stack_name) -ajax/views.py: stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) -ajax/views.py: if not openstackUtils.connect_to_openstack(): -ajax/views.py: tenant_id = openstackUtils.get_project_id(configuration.openstack_project) -ajax/views.py: logger.debug(openstackUtils.create_stack(stack_name, heat_template)) -ajax/views.py: if openstackUtils.connect_to_openstack(): -ajax/views.py: logger.debug(openstackUtils.delete_stack(stack_name)) -api/views.py:from common.lib import openstackUtils -api/views.py: if openstackUtils.connect_to_openstack(): -common/lib/consoleUtils.py:import openstackUtils -common/lib/consoleUtils.py: if openstackUtils.connect_to_openstack(): -common/lib/consoleUtils.py: ws_url = openstackUtils.get_nova_serial_console(name) -common/lib/imageUtils.py:import openstackUtils -common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): -common/lib/imageUtils.py: glance_detail = openstackUtils.get_glance_image_detail(image_id) -common/lib/imageUtils.py: return openstackUtils.get_glance_image_detail_by_name(image_detail['name']) -common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): -common/lib/imageUtils.py: images = openstackUtils.get_glance_image_list() -common/lib/imageUtils.py: if openstackUtils.connect_to_openstack(): -common/lib/imageUtils.py: images = openstackUtils.get_glance_image_list() 
-common/lib/wistarUtils.py:import openstackUtils -common/lib/wistarUtils.py: flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, -common/lib/wistarUtils.py: if openstackUtils.connect_to_openstack(): -common/lib/wistarUtils.py: dhcp_leases = openstackUtils.get_consumed_management_ips() -images/views.py:from common.lib import openstackUtils -images/views.py: openstackUtils.connect_to_openstack() -images/views.py: glance_id = openstackUtils.get_image_id_for_name(image.name) -images/views.py: if openstackUtils.connect_to_openstack(): -images/views.py: glance_id = openstackUtils.get_image_id_for_name(image.name) -images/views.py: glance_json = openstackUtils.get_glance_image_detail(glance_id) -images/views.py: if openstackUtils.connect_to_openstack(): -images/views.py: image_list = openstackUtils.list_glance_images() -images/views.py: if openstackUtils.connect_to_openstack(): -images/views.py: openstackUtils.upload_image_to_glance(image.name, image.filePath.path) -images/views.py: if openstackUtils.connect_to_openstack(): -images/views.py: image_details = openstackUtils.get_glance_image_detail(glance_id) -topologies/views.py:from common.lib import openstackUtils -topologies/views.py: if openstackUtils.connect_to_openstack(): -topologies/views.py: logger.debug(openstackUtils.delete_stack(stack_name)) -""" - -logger = logging.getLogger(__name__) - - -def create_connection(): - """ - Creates an connection object based on the configuration mode - Either uses the openstacksdk mode which searches for clouds.yaml - Or uses the configuration options - """ - if configuration.openstack_mode == "auto": - return openstack.connect(cloud=configuration.openstack_cloud) - else: - return openstack.connect( - auth_url=configuration.openstack_host, - project_name=configuration.openstack_project, - username=configuration.openstack_user, - password=configuration.openstack_password, - region_name=configuration.openstack_region - ) - - -def connect_to_openstack(): - """ - Tries to connect to the selected openstack cloud - """ - logger.debug("--- connect_to_openstack ---") - - connection = create_connection() - try: - connection.authorize() - return True - except ErrorUnauthorized: - return False - -def get_glance_image_list(): - # logger.debug("--- get_glance_image_list ---") - - connection = create_connection() - - images = connection.image.images() - - return [image.to_dict() for image in images if image.status == "active"] - - -def get_glance_image_detail(image_id): - logger.debug("---get_glance_image-detail_by_id") - connection = create_connection() - - result = connection.image.get_image(image_id) - if result is None: - return None - return result.to_dict() - -def get_glance_image_detail_by_name(image_name): - logger.debug("-- get glance image detail by name") - connection = create_connection() - - result = connection.image.find_image(image_name) - if result is None: - return None - else: - return result.to_dict() - - - -def get_image_id_for_name(image_name): - connection = create_connection() - - result = connection.image.find_image(image_name) - if result is None: - return None - else: - return result.to_dict()["id"] - -def upload_image_to_glance(name, image_file_path): - """ - :param name: name of the image to be created - :param image_file_path: path of the file to upload - :return: json encoded results string - """ - #FIXME this is not properly checked yet - connection = create_connection() - - image_attrs = dict() - image_attrs['disk_format'] = 'qcow2' - 
image_attrs['container_format'] = 'bare' - image_attrs['name'] = name - - f = open(image_file_path, 'rb') - fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - - image_attrs['data'] = fio - - connection.images.upload_image(**image_attrs) - - -def get_nova_flavors(project_name): - connection = create_connection() - - all_flavors = connection.compute.flavors() - - flavor_dicts = [flavor.to_dict() for flavor in all_flavors] - - logger.debug("FLAVORS") - logger.debug(str(flavor_dicts)) - - return json.dumps(flavor_dicts) - # return [flavor.to_dict() for flavor in connection.compute.flavors()] - -def get_nova_serial_console(instance_name): - """ - Get the websocket URL for the serial proxy for a given nova server (instance) - :param instance_name: name of the instance - :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx - """ - #FIXME no proper openstacksdk implementation yet - connection = create_connection() - server = connection.compute.find_server(instance_name) - - if server == None: - return None - # Trying to get the console via a manual query - - # First build the cor - - cloud = OpenStackCloud() - project_id = cloud.current_project_id - - data = '{"os-getVNCConsole": {"type": "novnc"}}' - url = create_nova_url('/%s/servers/%s/action' % (project_id, server.id)) - logger.debug("nova console: trying: " + str(url)) - try: - project_auth_token = connection.authorize() - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", project_auth_token) - request.get_method = lambda: 'POST' - result = urllib2.urlopen(request, data) - console_json_data = json.loads(result.read()) - logger.debug(json.dumps(console_json_data, indent=2)) - return console_json_data["console"]["url"] - except URLError as e: - logger.error("Could not get serial console to instance: %s" % instance_name) - logger.error("error was %s" % str(e)) - return None - - - -def create_nova_url(url): - """ - Creates a nova url based on the service and endpoint in the sdk - """ - conn = create_connection() - - nova_id = conn.identity.find_service("nova").id - - endpoint_query == { - "service_id": nova_id, - "interface": "public" - } - - # This should only give one result - endpoint = conn.identity.endpoints(**endpoint_query) - - return endpoint[0].url + url - - - - -def get_project_id(project_name): - """ - :param project_name: name of the project to search for - """ - - connection = create_connection() - cloud = OpenStackCloud() - logger.debug("--get project id") - return cloud.current_project_id - logger.debug("--- all projects--") - logger.debug(str(connection.__dict__)) - logger.debug("--properties") - for project in connection.identity.projects(user_id=cloud.current_user_id): - logger.debug(str(project)) - logger.debug("Find project") - result = connection.identity.find_project(project_name, user_id=cloud.current_user_id) - if result is None: - return None - else: - return result.to_dict()["id"] - - -def get_consumed_management_ips(): - """ - Return a list of dicts of the format - [ - { "ip-address": "xxx.xxx.xxx.xxx"} - ] - This mimics the libvirt dnsmasq format for dhcp reservations - This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to - get all reserved management ips - :return: list of dicts - """ - ips = [] - connection = create_connection() - - mgmt_network = connection.network.find_network(configuration.openstack_mgmt_network) - if mgmt_network is None: - return ips - - for port in 
connection.network.ports(network_id=mgmt_network.id): - for fixed_ip in port.fixed_ips: - fip = {} - logger.debug(fixed_ip) - fip["ip-address"] = fixed_ip["ip_address"] - ips.append(fip) - - logger.debug(str(ips)) - return ips - - -def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): - """ - Query nova to get all flavors and return the flavor that best matches our desired constraints - :param project_name: name of the project to check for flavors - :param cpu: number of cores desired - :param ram: amount of ram desired in MB - :param disk: amount of disk required in GB - :return: flavor object {"name": "m1.xlarge"} - """ - - emergency_flavor = dict() - emergency_flavor['name'] = "m1.large" - - connection = create_connection() - logger.debug("Trying to determine minumum flavor") - flavors = connection.compute.flavors() - flavors = [flavor.to_dict() for flavor in flavors] - - cpu_candidates = list() - ram_candidates = list() - disk_candidates = list() - logger.debug("checking flavors") - - # first, let's see if we have an exact match! - for f in flavors: - logger.debug("checking flavor: " + f["name"]) - if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk: - return f - - logger.debug("not exact match yet") - # we don't have an exact match yet! - for f in flavors: - logger.debug(str(f["vcpus"]) + " " + str(cpu)) - if "vcpus" in f and f["vcpus"] >= int(cpu): - cpu_candidates.append(f) - - logger.debug("got cpu candidates: " + str(len(cpu_candidates))) - - for f in cpu_candidates: - if "ram" in f and f["ram"] >= ram: - ram_candidates.append(f) - - logger.debug("got ram candidates: " + str(len(ram_candidates))) - - for f in ram_candidates: - if "disk" in f and f["disk"] >= disk: - disk_candidates.append(f) - - logger.debug("got disk candidates: " + str(len(disk_candidates))) - - if len(disk_candidates) == 0: - # uh-oh, just return the largest and hope for the best! - return emergency_flavor - elif len(disk_candidates) == 1: - return disk_candidates[0] - else: - # we have more than one candidate left - # let's find the smallest flavor left! 
- cpu_low = 99 - disk_low = 999 - ram_low = 99999 - for f in disk_candidates: - if f["vcpus"] < cpu_low: - cpu_low = f["vcpus"] - if f["ram"] < ram_low: - ram_low = f["ram"] - if f["disk"] < disk_low: - disk_low = f["disk"] - - for f in disk_candidates: - if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low: - # found the lowest available - logger.debug("return lowest across all axis") - return f - for f in disk_candidates: - if f["vcpus"] == cpu_low and f["ram"] == ram_low: - # lowest available along ram and cpu axis - logger.debug("return lowest across cpu and ram") - return f - for f in disk_candidates: - if f["vcpus"] == cpu: - logger.debug("return lowest cpu only") - logger.debug(f) - return f - - # should not arrive here :-/ - logger.debug("got to the impossible") - return disk_candidates[0] - -def create_stack(stack_name, template_string): - """ - Creates a Stack via a HEAT template - :param stack_name: name of the stack to create - :param template_string: HEAT template to be used - :return: JSON response from HEAT-API or None on failure - """ - - connection = create_connection() - - template = json.loads(template_string) - - heat_data = {} - heat_data["name"] = stack_name - heat_data["template"] = template - - # result = connection.orchestration.create_stack({"name"}) - - result = connection.orchestration.create_stack(preview=False, **heat_data) - logger.debug(result) - return result - -def delete_stack(stack_name): - """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API - """ - connection = create_connection() - - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - connection.orchestration.delete_stack(stack_details["id"]) - -def get_stack_details(stack_name): - """ - Returns python object representing Stack details - :param stack_name: name of the stack to find - :return: stack object or None if not found! 
- """ - logger.debug("--- get_stack_details ---") - - connection = create_connection() - - result = connection.orchestration.find_stack(stack_name) - if result is None: - logger.debug("stack doesn't exist yet") - return None - - else: - return result.to_dict() \ No newline at end of file From 34883bd7118f8fa604e36e002c3bb57de6ae8ad1 Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 5 Aug 2018 17:54:50 +0200 Subject: [PATCH 3/9] Added ansible-playbook for bionic --- extras/install_wistar_ubuntu_18_pb.yml | 188 +++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 extras/install_wistar_ubuntu_18_pb.yml diff --git a/extras/install_wistar_ubuntu_18_pb.yml b/extras/install_wistar_ubuntu_18_pb.yml new file mode 100644 index 0000000..182d8fa --- /dev/null +++ b/extras/install_wistar_ubuntu_18_pb.yml @@ -0,0 +1,188 @@ +--- +# +# Provisions all the required dependencies for Wistar on the local host +# wistar_branch refers to the branch from git that you want to clone +# this is useful if you'd like to follow the develop branch for example +# + +- name: Provision Wistar + hosts: localhost + connection: local + become: true + vars: + wistar_branch: master + + tasks: + - name: Update all packages to the latest version + apt: + upgrade: dist + update_cache: yes + + - name: Install Junos-eznc dependancies + apt: + name: "{{ item }}" + state: present + with_items: + - build-essential + - libxml2-dev + - libxslt1-dev + - libz-dev + - libffi-dev + - libssl-dev + - python-dev + - git + - python-pip + + - name: Install python-cryptography + pip: + name: cryptography + editable: false + + - name: Install junos-eznc + pip: + name: junos-eznc + editable: false + + - name: Install jxmlease + pip: + name: jxmlease + editable: false + + - name: Install Wistar dependancies + apt: + name: "{{ item }}" + state: present + update_cache: true + with_items: + - qemu-kvm + - libvirt-bin + - socat + - python-pexpect + - python-libvirt + - python-yaml + - unzip + - bridge-utils + - python-numpy + - genisoimage + - python-netaddr + - python-markupsafe + - python-setuptools + - mtools + - dosfstools + - openvswitch-switch + + - name: Install Django + pip: + name: django + version: 1.9.9 + editable: false + + - name: Install Python virtualBox + pip: + name: pyvbox + editable: false + + - name: Create Wistar directory structure 1 + file: + path: /opt/wistar + state: directory + - name: Create Wistar directory structure 2 + file: + path: /opt/wistar/user_images + state: directory + - name: Create Wistar directory structure 3 + file: + path: /opt/wistar/wistar-master + state: directory + - name: Create Wistar directory structure 4 + file: + path: /opt/wistar/media + state: directory + - name: Create Wistar directory structure 5 + file: + path: /opt/wistar/seeds + state: directory + - name: Create Wistar directory structure 6 + file: + path: /opt/wistar/user_images/instances + state: directory + + - name: Pull latest Wistar from Git + git: + repo: https://github.com/Juniper/wistar.git + dest: /opt/wistar/wistar-master/ + version: "{{ wistar_branch }}" + + - name: Create Wistar tables + command: /opt/wistar/wistar-master/manage.py migrate + + - name: install apache2 + apt: + name: "{{ item }}" + state: present + with_items: + - apache2 + - libapache2-mod-wsgi + + - name: enable the Apache2 module "wsgi" + apache2_module: + state: present + name: wsgi + notify: restart apache + + - name: set permissions on wistar dir + file: + path: /opt/wistar + owner: www-data + group: www-data + state: directory + 
recurse: yes + + - name: set permissions on wistar log + file: + path: /var/log/wistar.log + owner: www-data + group: www-data + state: touch + + - name: set permissions on wistar errorlog + file: + path: /var/log/apache2/wistar.log + owner: www-data + group: www-data + state: touch + + - name: set permissions on wistar accesslog + file: + path: /var/log/apache2/wistar_access.log + owner: www-data + group: www-data + state: touch + + - name: copy wistar config file to apache + copy: + src: 999-wistar.conf + dest: /etc/apache2/sites-available/999-wistar.conf + + - name: enable wistar site in apache + file: + src: /etc/apache2/sites-available/999-wistar.conf + dest: /etc/apache2/sites-enabled/999-wistar.conf + state: link + notify: restart apache + + - name: add www-data to libvirt users + user: + name: www-data + groups: libvirt + append: yes + + - name: Allow libvirtd group to modify ovs-vsctl + lineinfile: + dest: /etc/sudoers + state: present + line: '%libvirt ALL=NOPASSWD: /usr/bin/ovs-vsctl' + + handlers: + - name: restart apache + service: name=apache2 state=restarted From 57449aa6406aaa37d6063f29638dd384381af11c Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 5 Aug 2018 18:03:51 +0200 Subject: [PATCH 4/9] Added the openstack sdk to the playbooks and to the README.md --- README.md | 4 +++- extras/install_wistar_ubuntu_16_pb.yml | 1 + extras/install_wistar_ubuntu_18_pb.yml | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6797cdd..dccfb03 100644 --- a/README.md +++ b/README.md @@ -20,9 +20,11 @@ Quick Start instructions for KVM deployments: To get started, you need a server running Ubuntu 14.04 (or some similar flavor) with libvirt, kvm and a few python tools. +You can use the various ansible-playbooks in the extras folder to get an install up and running quickly. 
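+For example, to provision an Ubuntu 18.04 (bionic) host with the playbook added in this series, run it locally as root from a checkout of this repository (this assumes ansible is already installed; the optional wistar_branch variable selects the git branch to deploy):
+```
+root@wistar-build:~# ansible-playbook extras/install_wistar_ubuntu_18_pb.yml -e wistar_branch=develop
+```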
+ Install the required Ubuntu packages- ``` -root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools +root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools python-openstacksdk ``` Install Python packages- diff --git a/extras/install_wistar_ubuntu_16_pb.yml b/extras/install_wistar_ubuntu_16_pb.yml index 0b44e41..2c7c95a 100644 --- a/extras/install_wistar_ubuntu_16_pb.yml +++ b/extras/install_wistar_ubuntu_16_pb.yml @@ -70,6 +70,7 @@ - mtools - dosfstools - openvswitch-switch + - python-openstacksdk - name: Install Django pip: diff --git a/extras/install_wistar_ubuntu_18_pb.yml b/extras/install_wistar_ubuntu_18_pb.yml index 182d8fa..36a0db4 100644 --- a/extras/install_wistar_ubuntu_18_pb.yml +++ b/extras/install_wistar_ubuntu_18_pb.yml @@ -70,6 +70,7 @@ - mtools - dosfstools - openvswitch-switch + - python-openstacksdk - name: Install Django pip: From 23dd57249ef460571e1ff4eb1fde2933283ffd10 Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 5 Aug 2018 20:46:33 +0200 Subject: [PATCH 5/9] Fixed the stack details and status pane with the openstacksdk backend --- ajax/views.py | 68 +++++++++++++++++++++++++----------- common/lib/openstackUtils.py | 20 ++++++++++- 2 files changed, 67 insertions(+), 21 deletions(-) diff --git a/ajax/views.py b/ajax/views.py index e3a0957..87354b0 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -585,9 +585,22 @@ def refresh_openstack_deployment_status(request, topology_id): stack_details = openstackUtils.get_stack_details(stack_name) stack_resources = dict() logger.debug(stack_details) - if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]: + + if stack_details is not None and "stack_status" in stack_details and "COMPLETE" in stack_details["stack_status"]: + stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + + if stack_details is not None and 'status' in stack_details and 'COMPLETE' in stack_details["status"]: + # This fixes compatbility with newer resource responses which have different fields + # Simply readd the data with the old names stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + stack_details["stack_status"] = stack_details["status"] + stack_details["stack_status_reason"] = stack_details["status_reason"] + + for resource in stack_resources["resources"]: + resource["resource_name"] = resource["name"] + resource["resource_status"] = resource["status"] + if hasattr(configuration, 'openstack_horizon_url'): horizon_url = configuration.openstack_horizon_url else: @@ -886,27 +899,36 @@ def redeploy_topology(request): return render(request, 'ajax/ajaxError.html', {'error': "Topology doesn't exist"}) try: - domains = libvirtUtils.get_domains_for_topology(topology_id) - config = wistarUtils.load_config_from_topology_json(topo.json, topology_id) - - logger.debug('checking for orphaned domains first') - # find domains we no longer need - for d in domains: - logger.debug('checking domain: %s' % d['name']) - found = False - for config_device in 
config["devices"]: - if config_device['name'] == d['name']: - found = True - continue + if configuration.deployment_backend == "openstack": + # Updates the stack with the new heat template + # This should allow for the hotplugging of connections + # Currently doesn't do anything + #FIXME + update_stack(request, topology_id) + + elif configuration.deployment_backend == "kvm": + + domains = libvirtUtils.get_domains_for_topology(topology_id) + config = wistarUtils.load_config_from_topology_json(topo.json, topology_id) + + logger.debug('checking for orphaned domains first') + # find domains we no longer need + for d in domains: + logger.debug('checking domain: %s' % d['name']) + found = False + for config_device in config["devices"]: + if config_device['name'] == d['name']: + found = True + continue - if not found: - logger.info("undefine domain: " + d["name"]) - source_file = libvirtUtils.get_image_for_domain(d["uuid"]) - if libvirtUtils.undefine_domain(d["uuid"]): - if source_file is not None: - osUtils.remove_instance(source_file) + if not found: + logger.info("undefine domain: " + d["name"]) + source_file = libvirtUtils.get_image_for_domain(d["uuid"]) + if libvirtUtils.undefine_domain(d["uuid"]): + if source_file is not None: + osUtils.remove_instance(source_file) - osUtils.remove_cloud_init_seed_dir_for_domain(d['name']) + osUtils.remove_cloud_init_seed_dir_for_domain(d['name']) except Exception as e: logger.debug("Caught Exception in redeploy") @@ -1475,6 +1497,7 @@ def deploy_stack(request, topology_id): except ObjectDoesNotExist: return render(request, 'error.html', {'error': "Topology not found!"}) + heat_template =None try: # generate a stack name # FIXME should add a check to verify this is a unique name @@ -1506,6 +1529,11 @@ def deploy_stack(request, topology_id): logger.debug(str(e)) return render(request, 'error.html', {'error': str(e)}) +def update_stack(request, topology_id): + """ + Updates an already existing stack with a new template + """ + pass def delete_stack(request, topology_id): """ diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index f45b87d..3d96465 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -374,4 +374,22 @@ def get_stack_details(stack_name): return None else: - return result.to_dict() \ No newline at end of file + return result.to_dict() + +def get_stack_resources(stack_name, stack_id): + """ + Get all the resources for this Stack + :param stack_name: name of stack + :param stack_id: id of stack - use get_stack_details to retrieve this + :return: json response from HEAT API + """ + + conn = create_connection() + + stack = conn.orchestration.get_stack(stack_id) + + resources = conn.orchestration.resources(stack) + logger.debug("Got resources") + resources_list = [r.to_dict() for r in resources] + logger.debug(resources_list) + return {"resources": resources_list} \ No newline at end of file From 9da84028fdd3c7469acf86a9a8e73610d822da55 Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Mon, 6 Aug 2018 16:29:53 +0200 Subject: [PATCH 6/9] Added update stack functionality to the re-deploy button --- ajax/views.py | 47 ++++++++++++++++++++++++++++-------- common/lib/openstackUtils.py | 26 ++++++++++++++++++++ 2 files changed, 63 insertions(+), 10 deletions(-) diff --git a/ajax/views.py b/ajax/views.py index 87354b0..ffd5810 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -885,6 +885,8 @@ def multi_clone_topology(request): def redeploy_topology(request): + + logger.debug("---redeploy_topology---") 
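+    # With the openstack deployment backend a redeploy is translated into a Heat
+    # stack update (see update_stack below); with the kvm backend the existing
+    # libvirt domains are reconciled against the topology JSON before re-deploying.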
required_fields = set(['json', 'topologyId']) if not required_fields.issubset(request.POST): return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) @@ -901,8 +903,8 @@ def redeploy_topology(request): try: if configuration.deployment_backend == "openstack": # Updates the stack with the new heat template - # This should allow for the hotplugging of connections - # Currently doesn't do anything + # Should check first if the stack exists + # if the stack doesn't exist, just switch to deployment instead #FIXME update_stack(request, topology_id) @@ -935,13 +937,14 @@ def redeploy_topology(request): logger.debug(str(e)) return render(request, 'ajax/ajaxError.html', {'error': str(e)}) - # forward onto deploy topo - try: - inline_deploy_topology(config) - except Exception as e: - logger.debug("Caught Exception in inline_deploy") - logger.debug(str(e)) - return render(request, 'ajax/ajaxError.html', {'error': str(e)}) + # forward onto deploy topoloy if this is a kvm topology + if configuration.deployment_backend == "kvm": + try: + inline_deploy_topology(config) + except Exception as e: + logger.debug("Caught Exception in inline_deploy") + logger.debug(str(e)) + return render(request, 'ajax/ajaxError.html', {'error': str(e)}) return refresh_deployment_status(request) @@ -1533,7 +1536,31 @@ def update_stack(request, topology_id): """ Updates an already existing stack with a new template """ - pass + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + return render(request, 'error.html', {'error': "Topology not found!"}) + try: + stack_name = topology.name.replace(' ', '_') + # let's parse the json and convert to simple lists and dicts + logger.debug("loading config") + config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) + logger.debug("Config is loaded") + heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name) + logger.debug("heat template created") + if not openstackUtils.connect_to_openstack(): + return render(request, 'error.html', {'error': "Could not connect to Openstack"}) + + logger.debug(openstackUtils.update_stack(stack_name, heat_template)) + + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in update stack") + logger.debug(str(e)) + + return render(request, 'error.html', {'error': str(e)}) + def delete_stack(request, topology_id): """ diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index 3d96465..6336851 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -344,6 +344,32 @@ def create_stack(stack_name, template_string): logger.debug(result) return result +def update_stack(stack_name, template_string): + """ + Updates the heat template associated with a stack + This triggers a rebuild of the associated resources so may break certain topologies + """ + + connection = create_connection() + + template = json.loads(template_string) + + stack = connection.orchestration.find_stack(stack_name) + + + if stack is None: + # Stack has been deleted or never deployed! 
+ return None + else: + heat_data = {} + heat_data["template"] = template + + result = connection.orchestration.update_stack(stack, **heat_data) + logger.debug(result) + + return result + + def delete_stack(stack_name): """ Deletes a stack from OpenStack From 67991cb03612a2ba5d0e664fb34c1b94d8aff25a Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Tue, 7 Aug 2018 13:57:36 +0200 Subject: [PATCH 7/9] Fixed re-deploy to do a deploy if the stack doesn't exist yet --- ajax/views.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ajax/views.py b/ajax/views.py index ffd5810..ef87dde 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -1551,7 +1551,13 @@ def update_stack(request, topology_id): if not openstackUtils.connect_to_openstack(): return render(request, 'error.html', {'error': "Could not connect to Openstack"}) - logger.debug(openstackUtils.update_stack(stack_name, heat_template)) + + result = openstackUtils.update(stack_stack, heat_template) + if result == None: + logger.debug("Can't update stack since it doesn't exist, deploying") + openstackUtils.create_stack(stack_name, heat_template) + else: + logger.debug(result) return HttpResponseRedirect('/topologies/' + topology_id + '/') From 9153098618697497d2709bb959ee4e857031903b Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Tue, 7 Aug 2018 14:55:24 +0200 Subject: [PATCH 8/9] Fixed re-deploy to do a deploy if the stack doesn't exist yet --- ajax/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ajax/views.py b/ajax/views.py index ef87dde..cc766ad 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -1552,7 +1552,7 @@ def update_stack(request, topology_id): return render(request, 'error.html', {'error': "Could not connect to Openstack"}) - result = openstackUtils.update(stack_stack, heat_template) + result = openstackUtils.update_stack(stack_name, heat_template) if result == None: logger.debug("Can't update stack since it doesn't exist, deploying") openstackUtils.create_stack(stack_name, heat_template) From b9d6e55b9d05d452822df1d30b6a6d299362b366 Mon Sep 17 00:00:00 2001 From: Cees Portegies Date: Sun, 12 Aug 2018 19:19:43 +0200 Subject: [PATCH 9/9] Added the current status of instances in openstack and on/off functionality, additionally, a reboot button for quick rebooting of instances is added --- .../ajax/openstackDeploymentStatus.html | 34 +++++++++++++-- ajax/urls.py | 1 + ajax/views.py | 26 ++++++++++- common/lib/openstackUtils.py | 43 ++++++++++++++++++- common/static/js/wistar_utils.js | 38 ++++++++++++++++ 5 files changed, 136 insertions(+), 6 deletions(-) diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html index 63feee1..0a0c03e 100644 --- a/ajax/templates/ajax/openstackDeploymentStatus.html +++ b/ajax/templates/ajax/openstackDeploymentStatus.html @@ -57,9 +57,34 @@ {{ resource.resource_name }} - - {% if 'COMPLETE' in resource.resource_status %} -
+ {% if 'COMPLETE' in resource.resource_status and resource.physical_status == "ACTIVE" %}
+   ✓ {# running: status check mark plus the stop/reboot controls (inline markup not recovered) #}
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "SHUTOFF" %}
+   {# shut off: start control (inline markup not recovered) #}
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "REBOOT" %}
+   {# reboot in progress #}
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == None %}
+   {# resource complete, no physical status reported #}
+ {% else %}
  diff --git a/ajax/urls.py b/ajax/urls.py index 59520b4..16a2444 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -49,6 +49,7 @@ url(r'^manageDomain/$', views.manage_domain, name='manageDomain'), url(r'^manageNetwork/$', views.manage_network, name='manageNetwork'), url(r'^manageHypervisor/$', views.manage_hypervisor, name='manage_hypervisor'), + url(r'^manageInstance/$', views.manage_instance, name="manageInstance"), url(r'^executeCli/$', views.execute_cli, name='executeCli'), url(r'^executeLinuxCli/$', views.execute_linux_cli, name='executeLinuxCli'), url(r'^launchWebConsole/$', views.launch_web_console, name='launchWebConsole'), diff --git a/ajax/views.py b/ajax/views.py index cc766ad..2181ae1 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -588,11 +588,17 @@ def refresh_openstack_deployment_status(request, topology_id): if stack_details is not None and "stack_status" in stack_details and "COMPLETE" in stack_details["stack_status"]: stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + # No attempt being made to get the physical status, since this is for legacy Openstack + # And I do not know what field names are + for resource in stack_resources: + resource["physical_status"] = None if stack_details is not None and 'status' in stack_details and 'COMPLETE' in stack_details["status"]: # This fixes compatbility with newer resource responses which have different fields # Simply readd the data with the old names - stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + + # Also get the physical status + stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"], resource_status=True) stack_details["stack_status"] = stack_details["status"] stack_details["stack_status_reason"] = stack_details["status_reason"] @@ -724,6 +730,24 @@ def manage_domain(request): else: return render(request, 'ajax/ajaxError.html', {'error': "Unknown Parameters in POST!"}) +def manage_instance(request): + """ + This function manages basic interactions with the OS::Nova::Server + resources in the deployed openstack stack + The instanceId corresponds to the OS::Nova::Server instance + """ + required_fields = set(['topologyId', 'action', 'instanceId']) + + if not required_fields.issubset(request.POST): + return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) + + instance_id = request.POST['instanceId'] + action = request.POST['action'] + topology_id = request.POST["topologyId"] + + openstackUtils.manage_instance(instance_id, action) + + return refresh_openstack_deployment_status(request, topology_id) def manage_network(request): required_fields = set(['networkName', 'action', 'topologyId']) diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index 6336851..da23a0c 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -107,6 +107,24 @@ def upload_image_to_glance(name, image_file_path): connection.images.upload_image(**image_attrs) +def manage_instance(instance_id, action): + """ + Some basic interactions with server instances + """ + + conn = create_connection() + + server_instance = conn.compute.get_server(instance_id) + + if action == "stop" and server_instance.status == "ACTIVE": + # Only attempt to stop if it's active, don't try to interact with a non stable instance + conn.compute.stop_server(instance_id) + elif action == "start" and server_instance.status == "SHUTOFF": + # Only attempt to start if it's properly shut off + 
conn.compute.start_server(instance_id) + elif action == "reboot" and server_instance.status == "ACTIVE": + conn.compute.reboot_server(instance_id, "SOFT") + def get_nova_flavors(project_name): connection = create_connection() @@ -402,11 +420,12 @@ def get_stack_details(stack_name): else: return result.to_dict() -def get_stack_resources(stack_name, stack_id): +def get_stack_resources(stack_name, stack_id, resource_status=False): """ Get all the resources for this Stack :param stack_name: name of stack :param stack_id: id of stack - use get_stack_details to retrieve this + :param resource_status: Also get the physical_status of the OS::Nova::Server instances :return: json response from HEAT API """ @@ -418,4 +437,24 @@ def get_stack_resources(stack_name, stack_id): logger.debug("Got resources") resources_list = [r.to_dict() for r in resources] logger.debug(resources_list) - return {"resources": resources_list} \ No newline at end of file + if resource_status == False: + return {"resources": resources_list} + + #Get status of the resources as well + + for resource in resources_list: + # Only get the status for the OS::Nova::Server + if resource["resource_type"] == "OS::Nova::Server" and "COMPLETE" in resource["status"]: + + status = conn.compute.get_server(resource["physical_resource_id"]).status + + # Add the key for the status of the physical status + resource["physical_status"] = status + else: + # Either it's not Nova or not yet completed + resource["physical_status"] = None + + logger.debug("Also gotten the status") + logger.debug(resources_list) + + return {"resources": resources_list} diff --git a/common/static/js/wistar_utils.js b/common/static/js/wistar_utils.js index 75a1116..9384ec1 100644 --- a/common/static/js/wistar_utils.js +++ b/common/static/js/wistar_utils.js @@ -61,6 +61,44 @@ alert('Could not perform request!'); }); } + + // Similar to the manageDomain function, except for OpenStack instances + // Does not immediately change the page, unlike the domain one + // Only a refresh of the status will show if an instance has been turned off + function manageInstance(action, instanceId, topoId) { + var doc = jQuery(document.documentElement); + doc.css('cursor', 'progress'); + + var doc = jQuery(document.documentElement); + doc.css('cursor', 'progress'); + + if (action == "stop") { + if (typeof s != 'undefined') { + s.setBootState("down"); + } + + if (! confirm("This will power off the instance ungracefully!")) { + doc.css('cursor', ''); + return false; + } + } + var url = '/ajax/manageInstance/'; + var params = { + 'topologyId' : topoId, + 'instanceId' : instanceId, + 'action' : action + }; + var post = jQuery.post(url, params, function(response) { + var content = jQuery(response); + jQuery('#deploymentStatus').empty().append(content); + }); + post.fail(function() { + alert('Could not perform request!'); + }); + post.always(function() { + doc.css('cursor', ''); + }); + } function manageDomain(action, domainId, topoId) { var doc = jQuery(document.documentElement);