Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Replacement of the Openstack backend to openstacksdk #45

Open
wants to merge 11 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@ Quick Start instructions for KVM deployments:

To get started, you need a server running Ubuntu 14.04 (or some similar flavor) with libvirt, kvm and a few python tools.

You can use the various ansible-playbooks in the extras folder to get an install up and running quickly.

Install the required Ubuntu packages-
```
root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools
root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools python-openstacksdk
```

Install Python packages-
Expand Down
34 changes: 31 additions & 3 deletions ajax/templates/ajax/openstackDeploymentStatus.html
Original file line number Diff line number Diff line change
Expand Up @@ -57,16 +57,44 @@
{{ resource.resource_name }}
</a>
</td>
<td colspan="2" style="white-space: nowrap;">
{% if 'COMPLETE' in resource.resource_status %}
<div class="status_green">&#10003;</div>
<td colspan="3" style="white-space: nowrap;">
{% if 'COMPLETE' in resource.resource_status and resource.physical_status == "ACTIVE" %}
<div class="status_green">
<a href="#" onclick="javascript: manageInstance('stop', '{{ resource.physical_resource_id }}', '{{ topology_id }}');"
title="Request shutdown in OpenStack">
&#10003;</a>

</div>
&nbsp;
<div class="status_grey">
<a href="#" onclick="javascript: manageInstance('reboot', '{{ resource.physical_resource_id }}', '{{ topology_id }}');"
title="Request reboot in OpenStack">&#x21bb;</a>
</div>
&nbsp;
<a href="#"
onclick="javascript: window.open('{{ openstack_horizon_url }}/project/instances/{{ resource.physical_resource_id }}/?tab=instance_details__console');"
title="Launch Console"
>
<img src="{% static 'images/console.png' %}" width="20px" height="20px"/>
</a>
{% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "SHUTOFF" %}
<div class="status_red">
<a href="#" onclick="javascript: manageInstance('start', '{{ resource.physical_resource_id }}', '{{ topology_id }}');"
title="Request start up in OpenStack">
&#9661;</a></div>
&nbsp;
{% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "REBOOT" %}
<div class="status_red">&#x21bb;</div>
&nbsp;
<a href="#"
onclick="javascript: window.open('{{ openstack_horizon_url }}/project/instances/{{ resource.physical_resource_id }}/?tab=instance_details__console');"
title="Launch Console"
>
<img src="{% static 'images/console.png' %}" width="20px" height="20px"/>
</a>
<!-- Legacy addition, couldn't get physical status so no interaction buttons -->
{% elif 'COMPLETE' in resource.resource_status and resource.physical_status == None %}
<div class="status_green">&#10003;</div>&nbsp;
{% else %}
<div class="status_red">&#9661;</div>
&nbsp;
Expand Down
1 change: 1 addition & 0 deletions ajax/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
url(r'^manageDomain/$', views.manage_domain, name='manageDomain'),
url(r'^manageNetwork/$', views.manage_network, name='manageNetwork'),
url(r'^manageHypervisor/$', views.manage_hypervisor, name='manage_hypervisor'),
url(r'^manageInstance/$', views.manage_instance, name="manageInstance"),
url(r'^executeCli/$', views.execute_cli, name='executeCli'),
url(r'^executeLinuxCli/$', views.execute_linux_cli, name='executeLinuxCli'),
url(r'^launchWebConsole/$', views.launch_web_console, name='launchWebConsole'),
Expand Down
139 changes: 112 additions & 27 deletions ajax/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -585,8 +585,27 @@ def refresh_openstack_deployment_status(request, topology_id):
stack_details = openstackUtils.get_stack_details(stack_name)
stack_resources = dict()
logger.debug(stack_details)
if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]:

if stack_details is not None and "stack_status" in stack_details and "COMPLETE" in stack_details["stack_status"]:
stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"])
# No attempt is made to get the physical status here, since this path is
# for legacy OpenStack and the response field names for it are not known
for resource in stack_resources:
resource["physical_status"] = None

if stack_details is not None and 'status' in stack_details and 'COMPLETE' in stack_details["status"]:
# This fixes compatibility with newer resource responses, which have different fields
# Simply re-add the data under the old names

# Also get the physical status
stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"], resource_status=True)

stack_details["stack_status"] = stack_details["status"]
stack_details["stack_status_reason"] = stack_details["status_reason"]

for resource in stack_resources["resources"]:
resource["resource_name"] = resource["name"]
resource["resource_status"] = resource["status"]

if hasattr(configuration, 'openstack_horizon_url'):
horizon_url = configuration.openstack_horizon_url
Expand Down Expand Up @@ -711,6 +730,24 @@ def manage_domain(request):
else:
return render(request, 'ajax/ajaxError.html', {'error': "Unknown Parameters in POST!"})

def manage_instance(request):
    """
    Manage basic power interactions with an OS::Nova::Server resource
    in the deployed OpenStack stack.

    Expects a POST containing:
        topologyId -- the wistar topology whose stack owns the instance
        action     -- the action to request (start, stop, reboot)
        instanceId -- the physical resource id of the OS::Nova::Server

    :param request: Django HttpRequest carrying the POST parameters
    :return: the refreshed deployment status fragment on success, or an
        ajax error page when required parameters are missing
    """
    # set literal instead of set([...]) per modern idiom
    required_fields = {'topologyId', 'action', 'instanceId'}
    if not required_fields.issubset(request.POST):
        return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})

    instance_id = request.POST['instanceId']
    action = request.POST['action']
    topology_id = request.POST['topologyId']

    # delegate the actual nova call to the backend utility layer
    openstackUtils.manage_instance(instance_id, action)

    # re-render the deployment status so the UI reflects the new state
    return refresh_openstack_deployment_status(request, topology_id)

def manage_network(request):
required_fields = set(['networkName', 'action', 'topologyId'])
Expand Down Expand Up @@ -872,6 +909,8 @@ def multi_clone_topology(request):


def redeploy_topology(request):

logger.debug("---redeploy_topology---")
required_fields = set(['json', 'topologyId'])
if not required_fields.issubset(request.POST):
return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"})
Expand All @@ -886,40 +925,50 @@ def redeploy_topology(request):
return render(request, 'ajax/ajaxError.html', {'error': "Topology doesn't exist"})

try:
domains = libvirtUtils.get_domains_for_topology(topology_id)
config = wistarUtils.load_config_from_topology_json(topo.json, topology_id)

logger.debug('checking for orphaned domains first')
# find domains we no longer need
for d in domains:
logger.debug('checking domain: %s' % d['name'])
found = False
for config_device in config["devices"]:
if config_device['name'] == d['name']:
found = True
continue
if configuration.deployment_backend == "openstack":
# Updates the stack with the new heat template
# Should check first if the stack exists
# if the stack doesn't exist, just switch to deployment instead
#FIXME
update_stack(request, topology_id)

elif configuration.deployment_backend == "kvm":

domains = libvirtUtils.get_domains_for_topology(topology_id)
config = wistarUtils.load_config_from_topology_json(topo.json, topology_id)

logger.debug('checking for orphaned domains first')
# find domains we no longer need
for d in domains:
logger.debug('checking domain: %s' % d['name'])
found = False
for config_device in config["devices"]:
if config_device['name'] == d['name']:
found = True
continue

if not found:
logger.info("undefine domain: " + d["name"])
source_file = libvirtUtils.get_image_for_domain(d["uuid"])
if libvirtUtils.undefine_domain(d["uuid"]):
if source_file is not None:
osUtils.remove_instance(source_file)
if not found:
logger.info("undefine domain: " + d["name"])
source_file = libvirtUtils.get_image_for_domain(d["uuid"])
if libvirtUtils.undefine_domain(d["uuid"]):
if source_file is not None:
osUtils.remove_instance(source_file)

osUtils.remove_cloud_init_seed_dir_for_domain(d['name'])
osUtils.remove_cloud_init_seed_dir_for_domain(d['name'])

except Exception as e:
logger.debug("Caught Exception in redeploy")
logger.debug(str(e))
return render(request, 'ajax/ajaxError.html', {'error': str(e)})

# forward onto deploy topo
try:
inline_deploy_topology(config)
except Exception as e:
logger.debug("Caught Exception in inline_deploy")
logger.debug(str(e))
return render(request, 'ajax/ajaxError.html', {'error': str(e)})
# forward onto deploy topoloy if this is a kvm topology
if configuration.deployment_backend == "kvm":
try:
inline_deploy_topology(config)
except Exception as e:
logger.debug("Caught Exception in inline_deploy")
logger.debug(str(e))
return render(request, 'ajax/ajaxError.html', {'error': str(e)})

return refresh_deployment_status(request)

Expand Down Expand Up @@ -1475,6 +1524,7 @@ def deploy_stack(request, topology_id):
except ObjectDoesNotExist:
return render(request, 'error.html', {'error': "Topology not found!"})

heat_template =None
try:
# generate a stack name
# FIXME should add a check to verify this is a unique name
Expand Down Expand Up @@ -1506,6 +1556,41 @@ def deploy_stack(request, topology_id):
logger.debug(str(e))
return render(request, 'error.html', {'error': str(e)})

def update_stack(request, topology_id):
    """
    Update an already deployed Heat stack with a freshly generated template.

    Regenerates the heat template from the topology's saved JSON and asks
    OpenStack to update the stack in place. If no stack with that name
    exists yet, falls back to creating it instead.

    :param request: Django HttpRequest (used only for error rendering)
    :param topology_id: primary key of the Topology to (re)deploy
    :return: redirect to the topology detail page on success, or an
        error page on failure
    """
    try:
        topology = Topology.objects.get(pk=topology_id)
    except ObjectDoesNotExist:
        return render(request, 'error.html', {'error': "Topology not found!"})

    try:
        # stack names may not contain spaces
        stack_name = topology.name.replace(' ', '_')

        # let's parse the json and convert to simple lists and dicts
        logger.debug("loading config")
        config = wistarUtils.load_config_from_topology_json(topology.json, topology_id)
        logger.debug("Config is loaded")

        heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name)
        logger.debug("heat template created")

        if not openstackUtils.connect_to_openstack():
            return render(request, 'error.html', {'error': "Could not connect to Openstack"})

        result = openstackUtils.update_stack(stack_name, heat_template)
        # None indicates no existing stack with this name — is None, not == None (PEP 8)
        if result is None:
            logger.debug("Can't update stack since it doesn't exist, deploying")
            openstackUtils.create_stack(stack_name, heat_template)
        else:
            logger.debug(result)

        return HttpResponseRedirect('/topologies/' + topology_id + '/')

    except Exception as e:
        logger.debug("Caught Exception in update stack")
        logger.debug(str(e))
        return render(request, 'error.html', {'error': str(e)})


def delete_stack(request, topology_id):
"""
Expand Down
Loading