diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..e69de29bb
diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py
index 50036c1cc..c8bc0891f 100644
--- a/IM/InfrastructureInfo.py
+++ b/IM/InfrastructureInfo.py
@@ -25,6 +25,7 @@
from radl.radl import RADL, Feature, deploy, system, contextualize_item
from radl.radl_parse import parse_radl
from radl.radl_json import radlToSimple
+from IM.openid.JWT import JWT
from IM.config import Config
try:
from Queue import PriorityQueue
@@ -32,6 +33,7 @@
from queue import PriorityQueue
from IM.VirtualMachine import VirtualMachine
from IM.auth import Authentication
+from IM.tosca.Tosca import Tosca
class IncorrectVMException(Exception):
@@ -91,6 +93,8 @@ def __init__(self):
"""Flag to specify that the configuration threads of this inf has finished successfully or with errors."""
self.conf_threads = []
""" List of configuration threads."""
+ self.extra_info = {}
+ """ Extra information about the Infrastructure."""
self.last_access = datetime.now()
""" Time of the last access to this Inf. """
self.snapshots = []
@@ -116,6 +120,8 @@ def serialize(self):
odict['auth'] = odict['auth'].serialize()
if odict['radl']:
odict['radl'] = str(odict['radl'])
+ if odict['extra_info'] and "TOSCA" in odict['extra_info']:
+ odict['extra_info'] = {'TOSCA': odict['extra_info']['TOSCA'].serialize()}
return json.dumps(odict)
@staticmethod
@@ -130,6 +136,12 @@ def deserialize(str_data):
dic['auth'] = Authentication.deserialize(dic['auth'])
if dic['radl']:
dic['radl'] = parse_radl(dic['radl'])
+ if 'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']:
+ try:
+ dic['extra_info']['TOSCA'] = Tosca.deserialize(dic['extra_info']['TOSCA'])
+ except:
+ del dic['extra_info']['TOSCA']
+ InfrastructureInfo.logger.exception("Error deserializing TOSCA document")
newinf.__dict__.update(dic)
newinf.cloud_connector = None
# Set the ConfManager object and the lock to the data loaded
@@ -264,14 +276,14 @@ def update_radl(self, radl, deployed_vms):
"""
with self._lock:
- # Add new systems and networks only
+ # Add new networks only
for s in radl.systems + radl.networks + radl.ansible_hosts:
if not self.radl.add(s.clone(), "ignore"):
InfrastructureInfo.logger.warn(
"Ignoring the redefinition of %s %s" % (type(s), s.getId()))
# Add or update configures
- for s in radl.configures:
+ for s in radl.configures + radl.systems:
self.radl.add(s.clone(), "replace")
InfrastructureInfo.logger.warn(
"(Re)definition of %s %s" % (type(s), s.getId()))
@@ -571,6 +583,15 @@ def is_authorized(self, auth):
if self_im_auth[elem] != other_im_auth[elem]:
return False
+ if 'token' in self_im_auth:
+ if 'token' not in other_im_auth:
+ return False
+ decoded_token = JWT().get_info(other_im_auth['token'])
+ password = str(decoded_token['iss']) + str(decoded_token['sub'])
+ # check that the token provided is associated with the current owner of the inf.
+ if self_im_auth['password'] != password:
+ return False
+
return True
else:
return False
diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py
index ff804c2ad..81e04787b 100644
--- a/IM/InfrastructureManager.py
+++ b/IM/InfrastructureManager.py
@@ -34,6 +34,12 @@
from radl.radl import Feature, RADL
from radl.radl_json import dump_radl as dump_radl_json
+from IM.config import Config
+from IM.VirtualMachine import VirtualMachine
+from IM.openid.JWT import JWT
+from IM.openid.OpenIDClient import OpenIDClient
+
+
if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
from multiprocessing.pool import ThreadPool
@@ -438,6 +444,13 @@ def AddResource(inf_id, radl_data, auth, context=True, failed_clouds=None):
raise Exception(
"No correct VMRC auth data provided nor image URL")
+ if Config.SINGLE_SITE:
+ image_id = os.path.basename(s.getValue("disk.0.image.url"))
+ url_prefix = Config.SINGLE_SITE_IMAGE_URL_PREFIX
+ if not url_prefix.endswith("/"):
+ url_prefix = url_prefix + "/"
+ s.setValue("disk.0.image.url", url_prefix + image_id)
+
# Remove the requested apps from the system
s_without_apps = radl.get_system_by_name(system_id).clone()
s_without_apps.delValue("disk.0.applications")
@@ -1192,6 +1205,78 @@ def check_im_user(auth):
else:
return True
+ @staticmethod
+ def check_oidc_token(im_auth):
+ token = im_auth["token"]
+ success = False
+ try:
+ # decode the token to get the info
+ decoded_token = JWT().get_info(token)
+ except Exception as ex:
+ InfrastructureManager.logger.exception("Error trying decode OIDC auth token: %s" % str(ex))
+ raise Exception("Error trying to decode OIDC auth token: %s" % str(ex))
+
+        # First check if the issuer is valid
+ if decoded_token['iss'] not in Config.OIDC_ISSUERS:
+ InfrastructureManager.logger.error("Incorrect OIDC issuer: %s" % decoded_token['iss'])
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. Issuer not accepted.")
+
+ # Now check the audience
+ if Config.OIDC_AUDIENCE:
+ if 'aud' in decoded_token and decoded_token['aud']:
+ found = False
+ for aud in decoded_token['aud'].split(","):
+ if aud == Config.OIDC_AUDIENCE:
+ found = True
+ break
+ if found:
+ InfrastructureManager.logger.debug("Audience %s successfully checked." % Config.OIDC_AUDIENCE)
+ else:
+ InfrastructureManager.logger.error("Audience %s not found in access token." % Config.OIDC_AUDIENCE)
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. Audience not accepted.")
+ else:
+ InfrastructureManager.logger.error("Audience %s not found in access token." % Config.OIDC_AUDIENCE)
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. Audience not accepted.")
+
+ if Config.OIDC_SCOPES and Config.OIDC_CLIENT_ID and Config.OIDC_CLIENT_SECRET:
+ success, res = OpenIDClient.get_token_introspection(token,
+ Config.OIDC_CLIENT_ID,
+ Config.OIDC_CLIENT_SECRET)
+ if not success:
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. "
+ "Invalid token or Client credentials.")
+ else:
+ if not res["scope"]:
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. "
+ "No scope obtained from introspection.")
+ else:
+ scopes = res["scope"].split(" ")
+ if not all([elem in scopes for elem in Config.OIDC_SCOPES]):
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. Scopes %s "
+ "not in introspection scopes: %s" % (" ".join(Config.OIDC_SCOPES),
+ res["scope"]))
+
+ # Now check if the token is not expired
+ expired, msg = OpenIDClient.is_access_token_expired(token)
+ if expired:
+ InfrastructureManager.logger.error("OIDC auth %s." % msg)
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. OIDC auth %s." % msg)
+
+ try:
+ # Now try to get user info
+ success, userinfo = OpenIDClient.get_user_info_request(token)
+ if success:
+ # convert to username to use it in the rest of the IM
+ im_auth['username'] = str(userinfo.get("preferred_username"))
+ im_auth['password'] = str(decoded_token['iss']) + str(userinfo.get("sub"))
+ except Exception as ex:
+ InfrastructureManager.logger.exception("Error trying to validate OIDC auth token: %s" % str(ex))
+ raise Exception("Error trying to validate OIDC auth token: %s" % str(ex))
+
+ if not success:
+ InfrastructureManager.logger.error("Incorrect OIDC auth token: %s" % userinfo)
+ raise InvaliddUserException("Invalid InfrastructureManager credentials. %s." % userinfo)
+
@staticmethod
def check_auth_data(auth):
# First check if it is configured to check the users from a list
@@ -1200,10 +1285,26 @@ def check_auth_data(auth):
if not im_auth:
raise IncorrectVMCrecentialsException("No credentials provided for the InfrastructureManager.")
- # if not assume the basic user/password auth data
+ # First check if an OIDC token is included
+ if "token" in im_auth[0]:
+ InfrastructureManager.check_oidc_token(im_auth[0])
+
+        # Now check if the user is authorized
if not InfrastructureManager.check_im_user(im_auth):
raise InvaliddUserException()
+ if Config.SINGLE_SITE:
+ vmrc_auth = auth.getAuthInfo("VMRC")
+ single_site_auth = auth.getAuthInfo(Config.SINGLE_SITE_TYPE)
+
+ single_site_auth[0]["host"] = Config.SINGLE_SITE_AUTH_HOST
+
+ auth_list = []
+ auth_list.extend(im_auth)
+ auth_list.extend(vmrc_auth)
+ auth_list.extend(single_site_auth)
+ auth = Authentication(auth_list)
+
# We have to check if TTS is needed for other auth item
return auth
diff --git a/IM/REST.py b/IM/REST.py
index 9b50f40e4..783399898 100644
--- a/IM/REST.py
+++ b/IM/REST.py
@@ -17,6 +17,7 @@
import logging
import threading
import json
+import base64
import bottle
from IM.InfrastructureInfo import IncorrectVMException, DeletedVMException
@@ -27,6 +28,7 @@
from IM.config import Config
from radl.radl_json import parse_radl as parse_radl_json, dump_radl as dump_radl_json, featuresToSimple, radlToSimple
from radl.radl import RADL, Features, Feature
+from IM.tosca.Tosca import Tosca
logger = logging.getLogger('InfrastructureManager')
@@ -168,8 +170,36 @@ def get_auth_header():
Get the Authentication object from the AUTHORIZATION header
replacing the new line chars.
"""
- auth_data = bottle.request.headers[
- 'AUTHORIZATION'].replace(AUTH_NEW_LINE_SEPARATOR, "\n")
+ auth_header = bottle.request.headers['AUTHORIZATION']
+ if Config.SINGLE_SITE:
+ if auth_header.startswith("Basic "):
+ auth_data = base64.b64decode(auth_header[6:])
+ user_pass = auth_data.split(":")
+ im_auth = {"type": "InfrastructureManager",
+ "username": user_pass[0],
+ "password": user_pass[1]}
+ single_site_auth = {"type": Config.SINGLE_SITE_TYPE,
+ "host": Config.SINGLE_SITE_AUTH_HOST,
+ "username": user_pass[0],
+ "password": user_pass[1]}
+ return Authentication([im_auth, single_site_auth])
+ elif auth_header.startswith("Bearer "):
+ token = auth_header[7:].strip()
+ im_auth = {"type": "InfrastructureManager",
+ "username": "user",
+ "token": token}
+ if Config.SINGLE_SITE_TYPE == "OpenStack":
+ single_site_auth = {"type": Config.SINGLE_SITE_TYPE,
+ "host": Config.SINGLE_SITE_AUTH_HOST,
+ "username": "indigo-dc",
+ "tenant": "oidc",
+ "password": token}
+ else:
+ single_site_auth = {"type": Config.SINGLE_SITE_TYPE,
+ "host": Config.SINGLE_SITE_AUTH_HOST,
+ "token": token}
+ return Authentication([im_auth, single_site_auth])
+ auth_data = auth_header.replace(AUTH_NEW_LINE_SEPARATOR, "\n")
auth_data = auth_data.split(AUTH_LINE_SEPARATOR)
return Authentication(Authentication.read_auth_data(auth_data))
@@ -326,6 +356,19 @@ def RESTGetInfrastructureProperty(infid=None, prop=None):
bottle.response.content_type = "application/json"
res = InfrastructureManager.GetInfrastructureState(infid, auth)
return format_output(res, default_type="application/json", field_name="state")
+ elif prop == "outputs":
+ accept = get_media_type('Accept')
+ if accept and "application/json" not in accept and "*/*" not in accept and "application/*" not in accept:
+ return return_error(415, "Unsupported Accept Media Types: %s" % accept)
+ bottle.response.content_type = "application/json"
+ auth = InfrastructureManager.check_auth_data(auth)
+ sel_inf = InfrastructureManager.get_infrastructure(infid, auth)
+ if "TOSCA" in sel_inf.extra_info:
+ res = sel_inf.extra_info["TOSCA"].get_outputs(sel_inf)
+ else:
+ bottle.abort(
+ 403, "'outputs' infrastructure property is not valid in this infrastructure")
+ return format_output(res, default_type="application/json", field_name="outputs")
else:
return return_error(404, "Incorrect infrastructure property")
@@ -377,10 +420,14 @@ def RESTCreateInfrastructure():
try:
content_type = get_media_type('Content-Type')
radl_data = bottle.request.body.read().decode("utf-8")
+ tosca_data = None
if content_type:
if "application/json" in content_type:
radl_data = parse_radl_json(radl_data)
+ elif "text/yaml" in content_type:
+ tosca_data = Tosca(radl_data)
+ _, radl_data = tosca_data.to_radl()
elif "text/plain" in content_type or "*/*" in content_type or "text/*" in content_type:
content_type = "text/plain"
else:
@@ -388,6 +435,11 @@ def RESTCreateInfrastructure():
inf_id = InfrastructureManager.CreateInfrastructure(radl_data, auth)
+ # Store the TOSCA document
+ if tosca_data:
+ sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
+ sel_inf.extra_info['TOSCA'] = tosca_data
+
bottle.response.headers['InfID'] = inf_id
bottle.response.content_type = "text/uri-list"
protocol = "http://"
@@ -483,17 +535,35 @@ def RESTAddResource(infid=None):
content_type = get_media_type('Content-Type')
radl_data = bottle.request.body.read().decode("utf-8")
+ tosca_data = None
+ remove_list = []
if content_type:
if "application/json" in content_type:
radl_data = parse_radl_json(radl_data)
+ elif "text/yaml" in content_type:
+ tosca_data = Tosca(radl_data)
+ auth = InfrastructureManager.check_auth_data(auth)
+ sel_inf = InfrastructureManager.get_infrastructure(infid, auth)
+ # merge the current TOSCA with the new one
+ if isinstance(sel_inf.extra_info['TOSCA'], Tosca):
+ tosca_data = sel_inf.extra_info['TOSCA'].merge(tosca_data)
+ remove_list, radl_data = tosca_data.to_radl(sel_inf)
elif "text/plain" in content_type or "*/*" in content_type or "text/*" in content_type:
content_type = "text/plain"
else:
return return_error(415, "Unsupported Media Type %s" % content_type)
+ if remove_list:
+ InfrastructureManager.RemoveResource(infid, remove_list, auth, context)
+
vm_ids = InfrastructureManager.AddResource(infid, radl_data, auth, context)
+ # Replace the TOSCA document
+ if tosca_data:
+ sel_inf = InfrastructureManager.get_infrastructure(infid, auth)
+ sel_inf.extra_info['TOSCA'] = tosca_data
+
protocol = "http://"
if Config.REST_SSL:
protocol = "https://"
diff --git a/IM/config.py b/IM/config.py
index 5921607a6..0f0113069 100644
--- a/IM/config.py
+++ b/IM/config.py
@@ -89,8 +89,17 @@ class Config:
CONFMAMAGER_CHECK_STATE_INTERVAL = 5
UPDATE_CTXT_LOG_INTERVAL = 20
ANSIBLE_INSTALL_TIMEOUT = 500
+ SINGLE_SITE = False
+ SINGLE_SITE_TYPE = ''
+ SINGLE_SITE_AUTH_HOST = ''
+ SINGLE_SITE_IMAGE_URL_PREFIX = ''
+ OIDC_ISSUERS = ["https://iam-test.indigo-datacloud.eu/"]
+ OIDC_AUDIENCE = None
INF_CACHE_TIME = None
VMINFO_JSON = False
+ OIDC_CLIENT_ID = None
+ OIDC_CLIENT_SECRET = None
+ OIDC_SCOPES = []
VM_NUM_USE_CTXT_DIST = 30
config = ConfigParser()
@@ -105,11 +114,23 @@ class Config:
if 'IM_DATA_DB' in os.environ:
Config.DATA_DB = os.environ['IM_DATA_DB']
+if 'IM_SINGLE_SITE_ONE_HOST' in os.environ:
+ Config.SINGLE_SITE = True
+ Config.SINGLE_SITE_TYPE = 'OpenNebula'
+ Config.SINGLE_SITE_AUTH_HOST = 'http://%s:2633' % os.environ['IM_SINGLE_SITE_ONE_HOST']
+ Config.SINGLE_SITE_IMAGE_URL_PREFIX = 'one://%s/' % os.environ['IM_SINGLE_SITE_ONE_HOST']
+
class ConfigOpenNebula:
TEMPLATE_CONTEXT = ''
TEMPLATE_OTHER = 'GRAPHICS = [type="vnc",listen="0.0.0.0"]'
IMAGE_UNAME = ''
+ TTS_URL = 'https://localhost:8443'
if config.has_section("OpenNebula"):
parse_options(config, 'OpenNebula', ConfigOpenNebula)
+
+
+# In this case assume that the TTS server is in the same server
+if 'IM_SINGLE_SITE_ONE_HOST' in os.environ:
+ ConfigOpenNebula.TTS_URL = 'https://%s:8443' % os.environ['IM_SINGLE_SITE_ONE_HOST']
diff --git a/IM/connectors/OpenNebula.py b/IM/connectors/OpenNebula.py
index 733d8e53a..05644f835 100644
--- a/IM/connectors/OpenNebula.py
+++ b/IM/connectors/OpenNebula.py
@@ -31,6 +31,7 @@
from IM.config import ConfigOpenNebula
from netaddr import IPNetwork, IPAddress
from IM.config import Config
+from IM.tts.onetts import ONETTSClient
# Set of classes to parse the XML results of the ONE API
@@ -239,8 +240,15 @@ def getSessionID(self, auth_data, hash_password=None):
hash_password = True
if hash_password:
passwd = hashlib.sha1(passwd.strip().encode('utf-8')).hexdigest()
-
return auth['username'] + ":" + passwd
+ elif 'token' in auth:
+ username, passwd = ONETTSClient.get_auth_from_tts(ConfigOpenNebula.TTS_URL,
+ self.cloud.server, auth['token'])
+ if not username or not passwd:
+ raise Exception("Error getting ONE credentials using TTS.")
+ auth["username"] = username
+ auth["password"] = passwd
+ return username + ":" + passwd
else:
raise Exception("No correct auth data has been specified to OpenNebula: username and password")
@@ -613,7 +621,7 @@ def getONETemplate(self, radl, sgs, auth_data):
while system.getValue("disk." + str(cont) + ".image.url") or system.getValue("disk." + str(cont) + ".size"):
disk_image = system.getValue("disk." + str(cont) + ".image.url")
if disk_image:
- disks += '\nDISK = [ IMAGE_ID = "%s" ]\n' % uriparse(disk_image)[
+ disks += 'DISK = [ IMAGE_ID = "%s" ]\n' % uriparse(disk_image)[
2][1:]
else:
disk_size = system.getFeature(
diff --git a/IM/openid/JWT.py b/IM/openid/JWT.py
new file mode 100644
index 000000000..6ee26c3c7
--- /dev/null
+++ b/IM/openid/JWT.py
@@ -0,0 +1,73 @@
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Class to unpack the JWT IAM tokens
+"""
+import json
+import base64
+import re
+
+
+class JWT(object):
+
+ @staticmethod
+ def b64d(b):
+ """Decode some base64-encoded bytes.
+
+ Raises Exception if the string contains invalid characters or padding.
+
+ :param b: bytes
+ """
+
+ cb = b.rstrip(b"=") # shouldn't but there you are
+
+ # Python's base64 functions ignore invalid characters, so we need to
+ # check for them explicitly.
+ b64_re = re.compile(b"^[A-Za-z0-9_-]*$")
+ if not b64_re.match(cb):
+ raise Exception(cb, "base64-encoded data contains illegal characters")
+
+ if cb == b:
+ b = JWT.add_padding(b)
+
+ return base64.urlsafe_b64decode(b)
+
+ @staticmethod
+ def add_padding(b):
+ # add padding chars
+ m = len(b) % 4
+ if m == 1:
+ # NOTE: for some reason b64decode raises *TypeError* if the
+ # padding is incorrect.
+ raise Exception(b, "incorrect padding")
+ elif m == 2:
+ b += b"=="
+ elif m == 3:
+ b += b"="
+ return b
+
+ @staticmethod
+ def get_info(token):
+ """
+ Unpacks a JWT into its parts and base64 decodes the parts
+ individually, returning the part 1 json decoded, where the
+ token info is stored.
+
+ :param token: The JWT token
+ """
+ part = tuple(token.encode("utf-8").split(b"."))
+ part = [JWT.b64d(p) for p in part]
+ return json.loads(part[1].decode("utf-8"))
diff --git a/IM/openid/OpenIDClient.py b/IM/openid/OpenIDClient.py
new file mode 100644
index 000000000..6e6d9d96e
--- /dev/null
+++ b/IM/openid/OpenIDClient.py
@@ -0,0 +1,79 @@
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+'''
+Class to contact with an OpenID server
+'''
+import requests
+import json
+import time
+from .JWT import JWT
+
+
+class OpenIDClient(object):
+
+ VERIFY_SSL = False
+
+ @staticmethod
+ def get_user_info_request(token):
+ """
+ Get a the user info from a token
+ """
+ try:
+ decoded_token = JWT().get_info(token)
+ headers = {'Authorization': 'Bearer %s' % token}
+ url = "%s%s" % (decoded_token['iss'], "/userinfo")
+ resp = requests.request("GET", url, verify=OpenIDClient.VERIFY_SSL, headers=headers)
+ if resp.status_code != 200:
+ return False, "Code: %d. Message: %s." % (resp.status_code, resp.text)
+ return True, json.loads(resp.text)
+ except Exception as ex:
+ return False, str(ex)
+
+ @staticmethod
+ def get_token_introspection(token, client_id, client_secret):
+ """
+ Get token introspection
+ """
+ try:
+ decoded_token = JWT().get_info(token)
+ url = "%s%s" % (decoded_token['iss'], "/introspect?token=%s&token_type_hint=access_token" % token)
+ resp = requests.request("GET", url, verify=OpenIDClient.VERIFY_SSL,
+ auth=requests.auth.HTTPBasicAuth(client_id, client_secret))
+ if resp.status_code != 200:
+ return False, "Code: %d. Message: %s." % (resp.status_code, resp.text)
+ return True, json.loads(resp.text)
+ except Exception as ex:
+ return False, str(ex)
+
+ @staticmethod
+ def is_access_token_expired(token):
+ """
+ Check if the current access token is expired
+ """
+ if token:
+ try:
+ decoded_token = JWT().get_info(token)
+ now = int(time.time())
+ expires = int(decoded_token['exp'])
+ validity = expires - now
+ if validity < 0:
+ return True, "Token expired"
+ else:
+ return False, "Valid Token for %d seconds" % validity
+ except:
+ return True, "Error getting token info"
+ else:
+ return True, "No token specified"
diff --git a/IM/openid/__init__.py b/IM/openid/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/IM/tosca/Tosca.py b/IM/tosca/Tosca.py
new file mode 100644
index 000000000..1754c2831
--- /dev/null
+++ b/IM/tosca/Tosca.py
@@ -0,0 +1,1431 @@
+import os
+import logging
+import yaml
+import copy
+try:
+ from urllib.request import urlopen
+except:
+ from urllib import urlopen
+try:
+ unicode("hola")
+except NameError:
+ unicode = str
+
+from IM.uriparse import uriparse
+from toscaparser.tosca_template import ToscaTemplate
+from toscaparser.elements.interfaces import InterfacesDef
+from toscaparser.functions import Function, is_function, get_function, GetAttribute, Concat, Token
+from radl.radl import system, deploy, network, Feature, Features, configure, contextualize_item, RADL, contextualize
+
+
+class Tosca:
+ """
+ Class to translate a TOSCA document to an RADL object.
+
+ TODO: What about CSAR files?
+
+ """
+
+ ARTIFACTS_PATH = os.path.dirname(
+ os.path.realpath(__file__)) + "/tosca-types/artifacts"
+ ARTIFACTS_REMOTE_REPO = "https://raw.githubusercontent.com/indigo-dc/tosca-types/master/artifacts/"
+
+ logger = logging.getLogger('InfrastructureManager')
+
+ def __init__(self, yaml_str):
+ Tosca.logger.debug("TOSCA: %s" % yaml_str)
+ self.yaml = yaml.load(yaml_str)
+ self.tosca = ToscaTemplate(yaml_dict_tpl=copy.deepcopy(self.yaml))
+
+ def serialize(self):
+ return yaml.dump(self.yaml)
+
+ @staticmethod
+ def deserialize(str_data):
+ return Tosca(str_data)
+
+ def _get_cloud_id(self, sys_name):
+ """
+ Get the cloud ID of the deployment based on policies
+ """
+ for policy in self.tosca.policies:
+ if policy.type_definition.type == "tosca.policies.Placement":
+ node_list = []
+ if policy.targets_type == "node_templates":
+ node_list = policy.targets_list
+ elif policy.targets_type == "groups":
+ for group in policy.targets_list:
+ node_list.extend(group.member_nodes)
+
+ for node in node_list:
+ if node.name == sys_name:
+ if 'cloud_id' in policy.properties:
+ Tosca.logger.debug("Set cloud id: %s to system: %s." % (policy.properties['cloud_id'],
+ sys_name))
+ return policy.properties['cloud_id']
+ else:
+ Tosca.logger.warn("Policy %s not supported. Ignoring it." % policy.type_definition.type)
+
+ return None
+
+ def to_radl(self, inf_info=None):
+ """
+ Converts the current ToscaTemplate object in a RADL object
+ If the inf_info parameter is not None, it is an AddResource and
+ we must check the number of resources to correctly compute the
+ number of nodes to deploy
+ """
+
+ all_removal_list = []
+ relationships = []
+ for node in self.tosca.nodetemplates:
+ # Store relationships to check later
+ for relationship, trgt in node.relationships.items():
+ src = node
+ relationships.append((src, trgt, relationship))
+
+ radl = RADL()
+ interfaces = {}
+ cont_intems = []
+
+ for node in self.tosca.nodetemplates:
+ root_type = Tosca._get_root_parent_type(node).type
+
+ if root_type == "tosca.nodes.BlockStorage":
+ # The BlockStorage disks are processed later
+ pass
+ elif root_type == "tosca.nodes.network.Port":
+ pass
+ elif root_type == "tosca.nodes.network.Network":
+ # TODO: check IM to support more network properties
+ # At this moment we only support the network_type with values,
+ # private and public
+ net = Tosca._gen_network(node)
+ radl.networks.append(net)
+ else:
+ if root_type == "tosca.nodes.Compute":
+ # Add the system RADL element
+ sys = self._gen_system(node, self.tosca.nodetemplates)
+ # add networks using the simple method with the public_ip
+ # property
+ self._add_node_nets(
+ node, radl, sys, self.tosca.nodetemplates)
+ radl.systems.append(sys)
+ # Add the deploy element for this system
+ min_instances, _, default_instances, count, removal_list = self._get_scalable_properties(
+ node)
+ if count is not None:
+ # we must check the correct number of instances to
+ # deploy
+ num_instances = count
+ elif default_instances is not None:
+ num_instances = default_instances
+ elif min_instances is not None:
+ num_instances = min_instances
+ else:
+ num_instances = 1
+
+ current_num_instances = self._get_num_instances(sys.name, inf_info)
+ num_instances = num_instances - current_num_instances
+ Tosca.logger.debug("User requested %d instances of type %s and there"
+ " are %s" % (num_instances, sys.name, current_num_instances))
+
+ # TODO: Think about to check the IDs of the VMs
+ if num_instances < 0:
+ all_removal_list.extend(removal_list[0:-num_instances])
+
+ if num_instances > 0:
+ cloud_id = self._get_cloud_id(sys.name)
+ dep = deploy(sys.name, num_instances, cloud_id)
+ radl.deploys.append(dep)
+ compute = node
+ else:
+ # Select the host to host this element
+ compute = self._find_host_compute(
+ node, self.tosca.nodetemplates)
+ if not compute:
+ Tosca.logger.warn(
+ "Node %s has not compute node to host in." % node.name)
+
+ interfaces = Tosca._get_interfaces(node)
+ interfaces.update(Tosca._get_relationships_interfaces(relationships, node))
+
+ conf = self._gen_configure_from_interfaces(
+ radl, node, interfaces, compute)
+ if conf:
+ level = Tosca._get_dependency_level(node)
+ radl.configures.append(conf)
+ if compute:
+ cont_intems.append(contextualize_item(
+ compute.name, conf.name, level))
+
+ if cont_intems:
+ radl.contextualize = contextualize(cont_intems)
+ else:
+ # If there are no configures, disable contextualization
+ radl.contextualize = contextualize({})
+
+ self._order_deploys(radl)
+
+ return all_removal_list, self._complete_radl_networks(radl)
+
+ def _order_deploys(self, radl):
+ """
+        Order the RADL deploys to assure VMs with Public IPs are set at the beginning
+ (to avoid problems with cluster configuration)
+ """
+ pub = []
+ priv = []
+ for d in radl.deploys:
+ if radl.hasPublicNet(d.id):
+ pub.append(d)
+ else:
+ priv.append(d)
+
+ radl.deploys = pub + priv
+
+ def _get_num_instances(self, sys_name, inf_info):
+ """
+ Get the current number of instances of system type name sys_name
+ """
+ current_num = 0
+
+ if inf_info:
+ vm_list = inf_info.get_vm_list_by_system_name()
+ if sys_name in vm_list:
+ current_num = len(vm_list[sys_name])
+
+ return current_num
+
+ @staticmethod
+ def _format_outports(ports_dict):
+ res = ""
+ for port in ports_dict.values():
+ protocol = "tcp"
+ source_range = None
+ if "protocol" in port:
+ protocol = port["protocol"]
+ if "source_range" in port:
+ source_range = port["source_range"]
+ else:
+ if "source" in port:
+ remote_port = port["source"]
+ if "target" in port:
+ local_port = port["target"]
+ else:
+ local_port = remote_port
+
+ # In case of source_range do not use port mapping only direct ports
+ if source_range:
+ if res:
+ res += ","
+ res += "%s:%s/%s" % (source_range[0], source_range[1], protocol)
+ else:
+ if res:
+ res += ","
+ res += "%s/%s-%s/%s" % (remote_port, protocol, local_port, protocol)
+
+ return res
+
+ def _add_node_nets(self, node, radl, system, nodetemplates):
+ # Find associated Networks
+ nets = Tosca._get_bind_networks(node, nodetemplates)
+ if nets:
+ # If there are network nodes, use it to define system network
+ # properties
+ for net_name, ip, dns_name, num in nets:
+ system.setValue('net_interface.%d.connection' % num, net_name)
+ # This is not a normative property
+ if dns_name:
+ system.setValue('net_interface.%d.dns_name' %
+ num, dns_name)
+ if ip:
+ system.setValue('net_interface.%d.ip' % num, ip)
+ else:
+ public_ip = False
+ private_ip = True
+
+ # This is the solution using the deprecated public_ip property
+ node_props = node.get_properties()
+ if node_props and "public_ip" in node_props:
+ public_ip = self._final_function_result(node_props["public_ip"].value, node)
+
+ # This is the solution using endpoints
+ net_provider_id = None
+ dns_name = None
+ ports = {}
+ node_caps = node.get_capabilities()
+ if node_caps:
+ if "endpoint" in node_caps:
+ cap_props = node_caps["endpoint"].get_properties()
+ if cap_props and "network_name" in cap_props:
+ network_name = str(self._final_function_result(cap_props["network_name"].value, node))
+ if network_name == "PUBLIC":
+ public_ip = True
+ # In this case the user is specifying the provider_id
+ elif network_name.endswith(".PUBLIC"):
+ public_ip = True
+ parts = network_name.split(".")
+ net_provider_id = ".".join(parts[:-1])
+ elif network_name.endswith(".PRIVATE"):
+ parts = network_name.split(".")
+ net_provider_id = ".".join(parts[:-1])
+ elif network_name != "PRIVATE":
+ # assume that is a private one
+ net_provider_id = network_name
+ if cap_props and "dns_name" in cap_props:
+ dns_name = self._final_function_result(cap_props["dns_name"].value, node)
+ if cap_props and "private_ip" in cap_props:
+ private_ip = self._final_function_result(cap_props["private_ip"].value, node)
+ if cap_props and "ports" in cap_props:
+ ports = self._final_function_result(cap_props["ports"].value, node)
+
+ # The private net is always added
+ if not public_ip or private_ip:
+ private_nets = []
+ for net in radl.networks:
+ if not net.isPublic():
+ private_nets.append(net)
+
+ if private_nets:
+ private_net = None
+ for net in private_nets:
+ num_net = system.getNumNetworkWithConnection(net.id)
+ if num_net is not None:
+ private_net = net
+ break
+
+ if not private_net:
+                    # There are private nets but none has been used in this
+                    # VM
+ private_net = private_nets[0]
+ num_net = system.getNumNetworkIfaces()
+ else:
+                # There is no private net, create one
+ private_net = network.createNetwork("private_net", False)
+ radl.networks.append(private_net)
+ num_net = system.getNumNetworkIfaces()
+
+ system.setValue('net_interface.' + str(num_net) + '.connection', private_net.id)
+
+ # If the node needs a public IP
+ if public_ip:
+ # Always create a public IP per VM
+ # to enable to specify different outports
+ net_name = "public_net"
+ i = 1
+ while radl.get_network_by_id(net_name) is not None:
+ net_name = "public_net_%d" % i
+ i += 1
+ public_net = network.createNetwork(net_name, True)
+ radl.networks.append(public_net)
+ num_net = system.getNumNetworkIfaces()
+
+ if ports:
+ outports = Tosca._format_outports(ports)
+ if public_net.getValue("outports"):
+ outports = "%s,%s" % (public_net.getValue("outports"), outports)
+ public_net.setValue("outports", outports)
+ if net_provider_id:
+ public_net.setValue("provider_id", net_provider_id)
+
+ system.setValue('net_interface.%d.connection' % num_net, public_net.id)
+
+ if not public_ip and net_provider_id:
+ private_net.setValue("provider_id", net_provider_id)
+
+ if dns_name:
+ system.setValue('net_interface.0.dns_name', dns_name)
+
+ def _get_scalable_properties(self, node):
+ count = min_instances = max_instances = default_instances = None
+ removal_list = []
+ scalable = node.get_capability("scalable")
+ if scalable:
+ for prop in scalable.get_properties_objects():
+ if prop.value is not None:
+ final_value = self._final_function_result(prop.value, node)
+ if prop.name == "count":
+ count = final_value
+ elif prop.name == "max_instances":
+ max_instances = final_value
+ elif prop.name == "min_instances":
+ min_instances = final_value
+ elif prop.name == "default_instances":
+ default_instances = final_value
+ elif prop.name == "removal_list":
+ removal_list = final_value
+
+ return min_instances, max_instances, default_instances, count, removal_list
+
+ @staticmethod
+ def _get_relationship_template(rel, src, trgt):
+ rel_tpls = src.get_relationship_template()
+ rel_tpls.extend(trgt.get_relationship_template())
+ for rel_tpl in rel_tpls:
+ if rel.type == rel_tpl.type:
+ return rel_tpl
+
+ @staticmethod
+ def _get_relationships_interfaces(relationships, node):
+ res = {}
+ for src, trgt, rel in relationships:
+ rel_tpl = Tosca._get_relationship_template(rel, src, trgt)
+ if rel_tpl.interfaces:
+ if src.name == node.name:
+ for name in ['pre_configure_source', 'post_configure_source', 'add_source']:
+ for iface in rel_tpl.interfaces:
+ if iface.name == name:
+ res[name] = iface
+ elif trgt.name == node.name:
+ for name in ['pre_configure_target', 'post_configure_target', 'add_target',
+ 'target_changed', 'remove_target']:
+ for iface in rel_tpl.interfaces:
+ if iface.name == name:
+ res[name] = iface
+ return res
+
+ def _get_artifact_full_uri(self, node, artifact_name):
+ res = None
+ artifacts = node.type_definition.get_value(
+ 'artifacts', node.entity_tpl, True)
+ if artifacts:
+ for name, artifact in artifacts.items():
+ if name == artifact_name:
+ if isinstance(artifact, dict):
+ res = artifact['file']
+ if 'repository' in artifact:
+ repo = artifact['repository']
+ repositories = self.tosca.tpl.get('repositories')
+
+ if repositories:
+ for repo_name, repo_def in repositories.items():
+ if repo_name == repo:
+ repo_url = (
+ (repo_def['url']).strip()).rstrip("//")
+ res = repo_url + "/" + artifact['file']
+ else:
+ res = artifact
+
+ return res
+
+ def _get_implementation_url(self, node, implementation):
+ res = implementation
+ if implementation:
+ artifact_url = self._get_artifact_full_uri(node, implementation)
+ if artifact_url:
+ res = artifact_url
+
+ return res
+
    def _gen_configure_from_interfaces(self, radl, node, interfaces, compute):
        """
        Build the RADL 'configure' element with the Ansible recipes generated
        from the node's TOSCA interface operations.

        Arguments:
           - radl: RADL object (not used in the current body).
           - node: node template whose interfaces are processed.
           - interfaces(dict): operation name -> interface object.
           - compute: hosting Compute node (not used in the current body).
        Returns: a configure RADL element, or None if there is nothing to do.
        Raises: Exception if an input value is invalid or an implementation
        script cannot be retrieved.
        """
        if not interfaces:
            return None

        variables = ""
        tasks = ""
        recipe_list = []
        remote_artifacts_path = "/tmp"
        # Take the interfaces in correct order
        for name in ['create', 'pre_configure_source', 'pre_configure_target', 'configure',
                     'post_configure_source', 'post_configure_target', 'start', 'add_target',
                     'add_source', 'target_changed', 'remove_target']:
            interface = interfaces.get(name, None)
            if interface:
                artifacts = []
                # Get the inputs
                env = {}
                if interface.inputs:
                    for param_name, param_value in interface.inputs.items():
                        val = None

                        if self._is_artifact(param_value):
                            # get_artifact input: the value becomes the remote
                            # path where the artifact will be downloaded
                            artifact_uri = self._get_artifact_uri(
                                param_value, node)
                            if artifact_uri:
                                val = remote_artifacts_path + "/" + \
                                    os.path.basename(artifact_uri)
                                artifacts.append(artifact_uri)
                        else:
                            val = self._final_function_result(
                                param_value, node)

                        if val is not None:
                            env[param_name] = val
                        else:
                            raise Exception("input value for %s in interface %s of node %s not valid" % (
                                param_name, name, node.name))

                # NOTE: rebinds the loop variable 'name' (operation name) to
                # the task name used in the recipes below
                name = node.name + "_" + interface.name

                # if there are artifacts to download
                if artifacts:
                    for artifact in artifacts:
                        tasks += " - name: Download artifact " + artifact + "\n"
                        tasks += " get_url: dest=" + remote_artifacts_path + "/" + \
                            os.path.basename(artifact) + \
                            " url='" + artifact + "'\n"

                implementation_url = uriparse(
                    self._get_implementation_url(node, interface.implementation))

                if implementation_url[0] in ['http', 'https', 'ftp']:
                    script_path = implementation_url[2]
                    try:
                        response = urlopen(interface.implementation)
                        script_content = response.read()
                        # NOTE(review): this status check runs after read();
                        # urlopen normally raises HTTPError for non-2xx first
                        if response.code != 200:
                            raise Exception("")
                    except Exception as ex:
                        raise Exception("Error downloading the implementation script '%s': %s" % (
                            interface.implementation, str(ex)))
                else:
                    # Local artifact: look in the artifacts folder first and
                    # fall back to the remote artifacts repository
                    script_path = os.path.join(
                        Tosca.ARTIFACTS_PATH, interface.implementation)
                    if os.path.isfile(script_path):
                        f = open(script_path)
                        script_content = f.read()
                        f.close()
                    else:
                        try:
                            response = urlopen(Tosca.ARTIFACTS_REMOTE_REPO + interface.implementation)
                            script_content = response.read()
                            if response.code != 200:
                                raise Exception("")
                        except Exception as ex:
                            raise Exception("Implementation file: '%s' is not located in the artifacts folder '%s' "
                                            "or in the artifacts remote url '%s'." % (interface.implementation,
                                                                                      Tosca.ARTIFACTS_PATH,
                                                                                      Tosca.ARTIFACTS_REMOTE_REPO))

                if script_path.endswith(".yaml") or script_path.endswith(".yml"):
                    # YAML implementation: inputs become play-level variables
                    if env:
                        for var_name, var_value in env.items():
                            if isinstance(var_value, str) and not var_value.startswith("|"):
                                var_value = '"%s"' % var_value
                            else:
                                var_value = str(var_value)
                            variables += '    %s: %s ' % (var_name, var_value) + "\n"
                        variables += "\n"

                    script_content = self._remove_recipe_header(script_content)
                    recipe_list.append(script_content)
                else:
                    # Shell-script implementation: copy it to the VM and run
                    # it with the inputs exported as environment variables
                    recipe = "- tasks:\n"
                    recipe += " - name: Copy contents of script of interface " + name + "\n"
                    recipe += " copy: dest=/tmp/" + \
                        os.path.basename(script_path) + " content='" + \
                        script_content + "' mode=0755\n"

                    recipe += " - name: " + name + "\n"
                    recipe += " shell: /tmp/" + \
                        os.path.basename(script_path) + "\n"
                    if env:
                        recipe += " environment:\n"
                        for var_name, var_value in env.items():
                            recipe += " %s: %s\n" % (var_name, var_value)

                    recipe_list.append(recipe)

        if tasks or recipe_list:
            name = node.name + "_conf"
            if variables:
                recipes = "---\n- vars:\n" + variables + "\n"
                recipes += "  "
            else:
                recipes = "- "

            if tasks:
                recipes += "tasks:\n" + tasks + "\n"

            # Merge the main recipe with the other yaml files
            for recipe in recipe_list:
                recipes = Tosca._merge_recipes(recipes, recipe)

            return configure(name, recipes)
        else:
            return None
+
    def _remove_recipe_header(self, script_content):
        """
        Removes the "hosts" and "connection" elements from the recipe
        to make it "RADL" compatible

        Arguments:
           - script_content(str): YAML text of an Ansible playbook.
        Returns: the YAML text without the per-play "hosts"/"connection"
        keys, or the original text unchanged if it cannot be parsed.
        """

        try:
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors on untrusted input; consider safe_load
            yamlo = yaml.load(script_content)
            if not isinstance(yamlo, list):
                Tosca.logger.warn("Error parsing YAML: " +
                                  script_content + "\n.Do not remove header.")
                return script_content
        except Exception:
            Tosca.logger.exception(
                "Error parsing YAML: " + script_content + "\n.Do not remove header.")
            return script_content

        # Strip play-level keys that the IM contextualization adds itself
        for elem in yamlo:
            if 'hosts' in elem:
                del elem['hosts']
            if 'connection' in elem:
                del elem['connection']

        return yaml.dump(yamlo, default_flow_style=False, explicit_start=True, width=256)
+
+ @staticmethod
+ def _is_artifact(function):
+ """Returns True if the provided function is a Tosca get_artifact function.
+
+ Examples:
+
+ * "{ get_artifact: { SELF, uri } }"
+
+ :param function: Function as string.
+ :return: True if function is a Tosca get_artifact function, otherwise False.
+ """
+ if isinstance(function, dict) and len(function) == 1:
+ func_name = list(function.keys())[0]
+ return func_name == "get_artifact"
+ return False
+
+ def _get_artifact_uri(self, function, node):
+ if isinstance(function, dict) and len(function) == 1:
+ name = function["get_artifact"][1]
+ return self._get_artifact_full_uri(node, name)
+
+ return None
+
+ @staticmethod
+ def _complete_radl_networks(radl):
+ if not radl.networks:
+ radl.networks.append(network.createNetwork("public", True))
+
+ public_net = None
+ for net in radl.networks:
+ if net.isPublic():
+ public_net = net
+ break
+
+ if not public_net:
+ for net in radl.networks:
+ public_net = net
+
+ for sys in radl.systems:
+ if not sys.hasFeature("net_interface.0.connection"):
+ sys.setValue("net_interface.0.connection", public_net.id)
+
+ return radl
+
+ @staticmethod
+ def _is_intrinsic(function):
+ """Returns True if the provided function is a Tosca get_artifact function.
+
+ Examples:
+
+ * "{ concat: ['str1', 'str2'] }"
+ * "{ token: [ , , ] }"
+
+ :param function: Function as string.
+ :return: True if function is a Tosca get_artifact function, otherwise False.
+ """
+ if isinstance(function, dict) and len(function) == 1:
+ func_name = list(function.keys())[0]
+ return func_name in ["concat", "token"]
+ return False
+
+ def _get_intrinsic_value(self, func, node, inf_info):
+ if isinstance(func, dict) and len(func) == 1:
+ func_name = list(func.keys())[0]
+ if func_name == "concat":
+ items = func["concat"]
+ res = ""
+ for item in items:
+ if is_function(item):
+ res += str(self._final_function_result(item, node, inf_info))
+ else:
+ res += str(item)
+ return res
+ elif func_name == "token":
+ items = func["token"]
+ if len(items) == 3:
+ string_with_tokens = items[0]
+ string_of_token_chars = items[1]
+ substring_index = int(items[2])
+
+ if is_function(string_with_tokens):
+ string_with_tokens = str(self._final_function_result(string_with_tokens, node, inf_info))
+
+ parts = string_with_tokens.split(string_of_token_chars)
+ if len(parts) > substring_index:
+ return parts[substring_index]
+ else:
+ Tosca.logger.error(
+ "Incorrect substring_index in function token.")
+ return None
+ else:
+ Tosca.logger.warn(
+ "Intrinsic function token must receive 3 parameters.")
+ return None
+ else:
+ Tosca.logger.warn(
+ "Intrinsic function %s not supported." % func_name)
+ return None
+
    def _get_attribute_result(self, func, node, inf_info):
        """Get an attribute value of an entity defined in the service template

        Node template attributes values are set in runtime and therefore its the
        responsibility of the Tosca engine to implement the evaluation of
        get_attribute functions.

        Arguments:

        * Node template name | HOST.
        * Attribute name.
        * Index (optional)

        If the HOST keyword is passed as the node template name argument the
        function will search each node template along the HostedOn relationship
        chain until a node which contains the attribute is found.

        Examples:

        * { get_attribute: [ server, private_address ] }
        * { get_attribute: [ HOST, private_address ] }
        * { get_attribute: [ SELF, private_address ] }
        * { get_attribute: [ HOST, private_address, 0 ] }
        * { get_attribute: [ server, endpoint, credential, 0 ] }

        When 'inf_info' is provided the value is resolved from the deployed
        VMs; otherwise an Ansible/Jinja2 template expression is returned to
        be resolved at contextualization time.
        """
        node_name = func.args[0]
        capability_name = None
        attribute_name = func.args[1]

        index = None
        # Currently only support 2,3 or 4 parameters
        if len(func.args) == 3:
            try:
                index = int(func.args[2])
            except:
                Tosca.logger.exception("Error getting get_attribute index.")
                pass
        elif len(func.args) == 4:
            # 4 args form: [node, capability, attribute, index]
            capability_name = func.args[1]
            attribute_name = func.args[2]
            try:
                index = int(func.args[3])
            except:
                Tosca.logger.exception("Error getting get_attribute index.")
                pass

        if node_name == "HOST":
            node = self._find_host_compute(node, self.tosca.nodetemplates)
        elif node_name != "SELF":
            # An explicit node name: look it up among the templates
            node = None
            for n in self.tosca.nodetemplates:
                if n.name == node_name:
                    node = n
                    break
            if not node:
                Tosca.logger.error(
                    "Calling get_attribute function for non existing node: %s" % node_name)
                return None

        root_type = Tosca._get_root_parent_type(node).type

        if inf_info:
            vm_list = inf_info.get_vm_list_by_system_name()

            if node.name not in vm_list:
                Tosca.logger.warn(
                    "There are no VM associated with the name %s." % node.name)
                return None
            else:
                # As default assume that there will be only one VM per group
                vm = vm_list[node.name][0]
                # NOTE(review): suspected off-by-one — when index == len the
                # clamp does not trigger and res[index] below would raise
                if index is not None and len(vm_list[node.name]) < index:
                    index = len(vm_list[node.name]) - 1

            if attribute_name == "tosca_id":
                return vm.id
            elif attribute_name == "tosca_name":
                return node.name
            elif attribute_name == "ctxt_log":
                if node.type == "tosca.nodes.indigo.Compute":
                    return vm.cont_out
                else:
                    Tosca.logger.warn("Attribute ctxt_log only supported"
                                      " in tosca.nodes.indigo.Compute nodes.")
                    return None
            elif attribute_name == "credential" and capability_name == "endpoint":
                if node.type == "tosca.nodes.indigo.Compute":
                    # One credential dict per VM of the group
                    res = []
                    for vm in vm_list[node.name]:
                        user, password, _, private_key = vm.getCredentialValues()
                        val = {"user": user}
                        if password:
                            val["token"] = password
                            val["token_type"] = "password"
                        if private_key:
                            # private_key takes precedence over password
                            val["token_type"] = "private_key"
                            val["token"] = private_key
                        res.append(val)
                    if index is not None:
                        res = res[index]
                    return res
                else:
                    Tosca.logger.warn("Attribute credential of capability endpoint only"
                                      " supported in tosca.nodes.indigo.Compute nodes.")
                    return None
            elif attribute_name == "private_address":
                if node.type == "tosca.nodes.indigo.Compute":
                    res = [vm.getPrivateIP() for vm in vm_list[node.name]]
                    if index is not None:
                        res = res[index]
                    return res
                else:
                    return vm.getPrivateIP()
            elif attribute_name == "public_address":
                if node.type == "tosca.nodes.indigo.Compute":
                    res = [vm.getPublicIP() for vm in vm_list[node.name]]
                    if index is not None:
                        res = res[index]
                    return res
                else:
                    return vm.getPublicIP()
            elif attribute_name == "ip_address":
                if root_type == "tosca.nodes.network.Port":
                    order = node.get_property_value('order')
                    return vm.getNumNetworkWithConnection(order)
                elif root_type == "tosca.capabilities.Endpoint":
                    # Prefer the public IP when the VM has one
                    if vm.getPublicIP():
                        return vm.getPublicIP()
                    else:
                        return vm.getPrivateIP()
                else:
                    Tosca.logger.warn("Attribute ip_address only supported in tosca.nodes.network.Port "
                                      "and tosca.capabilities.Endpoint nodes.")
                    return None
            else:
                Tosca.logger.warn("Attribute %s not supported." %
                                  attribute_name)
                return None
        else:
            # No infrastructure info: return Ansible template expressions
            # resolved later during contextualization
            if attribute_name == "tosca_id":
                if node_name in ["HOST", "SELF"]:
                    return "{{ IM_NODE_VMID }}"
                else:
                    return "{{ hostvars[groups['%s'][0]]['IM_NODE_VMID'] }}" % node.name
            elif attribute_name == "tosca_name":
                return node.name
            elif attribute_name == "private_address":
                if node.type == "tosca.nodes.indigo.Compute":
                    if index is not None:
                        return "{{ hostvars[groups['%s'][%d]]['IM_NODE_PRIVATE_IP'] }}" % (node.name, index)
                    else:
                        return ("{{ groups['%s']|map('extract', hostvars,'IM_NODE_PRIVATE_IP')|list"
                                " if '%s' in groups else []}}" % (node.name, node.name))
                else:
                    if node_name in ["HOST", "SELF"]:
                        return "{{ IM_NODE_PRIVATE_IP }}"
                    else:
                        return "{{ hostvars[groups['%s'][0]]['IM_NODE_PRIVATE_IP'] }}" % node.name
            elif attribute_name == "public_address":
                if node.type == "tosca.nodes.indigo.Compute":
                    if index is not None:
                        return "{{ hostvars[groups['%s'][%d]]['IM_NODE_PUBLIC_IP'] }}" % (node.name, index)
                    else:
                        return ("{{ groups['%s']|map('extract', hostvars,'IM_NODE_PUBLIC_IP')|list"
                                " if '%s' in groups else []}}" % (node.name, node.name))
                else:
                    if node_name in ["HOST", "SELF"]:
                        return "{{ IM_NODE_PUBLIC_IP }}"
                    else:
                        return "{{ hostvars[groups['%s'][0]]['IM_NODE_PUBLIC_IP'] }}" % node.name
            elif attribute_name == "ip_address":
                if root_type == "tosca.nodes.network.Port":
                    order = node.get_property_value('order')
                    return "{{ hostvars[groups['%s'][0]]['IM_NODE_NET_%s_IP'] }}" % (node.name, order)
                elif root_type == "tosca.capabilities.Endpoint":
                    # TODO: check this
                    if node_name in ["HOST", "SELF"]:
                        return "{{ IM_NODE_PUBLIC_IP }}"
                    else:
                        return "{{ hostvars[groups['%s'][0]]['IM_NODE_PUBLIC_IP'] }}" % node.name
                else:
                    Tosca.logger.warn("Attribute ip_address only supported in tosca.nodes.network.Port and "
                                      "tosca.capabilities.Endpoint nodes.")
                    return None
            else:
                Tosca.logger.warn("Attribute %s not supported." %
                                  attribute_name)
                return None
+
    def _final_function_result(self, func, node, inf_info=None):
        """
        Take a translator.toscalib.functions.Function and return the final result
        (in some cases the result of a function is another function)

        Arguments:
           - func: value to resolve (plain value, function dict or Function).
           - node: node template used as context for the evaluation.
           - inf_info: InfrastructureInfo to resolve run-time values (optional).
        Returns: the fully resolved value (may be None).
        """
        if isinstance(func, dict):
            if is_function(func):
                func = get_function(self.tosca, node, func)

        # Keep resolving while the result is still a Function object
        while isinstance(func, Function):
            if isinstance(func, GetAttribute):
                func = self._get_attribute_result(func, node, inf_info)
            elif isinstance(func, Concat):
                func = self._get_intrinsic_value(
                    {"concat": func.args}, node, inf_info)
            elif isinstance(func, Token):
                func = self._get_intrinsic_value(
                    {"token": func.args}, node, inf_info)
            else:
                func = func.result()

        # The result itself may be an intrinsic function expressed as a dict
        if isinstance(func, dict):
            if self._is_intrinsic(func):
                func = self._get_intrinsic_value(func, node, inf_info)

        if func is None:
            # TODO: resolve function values related with run-time values as IM
            # or ansible variables
            pass
        return func
+
+ def _find_host_compute(self, node, nodetemplates):
+ """
+ Select the node to host each node, using the node requirements
+ In most of the cases the are directly specified, otherwise "node_filter" is used
+ """
+
+ # check for a HosteOn relation
+ root_type = Tosca._get_root_parent_type(node).type
+ if root_type == "tosca.nodes.Compute":
+ return node
+
+ if node.requirements:
+ for r, n in node.relationships.items():
+ if Tosca._is_derived_from(r, r.HOSTEDON) or Tosca._is_derived_from(r, r.BINDSTO):
+ root_type = Tosca._get_root_parent_type(n).type
+ if root_type == "tosca.nodes.Compute":
+ return n
+ else:
+ return self._find_host_compute(n, nodetemplates)
+
+ # There are no direct HostedOn node
+ # check node_filter requirements
+ if node.requirements:
+ for requires in node.requirements:
+ if 'host' in requires:
+ value = requires.get('host')
+ if isinstance(value, dict):
+ if 'node_filter' in value:
+ node_filter = value.get('node_filter')
+ return self._get_compute_from_node_filter(node_filter, nodetemplates)
+
+ return None
+
+ def _node_fulfill_filter(self, node, node_filter):
+ """
+ Check if a node fulfills the features of a node filter
+ """
+
+ # Get node properties
+ node_props = {}
+ for cap_type in ['os', 'host']:
+ if node.get_capability(cap_type):
+ for prop in node.get_capability(cap_type).get_properties_objects():
+ if prop.value is not None:
+ unit = None
+ value = self._final_function_result(prop.value, node)
+ if prop.name in ['disk_size', 'mem_size']:
+ value, unit = Tosca._get_size_and_unit(value)
+ node_props[prop.name] = (value, unit)
+
+ filter_props = {}
+ # Get node_filter properties
+ for elem in node_filter:
+ if isinstance(elem, dict):
+ for cap_type in ['os', 'host']:
+ if cap_type in elem:
+ for p in elem.get(cap_type).get('properties'):
+ p_name = list(p.keys())[0]
+ p_value = list(p.values())[0]
+ if isinstance(p_value, dict):
+ filter_props[p_name] = (list(p_value.keys())[0],
+ list(p_value.values())[0])
+ else:
+ filter_props[p_name] = ("equal", p_value)
+
+ operator_map = {
+ 'equal': '==',
+ 'greater_than': '>',
+ 'greater_or_equal': '>=',
+ 'less_than': '<',
+ 'less_or_equal': '<='
+ }
+
+ # Compare the properties
+ for name, value in filter_props.items():
+ operator, filter_value = value
+ if name in ['disk_size', 'mem_size']:
+ filter_value, _ = Tosca._get_size_and_unit(filter_value)
+
+ if name in node_props:
+ node_value, _ = node_props[name]
+
+ if isinstance(node_value, str) or isinstance(node_value, unicode):
+ str_node_value = "'" + node_value + "'"
+ else:
+ str_node_value = str(node_value)
+
+ conv_operator = operator_map.get(operator, None)
+ if conv_operator:
+ if isinstance(filter_value, str) or isinstance(filter_value, unicode):
+ str_filter_value = "'" + filter_value + "'"
+ else:
+ str_filter_value = str(filter_value)
+
+ comparation = str_node_value + conv_operator + str_filter_value
+ else:
+ if operator == "in_range":
+ minv = filter_value[0]
+ maxv = filter_value[1]
+ comparation = str_node_value + ">=" + \
+ str(minv) + " and " + \
+ str_node_value + "<=" + str(maxv)
+ elif operator == "valid_values":
+ comparation = str_node_value + \
+ " in " + str(filter_value)
+ else:
+ Tosca.logger.warn(
+ "Logical operator %s not supported." % operator)
+
+ if not eval(comparation):
+ return False
+ else:
+ # if this property is not specified in the node, return False
+ # TODO: we must think about default values
+ return False
+
+ return True
+
+ def _get_compute_from_node_filter(self, node_filter, nodetemplates):
+ """
+ Select the first node that fulfills the specified "node_filter"
+ """
+
+ for node in nodetemplates:
+ root_type = Tosca._get_root_parent_type(node).type
+
+ if root_type == "tosca.nodes.Compute":
+ if self._node_fulfill_filter(node, node_filter.get('capabilities')):
+ return node
+
+ return None
+
+ @staticmethod
+ def _get_dependency_level(node):
+ """
+ Check the relations to get the contextualization level
+ """
+ if node.requirements:
+ maxl = 0
+ for r, n in node.relationships.items():
+ if Tosca._is_derived_from(r, [r.HOSTEDON, r.DEPENDSON]):
+ level = Tosca._get_dependency_level(n)
+ else:
+ level = 0
+
+ if level > maxl:
+ maxl = level
+ return maxl + 1
+ else:
+ return 1
+
+ @staticmethod
+ def _unit_to_bytes(unit):
+ """Return the value of an unit."""
+ if not unit:
+ return 1
+ unit = unit.upper()
+
+ if unit.startswith("KI"):
+ return 1024
+ elif unit.startswith("K"):
+ return 1000
+ elif unit.startswith("MI"):
+ return 1048576
+ elif unit.startswith("M"):
+ return 1000000
+ elif unit.startswith("GI"):
+ return 1073741824
+ elif unit.startswith("G"):
+ return 1000000000
+ elif unit.startswith("TI"):
+ return 1099511627776
+ elif unit.startswith("T"):
+ return 1000000000000
+ else:
+ return 1
+
+ @staticmethod
+ def _get_size_and_unit(str_value):
+ """
+ Normalize the size and units to bytes
+ """
+ parts = str_value.split(" ")
+ value = float(parts[0])
+ unit = 'M'
+ if len(parts) > 1:
+ unit = parts[1]
+
+ value = int(value * Tosca._unit_to_bytes(unit))
+
+ return value, 'B'
+
+ @staticmethod
+ def _gen_network(node):
+ """
+ Take a node of type "Network" and get the RADL.network to represent it
+ """
+ res = network(node.name)
+
+ nework_type = node.get_property_value("network_type")
+ network_name = node.get_property_value("network_name")
+
+ # TODO: get more properties -> must be implemented in the RADL
+ if nework_type == "public":
+ res.setValue("outbound", "yes")
+
+ if network_name:
+ res.setValue("provider_id", network_name)
+
+ return res
+
+ def _add_ansible_roles(self, node, nodetemplates, system):
+ """
+ Find all the roles to be applied to this node and
+ add them to the system as ansible.modules.* in 'disk.0.applications'
+ """
+ roles = []
+ for other_node in nodetemplates:
+ root_type = Tosca._get_root_parent_type(other_node).type
+ if root_type == "tosca.nodes.Compute":
+ compute = other_node
+ else:
+ # Select the host to host this element
+ compute = self._find_host_compute(other_node, nodetemplates)
+
+ if compute and compute.name == node.name:
+ # Get the artifacts to see if there is a ansible galaxy role
+ # and add it as an "ansible.modules" app requirement in RADL
+ artifacts = other_node.type_definition.get_value(
+ 'artifacts', other_node.entity_tpl, True)
+ if artifacts:
+ for name, artifact in artifacts.items():
+ name
+ if ('type' in artifact and artifact['type'] == 'tosca.artifacts.AnsibleGalaxy.role' and
+ 'file' in artifact and artifact['file']):
+ if artifact['file'] not in roles:
+ roles.append(artifact['file'])
+
+ for role in roles:
+ app_features = Features()
+ app_features.addFeature(
+ Feature('name', '=', 'ansible.modules.' + role))
+ feature = Feature(
+ 'disk.0.applications', 'contains', app_features)
+ system.addFeature(feature)
+
    def _gen_system(self, node, nodetemplates):
        """
        Take a node of type "Compute" and get the RADL.system to represent it

        Arguments:
           - node: Compute node template.
           - nodetemplates(list): all node templates, used to find attached
             BlockStorages and hosted Ansible roles.
        Returns: a RADL system element.
        Raises: Exception if image credentials do not include a user.
        """
        res = system(node.name)

        # TOSCA capability property -> RADL feature name
        property_map = {
            'architecture': 'cpu.arch',
            'type': 'disk.0.os.name',
            'distribution': 'disk.0.os.flavour',
            'version': 'disk.0.os.version',
            'image': 'disk.0.image.url',
            'credential': 'disk.0.os.credentials',
            'num_cpus': 'cpu.count',
            'disk_size': 'disks.free_size',
            'mem_size': 'memory.size',
            'cpu_frequency': 'cpu.performance',
            'instance_type': 'instance_type',
        }

        for cap_type in ['os', 'host']:
            if node.get_capability(cap_type):
                for prop in node.get_capability(cap_type).get_properties_objects():
                    name = property_map.get(prop.name, None)
                    if name and prop.value:
                        unit = None
                        value = self._final_function_result(prop.value, node)
                        if prop.name in ['disk_size', 'mem_size']:
                            # Normalize sizes to bytes
                            value, unit = Tosca._get_size_and_unit(value)
                        elif prop.name == "version":
                            value = str(value)
                        elif prop.name == "image":
                            # Bare image names are assumed to be docker images
                            if value.find("://") == -1:
                                value = "docker://%s" % value
                        elif prop.name == "credential":
                            # credential is a dict: user + token (+ token_type)
                            token_type = "password"
                            if 'token_type' in value and value['token_type']:
                                token_type = value['token_type']

                            token = None
                            if 'token' in value and value['token']:
                                token = value['token']

                            if token:
                                if token_type == "password":
                                    feature = Feature("disk.0.os.credentials.password", "=", token)
                                    res.addFeature(feature)
                                elif token_type == "private_key":
                                    feature = Feature("disk.0.os.credentials.private_key", "=", token)
                                    res.addFeature(feature)
                                elif token_type == "public_key":
                                    feature = Feature("disk.0.os.credentials.public_key", "=", token)
                                    res.addFeature(feature)
                                else:
                                    # NOTE: message typo ("tyoe") kept as-is
                                    Tosca.logger.warn("Unknown tyoe of token %s. Ignoring." % token_type)
                            if 'user' not in value or not value['user']:
                                raise Exception("User must be specified in the image credentials.")
                            # The username is added below as a regular feature
                            name = "disk.0.os.credentials.username"
                            value = value['user']

                        if isinstance(value, float) or isinstance(value, int):
                            # Numeric features are minimum requirements
                            operator = ">="
                        else:
                            operator = "="

                        feature = Feature(name, operator, value, unit)
                        res.addFeature(feature)

        # Find associated BlockStorages
        disks = self._get_attached_disks(node, nodetemplates)

        for size, unit, location, device, num, fstype in disks:
            if size:
                res.setValue('disk.%d.size' % num, size, unit)
            if device:
                res.setValue('disk.%d.device' % num, device)
            if location:
                res.setValue('disk.%d.mount_path' % num, location)
                res.setValue('disk.%d.fstype' % num, fstype)

        self._add_ansible_roles(node, nodetemplates, res)

        return res
+
+ @staticmethod
+ def _get_bind_networks(node, nodetemplates):
+ nets = []
+
+ for port in nodetemplates:
+ root_type = Tosca._get_root_parent_type(port).type
+ if root_type == "tosca.nodes.network.Port":
+ binding = None
+ link = None
+ for requires in port.requirements:
+ binding = requires.get('binding', binding)
+ link = requires.get('link', link)
+
+ if binding == node.name:
+ ip = port.get_property_value('ip_address')
+ order = port.get_property_value('order')
+ dns_name = port.get_property_value('dns_name')
+ nets.append((link, ip, dns_name, order))
+
+ return nets
+
    def _get_attached_disks(self, node, nodetemplates):
        """
        Get the disks attached to a node

        Arguments:
           - node: Compute node template.
           - nodetemplates(list): all node templates (not used in the body).
        Returns: list of (size, unit, location, device, num, fstype) tuples,
        one per attached BlockStorage; fstype is always "ext4".
        """
        disks = []
        count = 1

        for rel, trgt in node.relationships.items():
            src = node
            rel_tpl = Tosca._get_relationship_template(rel, src, trgt)
            # TODO: ver root_type
            if rel.type.endswith("AttachesTo"):
                # NOTE(review): the next statement is a no-op attribute access
                rel_tpl.entity_tpl
                props = rel_tpl.get_properties_objects()

                size = None
                location = None
                # set a default device
                device = None

                for prop in props:
                    value = self._final_function_result(prop.value, node)
                    if prop.name == "location":
                        location = value
                    elif prop.name == "device":
                        device = value

                if trgt.type_definition.type == "tosca.nodes.BlockStorage":
                    # Normalize the storage size to bytes
                    full_size = self._final_function_result(trgt.get_property_value('size'), trgt)
                    size, unit = Tosca._get_size_and_unit(full_size)
                    disks.append((size, unit, location, device, count, "ext4"))
                    count += 1
                else:
                    Tosca.logger.debug(
                        "Attached item of type %s ignored." % trgt.type_definition.type)

        return disks
+
+ @staticmethod
+ def _is_derived_from(rel, parent_type):
+ """
+ Check if a node is a descendant from a specified parent type
+ """
+ if isinstance(parent_type, list):
+ parent_types = parent_type
+ else:
+ parent_types = [parent_type]
+ while True:
+ if rel.type in parent_types:
+ return True
+ else:
+ if rel.parent_type:
+ rel = rel.parent_type
+ else:
+ return False
+
+ @staticmethod
+ def _get_root_parent_type(node):
+ """
+ Get the root parent type of a node (just before the tosca.nodes.Root)
+ """
+ node_type = node.type_definition
+
+ while True:
+ if node_type.parent_type is not None:
+ if node_type.parent_type.type.endswith(".Root"):
+ return node_type
+ else:
+ node_type = node_type.parent_type
+ else:
+ return node_type
+
+ @staticmethod
+ def _get_interfaces(node):
+ """
+ Get a dict of InterfacesDef of the specified node
+ """
+ interfaces = {}
+ for interface in node.interfaces:
+ interfaces[interface.name] = interface
+
+ node_type = node.type_definition
+
+ while True:
+ if node_type.interfaces and 'Standard' in node_type.interfaces:
+ for name, elems in node_type.interfaces['Standard'].items():
+ if name in ['create', 'configure', 'start', 'stop', 'delete']:
+ if name not in interfaces:
+ interfaces[name] = InterfacesDef(
+ node_type, 'Standard', name=name, value=elems)
+
+ if node_type.parent_type is not None:
+ node_type = node_type.parent_type
+ else:
+ return interfaces
+
+ @staticmethod
+ def _merge_recipes(yaml1, yaml2):
+ """
+ Merge two ansible recipes yaml docs
+
+ Arguments:
+ - yaml1(str): string with the first YAML
+ - yaml1(str): string with the second YAML
+ Returns: The merged YAML. In case of errors, it concatenates both strings
+ """
+ yamlo1o = {}
+ try:
+ yamlo1o = yaml.load(yaml1)[0]
+ if not isinstance(yamlo1o, dict):
+ yamlo1o = {}
+ except Exception as ex:
+ raise Exception("Error parsing YAML: " + yaml1 + "\n. Error: %s" % str(ex))
+
+ yamlo2s = {}
+ try:
+ yamlo2s = yaml.load(yaml2)
+ if not isinstance(yamlo2s, list) or any([not isinstance(d, dict) for d in yamlo2s]):
+ yamlo2s = {}
+ except Exception as ex:
+ raise Exception("Error parsing YAML: " + yaml2 + "\n. Error: %s" % str(ex))
+
+ if not yamlo2s and not yamlo1o:
+ return ""
+
+ result = []
+ for yamlo2 in yamlo2s:
+ yamlo1 = copy.deepcopy(yamlo1o)
+ all_keys = []
+ all_keys.extend(yamlo1.keys())
+ all_keys.extend(yamlo2.keys())
+ all_keys = set(all_keys)
+
+ for key in all_keys:
+ if key in yamlo1 and yamlo1[key]:
+ if key in yamlo2 and yamlo2[key]:
+ if isinstance(yamlo1[key], dict):
+ yamlo1[key].update(yamlo2[key])
+ elif isinstance(yamlo1[key], list):
+ yamlo1[key].extend(yamlo2[key])
+ else:
+ # Both use have the same key with merge in a lists
+ v1 = yamlo1[key]
+ v2 = yamlo2[key]
+ yamlo1[key] = [v1, v2]
+ elif key in yamlo2 and yamlo2[key]:
+ yamlo1[key] = yamlo2[key]
+ result.append(yamlo1)
+
+ return yaml.dump(result, default_flow_style=False, explicit_start=True, width=256)
+
+ def get_outputs(self, inf_info):
+ """
+ Get the outputs of the TOSCA document using the InfrastructureInfo
+ object 'inf_info' to get the data of the VMs
+ """
+ res = {}
+
+ for output in self.tosca.outputs:
+ val = self._final_function_result(
+ output.attrs.get(output.VALUE), None, inf_info)
+ res[output.name] = val
+
+ return res
+
+ def merge(self, other_tosca):
+ Tosca._merge_yaml(self.yaml, other_tosca.yaml)
+ self.tosca = ToscaTemplate(yaml_dict_tpl=copy.deepcopy(self.yaml))
+ return self
+
+ @staticmethod
+ def _merge_yaml(yaml1, yaml2):
+ if yaml2 is None:
+ return yaml1
+ elif isinstance(yaml1, dict) and isinstance(yaml2, dict):
+ for k, v in yaml2.items():
+ if k not in yaml1:
+ yaml1[k] = v
+ else:
+ yaml1[k] = Tosca._merge_yaml(yaml1[k], v)
+ elif isinstance(yaml1, list) and isinstance(yaml2, (list, tuple)):
+ for v in yaml2:
+ if v not in yaml1:
+ yaml1.append(v)
+ else:
+ yaml1 = yaml2
+
+ return yaml1
diff --git a/IM/tosca/__init__.py b/IM/tosca/__init__.py
new file mode 100644
index 000000000..726d4a196
--- /dev/null
+++ b/IM/tosca/__init__.py
@@ -0,0 +1,15 @@
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/IM/tts/__init__.py b/IM/tts/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/IM/tts/onetts.py b/IM/tts/onetts.py
new file mode 100644
index 000000000..c3db4480d
--- /dev/null
+++ b/IM/tts/onetts.py
@@ -0,0 +1,45 @@
+'''
+Created on 16 de jun. de 2016
+
+@author: micafer
+'''
+
+from IM.uriparse import uriparse
+from IM.tts.tts import TTSClient
+
+
+class ONETTSClient():
+ """
+ Class to interact get user/password credentials to OpenNebula using the TTS
+ """
+
+ @staticmethod
+ def get_auth_from_tts(tts_url, one_server, token):
+ """
+ Get username and password from the TTS service
+ """
+ tts_uri = uriparse(tts_url)
+ scheme = tts_uri[0]
+ host = tts_uri[1]
+ port = None
+ if host.find(":") != -1:
+ parts = host.split(":")
+ host = parts[0]
+ port = int(parts[1])
+
+ ttsc = TTSClient(token, host, port, scheme)
+
+ success, svc = ttsc.find_service(one_server)
+ if not success:
+ raise Exception("Error getting credentials from TTS: %s" % svc)
+ succes, cred = ttsc.request_credential(svc["id"])
+ if succes:
+ username = password = None
+ for elem in cred['credential']['entries']:
+ if elem['name'] == 'Username':
+ username = elem['value']
+ elif elem['name'] == 'Password':
+ password = elem['value']
+ return username, password
+ else:
+ raise Exception("Error getting credentials from TTS: %s" % cred)
diff --git a/IM/tts/tts.py b/IM/tts/tts.py
new file mode 100644
index 000000000..a50199e49
--- /dev/null
+++ b/IM/tts/tts.py
@@ -0,0 +1,134 @@
+'''
+Created on 16 de jun. de 2016
+
+@author: micafer
+'''
+
+import json
+import requests
+
+
+class TTSClient:
+ """
+ Class to interact with the TTS using v2 of the REST API
+ https://github.com/indigo-dc/tts
+ """
+
+ def __init__(self, token, host, port=None, uri_scheme=None, ssl_verify=False):
+ self.host = host
+ self.port = port
+ if not self.port:
+ self.port = 8080
+ self.token = token
+ self.uri_scheme = uri_scheme
+ if not self.uri_scheme:
+ self.uri_scheme = "http"
+ self.ssl_verify = ssl_verify
+
+ def _perform_get(self, url, headers={}):
+ """
+ Perform the GET operation on the TTS with the specified URL
+ """
+ url = "%s://%s:%s%s" % (self.uri_scheme, self.host, self.port, url)
+ resp = requests.request("GET", url, verify=self.ssl_verify, headers=headers)
+
+ if resp.status_code >= 200 and resp.status_code <= 299:
+ return True, resp.text
+ else:
+ return False, "Error code %d. Msg: %s" % (resp.status_code, resp.text)
+
+ def _perform_post(self, url, headers, body):
+ """
+ Perform the POST operation on the TTS with the specified URL
+ and using the body specified
+ """
+ url = "%s://%s:%s%s" % (self.uri_scheme, self.host, self.port, url)
+ resp = requests.request("POST", url, verify=self.ssl_verify, headers=headers, data=body)
+ if resp.status_code >= 200 and resp.status_code <= 299:
+ return True, resp.text
+ else:
+ return False, "Error code %d. Msg: %s" % (resp.status_code, resp.text)
+
+ def request_credential(self, sid):
+ """
+ Request a credential for the specified service
+ """
+ success, provider = self.get_provider()
+ if not success:
+ return False, provider
+
+ body = '{"service_id":"%s"}' % sid
+ url = "/api/v2/%s/credential" % provider["id"]
+ try:
+ headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
+ success, res = self._perform_post(url, headers, body)
+ except Exception as ex:
+ success = False
+ res = str(ex)
+ if success:
+ return True, json.loads(res)
+ else:
+ return False, res
+
+ def list_providers(self):
+ """
+ Get the list of providers
+ """
+ url = "/api/v2/oidcp"
+ try:
+ success, output = self._perform_get(url)
+ except Exception as ex:
+ success = False
+ output = str(ex)
+ if not success:
+ return False, output
+ else:
+ return True, json.loads(output)
+
+ def list_endservices(self, provider):
+ """
+ Get the list of services
+ """
+ url = "/api/v2/%s/service" % provider
+ try:
+ headers = {'Authorization': 'Bearer %s' % self.token}
+ success, output = self._perform_get(url, headers)
+ except Exception as ex:
+ success = False
+ output = str(ex)
+ if not success:
+ return False, output
+ else:
+ return True, json.loads(output)
+
+ def get_provider(self):
+ """
+ Get the first provider available
+ """
+ success, providers = self.list_providers()
+ if not success:
+ return False, providers
+ else:
+ if providers['openid_provider_list']:
+ return True, providers['openid_provider_list'][0]
+ else:
+ return False, "No provider found."
+
+ def find_service(self, host):
+ """
+ Find a service for the specified host
+ """
+ success, provider = self.get_provider()
+ if not success:
+ return False, provider
+
+ success, services = self.list_endservices(provider["id"])
+ if success:
+ for service in services["service_list"]:
+ # we assume that if the host appears in the description it is our service
+ if service["description"].find(host) != -1:
+ return True, service
+ else:
+ return False, services
+
+ return False, "Cloud site %s not found in TTS" % host
diff --git a/SUMMARY.md b/SUMMARY.md
new file mode 100644
index 000000000..6404d5d10
--- /dev/null
+++ b/SUMMARY.md
@@ -0,0 +1,7 @@
+# Summary
+
+* [About IM](README.md)
+* [Installation](doc/gitbook/installation.md)
+* [Docker Image](doc/gitbook/docker-image.md)
+* [REST API](doc/gitbook/rest-api.md)
+* [Service Card](doc/gitbook/service-reference.md)
diff --git a/doc/gitbook/docker-image.md b/doc/gitbook/docker-image.md
new file mode 100644
index 000000000..33a8bacfe
--- /dev/null
+++ b/doc/gitbook/docker-image.md
@@ -0,0 +1,29 @@
+2. DOCKER IMAGE
+===============
+
+A Docker image named `indigodatacloud/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://hub.docker.com/r/indigodatacloud/im/.
+
+How to launch the IM service using docker::
+
+```sh
+$ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im indigodatacloud/im
+```
+
+To make the IM data persistent you also have to specify a persistent location for the IM database using the IM_DATA_DB environment variable and adding a volume::
+
+```sh
+$ sudo docker run -d -p 8899:8899 -p 8800:8800 -v "/some_local_path/db:/db" -e IM_DATA_DB=/db/inf.dat --name im indigodatacloud/im
+```
+
+You can also specify an external MySQL server to store IM data using the IM_DATA_DB environment variable::
+
+```sh
+$ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_DATA_DB=mysql://username:password@server/db_name --name im indigodatacloud/im
+```
+
+You can use the IM as an entry point of an OpenNebula cloud provider as a TOSCA compliant endpoint for your site::
+
+```sh
+$ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_SINGLE_SITE_ONE_HOST=oneserver.com --name im indigodatacloud/im
+```
diff --git a/doc/gitbook/installation.md b/doc/gitbook/installation.md
new file mode 100644
index 000000000..790a92792
--- /dev/null
+++ b/doc/gitbook/installation.md
@@ -0,0 +1,268 @@
+1. INSTALLATION
+===============
+
+1.1 REQUISITES
+--------------
+
+IM is based on Python, so Python 2.6 or higher runtime and standard library must
+be installed in the system.
+
+ + The RADL parser (https://github.com/grycap/radl), available in pip
+ as the 'RADL' package. (Since IM version 1.5.3, it requires RADL version 1.1.0 or later).
+
+ + The paramiko ssh2 protocol library for python version 1.14 or later
+(http://www.lag.net/paramiko/), typically available as the 'python-paramiko' package.
+
+ + The YAML library for Python, typically available as the 'python-yaml' or 'PyYAML' package.
+
+ + The suds library for Python, typically available as the 'python-suds' package.
+
+ + The Netaddr library for Python, typically available as the 'python-netaddr' package.
+
+ + The Requests library for Python, typically available as the 'python-requests' package.
+
+ + The TOSCA-Parser library for Python. Currently it must be used the INDIGO version located at
+ https://github.com/indigo-dc/tosca-parser but we are working to improve the mainstream version
+ to enable to use it with the IM.
+
+ + The CherryPy Web framework (http://www.cherrypy.org/), is needed for the REST API.
+ It is typically available as the 'python-cherrypy' or 'python-cherrypy3' package.
+ In newer versions (9.0 and later) the functionality has been moved to the 'cheroot' library
+ (https://github.com/cherrypy/cheroot) it can be installed using pip.
+
+ + Ansible (http://www.ansibleworks.com/) to configure nodes in the infrastructures.
+  In particular, Ansible 1.4.2+ must be installed. The current recommended version is 1.9.4 until the 2.X versions become stable.
+ To ensure the functionality the following values must be set in the ansible.cfg file (usually found in /etc/ansible/):
+
+```
+[defaults]
+transport = smart
+host_key_checking = False
+# For old versions 1.X
+sudo_user = root
+sudo_exe = sudo
+
+# For new versions 2.X
+become_user = root
+become_method = sudo
+
+[paramiko_connection]
+
+record_host_keys=False
+
+[ssh_connection]
+
+# Only in systems with OpenSSH support to ControlPersist
+ssh_args = -o ControlMaster=auto -o ControlPersist=900s
+# In systems with older versions of OpenSSH (RHEL 6, CentOS 6, SLES 10 or SLES 11)
+#ssh_args =
+pipelining = True
+```
+
+1.2 OPTIONAL PACKAGES
+---------------------
+
+The Bottle framework (http://bottlepy.org/) is used for the REST API.
+It is typically available as the 'python-bottle' package.
+
+Apache-libcloud (http://libcloud.apache.org/) 0.17 or later is used in the
+LibCloud, OpenStack and GCE connectors.
+
+Boto (http://boto.readthedocs.org) 2.29.0 or later is used as interface to
+Amazon EC2. It is available as package named ``python-boto`` in Debian based
+distributions. It can also be downloaded from the [boto GitHub repository](https://github.com/boto/boto).
+Download the file and copy the boto subdirectory into the IM install path.
+
+In case of using a MySQL DB as the backend to store IM data, the Python interface to MySQL
+must be installed, typically available as the package 'python-mysqldb' or 'MySQL-python' package.
+In case of using Python 3 use the PyMySQL package, available as the package 'python3-pymysql' on
+debian systems or PyMySQL package in pip.
+
+In case of using the SSL secured version of the REST API pyOpenSSL must be installed.
+
+Azure python SDK (https://azure.microsoft.com/es-es/develop/python/) is used to connect with the
+Microsoft Azure platform.
+
+
+1.3 INSTALLING
+--------------
+
+### 1.3.1 Using installer (Recommended option)
+
+The IM provides a script to install the IM in one single step.
+You only need to execute the following command:
+
+```sh
+$ wget -qO- https://raw.githubusercontent.com/indigo-dc/im/master/install.sh | bash
+```
+
+It works for the most recent version of the main Linux distributions (RHEL/CentOS 7, Ubuntu 14/16).
+
+### 1.3.2 FROM RPM
+
+You must have the epel repository enabled:
+
+```sh
+$ yum install epel-release
+```
+
+Then you have to enable the INDIGO - DataCloud packages repositories. See full instructions
+[here](https://indigo-dc.gitbooks.io/indigo-datacloud-releases/content/generic_installation_and_configuration_guide_2.html#id4). Briefly you have to download the repo file from [INDIGO SW Repository](http://repo.indigo-datacloud.eu/repos/2/indigo2.repo) in your /etc/yum.repos.d folder.
+
+```sh
+$ cd /etc/yum.repos.d
+$ wget http://repo.indigo-datacloud.eu/repos/2/indigo2.repo
+```
+
+And then install the GPG key for the INDIGO repository:
+
+```sh
+$ rpm --import http://repo.indigo-datacloud.eu/repository/RPM-GPG-KEY-indigodc
+```
+
+Finally install the IM package.
+
+```sh
+$ yum install IM
+```
+
+### 1.3.3 FROM DEB
+
+You have to enable the INDIGO - DataCloud packages repositories. See full instructions
+[here](https://indigo-dc.gitbooks.io/indigo-datacloud-releases/content/generic_installation_and_configuration_guide_2.html#id4). Briefly you have to download the list file from [INDIGO SW Repository](http://repo.indigo-datacloud.eu/repos/2/indigo2-ubuntu16_04.list) in your /etc/apt/sources.list.d folder.
+
+```sh
+$ cd /etc/apt/sources.list.d
+$ wget http://repo.indigo-datacloud.eu/repos/2/indigo2-ubuntu16_04.list
+```
+
+And then install the GPG key for the INDIGO repository:
+
+```sh
+$ wget -q -O - http://repo.indigo-datacloud.eu/repository/RPM-GPG-KEY-indigodc | sudo apt-key add -
+```
+
+Finally install the IM package.
+
+```sh
+$ apt update
+$ apt install python-im
+```
+
+1.4 CONFIGURATION
+-----------------
+
+In case that you want the IM service to be started at boot time, you must
+execute the next set of commands:
+
+On RedHat Systems:
+
+```sh
+$ chkconfig im on
+```
+
+On Debian Systems:
+
+```sh
+$ update-rc.d im start 99 2 3 4 5 . stop 05 0 1 6 .
+```
+
+Or for newer systems like ubuntu 14.04:
+
+```sh
+$ sysv-rc-conf im on
+```
+
+Or you can do it manually:
+
+```sh
+$ ln -s /etc/init.d/im /etc/rc2.d/S99im
+$ ln -s /etc/init.d/im /etc/rc3.d/S99im
+$ ln -s /etc/init.d/im /etc/rc5.d/S99im
+$ ln -s /etc/init.d/im /etc/rc1.d/K05im
+$ ln -s /etc/init.d/im /etc/rc6.d/K05im
+```
+
+Adjust the installation path by setting the IMDAEMON variable at /etc/init.d/im
+to the path where the IM im_service.py file is installed (e.g. /usr/local/im/im_service.py),
+or set the name of the script file (im_service.py) if the file is in the PATH
+(pip puts the im_service.py file in the PATH as default).
+
+Check the parameters in $IM_PATH/etc/im.cfg or /etc/im/im.cfg. Please pay attention
+to the next configuration variables, as they are the most important
+
+```
+DATA_DB - must be set to the URL to access the database to store the IM data.
+ Be careful if you have two different instances of the IM service running in the same machine!!.
+ It can be a MySQL DB: 'mysql://username:password@server/db_name' or
+ a SQLite one: 'sqlite:///etc/im/inf.dat'.
+
+CONTEXTUALIZATION_DIR - must be set to the full path where the IM contextualization files
+ are located. In case of using pip installation the default value is correct
+ (/usr/share/im/contextualization) in case of installing from sources set to
+ $IM_PATH/contextualization (e.g. /usr/local/im/contextualization)
+```
+
+### 1.4.1 SECURITY
+
+Security is disabled by default. Please notice that someone with local network access can "sniff" the traffic and
+get the messages exchanged with the IM, including the authorisation data for the cloud providers.
+
+Security can be activated both in the XMLRPC and REST APIs. Setting this variables:
+
+```
+XMLRCP_SSL = True
+```
+
+or
+
+```
+REST_SSL = True
+```
+
+And then set the variables: XMLRCP_SSL_* or REST_SSL_* to your certificates paths.
+
+### 1.4.2 SINGLE SITE
+
+To configure the IM as the orchestrator of a single site you can use the SINGLE_SITE*
+configuration variables. This is an example for an OpenNebula based site assuming that
+the hostname of the OpenNebula server is 'server.com':
+
+```
+SINGLE_SITE = True
+SINGLE_SITE_TYPE = OpenNebula
+SINGLE_SITE_AUTH_HOST = http://server.com:2633
+SINGLE_SITE_IMAGE_URL_PREFIX = one://server.com
+```
+
+And this second example shows how to configure for an OpenStack based site assuming that
+the hostname of the OpenStack keystone server is 'server.com':
+
+```
+SINGLE_SITE = True
+SINGLE_SITE_TYPE = OpenStack
+SINGLE_SITE_AUTH_HOST = https://server.com:5000
+SINGLE_SITE_IMAGE_URL_PREFIX = ost://server.com
+```
+
+Using this kind of configuration combined with OIDC tokens the IM authentication is
+simplified and a standard 'Bearer' authorization header can be used to interact with the IM service.
+
+### 1.4.3 OPENNEBULA TTS INTEGRATION
+
+The IM service enables to configure a WaTTS - the INDIGO Token Translation Service (https://github.com/indigo-dc/tts)
+to access OpenNebula sites. IM uses version 2 of the WaTTS API (https://indigo-dc.gitbooks.io/token-translation-service/content/api.html)
+
+To configure it you must set the value of the TTS_URL in the OpenNebula section:
+
+```
+TTS_URL = https://localhost:8443
+```
+
+In particular the WaTTS instance must be configured to include the hostname of the OpenNebula server
+in the plugin configuration of the WaTTS service, for example like this:
+
+```
+service.onesite.description = server.com
+```
+
diff --git a/doc/gitbook/rest-api.md b/doc/gitbook/rest-api.md
new file mode 100644
index 000000000..b51209523
--- /dev/null
+++ b/doc/gitbook/rest-api.md
@@ -0,0 +1,380 @@
+# IM REST API
+
+
+The IM Service can be accessed through a REST(ful) API.
+
+Every HTTP request must be accompanied by the header `AUTHORIZATION` with
+the content of the [auth-file](http://imdocs.readthedocs.io/en/devel/client.html#authorization-file), but putting all the elements in one line
+using "\\n" as separator. If the content of some of the values also has a "new line" character it must be replaced by a "\\\\n" as separator.
+If the content cannot be parsed successfully,
+or the user and password are not valid, it is returned the HTTP error
+code 401.
+
+In the special case of an IM configured with "Single site" support, a standard HTTP `AUTHORIZATION` header can be used:
+* Basic: With a cloud provider that supports simple user/password authentication.
+* Bearer: With a cloud provider that supports INDIGO IAM token authentication.
+
+The next tables summarize the resources and the HTTP methods available.
+
+| HTTP method | /infrastructures | /infrastructures/<infId> | /infrastructures/<infId>/vms/<vmId> |
+| -- | -- | -- | -- |
+| **GET** | List the infrastructure IDs.| List the virtual machines in the infrastructure infId | Get information associated to the virtual machine vmId in infId. |
+| **POST** | Create a new infrastructure based on the RADL posted | Create a new virtual machine based on the RADL posted. | |
+| **PUT** | | | Modify the virtual machine based on the RADL posted. |
+| **DELETE** | | Undeploy all the virtual machines in the infrastructure. | Undeploy the virtual machine. |
+
+
+| HTTP method | /infrastructures/<infId>/stop | /infrastructures/<infId>/start | /infrastructures/<infId>/reconfigure |
+| -- | -- | -- | -- |
+| **PUT** | Stop the infrastructure. | Start the infrastructure. | Reconfigure the infrastructure. |
+
+
+| HTTP method | /infrastructures/<infId>/vms/<vmId>/<property_name> | /infrastructures/<infId>/<property_name> |
+| -- | -- | -- |
+| **GET** | Get the specified property property_name associated to the machine vmId in infId. It has one special property: contmsg. | Get the specified property property_name associated to the infrastructure infId. It has four properties: contmsg, radl, state and outputs. |
+
+
+| HTTP method | /infrastructures/<infId>/vms/<vmId>/stop | /infrastructures/<infId>/vms/<vmId>/start |
+| -- | -- | -- |
+| **PUT** | Stop the machine vmId in infId. | Start the machine vmId in infId. |
+
+
+
+The error message returned by the service will depend on the `Accept`
+header of the request:
+
+- text/plain: (default option).
+- application/json: The request has a "Accept" header with
+ value "application/json". In this case the format will be:
+
+```json
+ {
+ "message": "Error message text",
+ "code" : 400
+ }
+```
+
+- text/html: The request has a "Accept" with value to "text/html".
+
+**GET** `http://imserver.com/infrastructures`:
+
+ * Response Content-type: text/uri-list or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 400
+
+ Return a list of URIs referencing the infrastructures associated to
+ the IM user. The result is JSON format has the following format:
+
+```json
+ {
+ "uri-list": [
+ { "uri" : "http://server.com:8800/infrastructures/inf_id1" },
+ { "uri" : "http://server.com:8800/infrastructures/inf_id2" }
+ ]
+ }
+```
+
+**POST** `http://imserver.com/infrastructures`:
+
+ * body: `RADL or TOSCA document`
+
+ * body Content-type: text/yaml, text/plain or application/json
+
+ * Response Content-type: text/uri-list
+
+ * ok response: 200 OK
+
+ * fail response: 401, 400, 415
+
+ Create and configure an infrastructure with the requirements
+ specified in the RADL or TOSCA document of the body contents (RADL in plain text
+ or in JSON formats). If success, it is returned the URI of the new
+ infrastructure. The result is JSON format has the following format:
+
+```json
+ {
+    "uri" : "http://server.com:8800/infrastructures/inf_id"
+ }
+```
+
+**GET** `http://imserver.com/infrastructures/`:
+
+ * Response Content-type: text/uri-list or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Return a list of URIs referencing the virtual machines associated to
+ the infrastructure with ID `infId`. The result is JSON format has
+ the following format:
+
+```json
+ {
+ "uri-list": [
+ { "uri" : "http://server.com:8800/infrastructures/inf_id/vms/0" },
+ { "uri" : "http://server.com:8800/infrastructures/inf_id/vms/1" }
+ ]
+ }
+```
+
+**GET** `http://imserver.com/infrastructures//`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400, 403
+
+ Return property `property_name` associated to the infrastructure with ID `infId`. It has three properties:
+
+ * `outputs`: in case of TOSCA documents it will return a JSON object with
+ the outputs of the TOSCA document.
+
+ * `contmsg`: a string with the contextualization message.
+
+ * `radl`: a string with the original specified RADL of the infrastructure.
+
+ * `state`: a JSON object with two elements:
+
+ * `state`: a string with the aggregated state of the infrastructure.
+
+ * `vm_states`: a dict indexed with the VM ID and the value the VM state.
+
+ The result is JSON format has the following format:
+
+```json
+ {
+ ["radl"|"state"|"contmsg"|"outputs"]:
+ }
+```
+
+**POST** `http://imserver.com/infrastructures/`:
+
+ * body: `RADL or TOSCA document`
+
+ * body Content-type: text/yaml or text/plain or application/json
+
+ * input fields: `context` (optional)
+
+ * Response Content-type: text/uri-list
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400, 415
+
+ Add the resources specified in the body contents (in TOSCA, plain RADL or
+ in JSON formats) to the infrastructure with ID `infId`. The RADL
+ restrictions are the same as in RPC-XML AddResource <addresource-xmlrpc>.
+
+  In case of TOSCA a whole TOSCA document is expected. In case a new template is
+  added to the TOSCA document or the ``count`` of a node is increased, new nodes
+  will be added to the infrastructure. In case of decreasing the number of the ``count``
+ scalable property of a node a ``removal_list`` property has to be added to specify
+ the ID of the VM to delete (see an example [here](https://github.com/indigo-dc/im/blob/master/test/files/tosca_remove.yml)).
+
+ If success, it is returned a list of URIs of the new virtual machines.
+ The `context` parameter is optional and is a flag to specify if the
+ contextualization step will be launched just after the VM addition.
+ Acceptable values: yes, no, true, false, 1 or 0. If not specified the
+ flag is set to True. The result is JSON format has the following
+ format:
+
+```json
+ {
+ "uri-list": [
+ { "uri" : "http://server.com:8800/infrastructures/inf_id/vms/2" },
+ { "uri" : "http://server.com:8800/infrastructures/inf_id/vms/3" }
+ ]
+ }
+```
+
+**PUT** `http://imserver.com/infrastructures//stop`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+  Perform the `stop` action in all the virtual machines in the
+ infrastructure with ID `infID`. If the operation has been performed
+ successfully the return value is an empty string.
+
+**PUT** `http://imserver.com/infrastructures//start`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+  Perform the `start` action in all the virtual machines in the
+ infrastructure with ID `infID`. If the operation has been performed
+ successfully the return value is an empty string.
+
+**PUT** `http://imserver.com/infrastructures//reconfigure`:
+
+ * body: `RADL document`
+
+ * body Content-type: text/plain or application/json
+
+ * input fields: `vm_list` (optional)
+
+ * Response Content-type: text/plain
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400, 415
+
+  Perform the `reconfigure` action in all the virtual machines in
+ the infrastructure with ID `infID`. It updates the configuration of
+ the infrastructure as indicated in the body contents (in plain RADL
+ or in JSON formats). The RADL restrictions are the same as
+ in RPC-XML Reconfigure <reconfigure-xmlrpc>. If no RADL are
+ specified, the contextualization process is stated again. The
+ `vm_list` parameter is optional and is a coma separated list of IDs
+ of the VMs to reconfigure. If not specified all the VMs will be
+ reconfigured. If the operation has been performed successfully the
+ return value is an empty string.
+
+**DELETE** `http://imserver.com/infrastructures/`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Undeploy the virtual machines associated to the infrastructure with
+ ID `infId`. If the operation has been performed successfully the
+ return value is an empty string.
+
+**GET** `http://imserver.com/infrastructures//vms/`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Return information about the virtual machine with ID `vmId`
+ associated to the infrastructure with ID `infId`. The returned
+ string is in RADL format, either in plain RADL or in JSON formats.
+ See more the details of the output in
+ GetVMInfo <GetVMInfo-xmlrpc>. The result is JSON format has
+ the following format:
+
+```json
+ {
+ ["radl"|"state"|"contmsg"]: ""
+ }
+```
+
+**GET** `http://imserver.com/infrastructures//vms//`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Return property `property_name` from to the virtual machine with ID
+ `vmId` associated to the infrastructure with ID `infId`. It also has
+ one special property `contmsg` that provides a string with the
+ contextualization message of this VM. The result is JSON format has
+ the following format:
+
+```json
+ {
+ "": ""
+ }
+```
+
+**PUT** `http://imserver.com/infrastructures//vms/`:
+
+ * body: `RADL document`
+
+ * body Content-type: text/plain or application/json
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400, 415
+
+ Change the features of the virtual machine with ID `vmId` in the
+  infrastructure with ID `infId`, specified by the RADL document
+ specified in the body contents (in plain RADL or in JSON formats).
+  If the operation has been performed successfully the return value
+  is an RADL document with the VM properties modified
+ (also in plain RADL or in JSON formats). The result is JSON format
+ has the following format:
+
+```json
+ {
+ "radl":
+ }
+```
+
+**DELETE** `http://imserver.com/infrastructures//vms/`:
+
+ * input fields: `context` (optional)
+
+ * Response Content-type: text/plain
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Undeploy the virtual machine with ID `vmId` associated to the
+ infrastructure with ID `infId`. If `vmId` is a comma separated list
+ of VM IDs, all the VMs of this list will be undeployed. The
+ `context` parameter is optional and is a flag to specify if the
+ contextualization step will be launched just after the VM addition.
+ Acceptable values: yes, no, true, false, 1 or 0. If not specified the
+ flag is set to True. If the operation has been performed
+ successfully the return value is an empty string.
+
+**PUT** `http://imserver.com/infrastructures//vms//start`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Perform the `start` action in the virtual machine with ID `vmId`
+ associated to the infrastructure with ID `infId`. If the operation
+ has been performed successfully the return value is an empty string.
+
+**PUT** `http://imserver.com/infrastructures//vms//stop`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 401, 403, 404, 400
+
+ Perform the `stop` action in the virtual machine with ID `vmId`
+ associated to the infrastructure with ID `infId`. If the operation
+ has been performed successfully the return value is an empty string.
+
+**GET** `http://imserver.com/version`:
+
+ * Response Content-type: text/plain or application/json
+
+ * ok response: 200 OK
+
+ * fail response: 400
+
+ Return the version of the IM service. The result is JSON format has
+ the following format:
+
+```json
+ {
+ "version": "1.4.4"
+ }
+```
diff --git a/doc/gitbook/service-reference.md b/doc/gitbook/service-reference.md
new file mode 100644
index 000000000..328b56f79
--- /dev/null
+++ b/doc/gitbook/service-reference.md
@@ -0,0 +1,48 @@
+# Infrastructure Manager - Service Reference Card
+
+**Functional description:**
+ * IM is a tool that deploys complex and customized virtual infrastructures on IaaS Cloud deployments (such as AWS, OpenStack, etc.). It eases the access and the usability of IaaS clouds by automating the VMI (Virtual Machine Image) selection, deployment, configuration, software installation, monitoring and update of the virtual infrastructure.
+ It supports APIs from a large number of virtual platforms, making user applications cloud-agnostic. In addition it integrates a contextualization system to enable the installation and configuration of all the user required applications providing the user with a fully functional infrastructure.
+ This version evolved in the INDIGO-Datacloud project (https://www.indigo-datacloud.eu/). It is used by the [INDIGO Orchestrator](https://github.com/indigo-dc/orchestrator) to contact Cloud sites to finally deploy the VMs/containers.
+
+**Services running:**
+ * im: IM daemon
+
+**Configuration:**
+ * Adjust the installation path by setting the IMDAEMON variable at `/etc/init.d/im` to the path where the IM im_service.py file is installed (e.g. /usr/local/im/im_service.py), or set the name of the script file (im_service.py) if the file is in the PATH (pip puts the im_service.py file in the PATH as default).
+
+ * Check the parameters in `$IM_PATH/etc/im.cfg` or `/etc/im/im.cfg`. Please pay attention to the next configuration variables, as they are the most important
+
+ * DATA_FILE - must be set to the full path where the IM data file will be created (e.g. `/usr/local/im/inf.dat`). Be careful if you have two different instances of the IM service running in the same machine!!.
+
+  * DATA_DB - must be set to a full URL of a MySQL database to store the IM data (e.g. mysql://username:password@server/db_name). If this value is set it overwrites the DATA_FILE value.
+
+ * CONTEXTUALIZATION_DIR - must be set to the full path where the IM contextualization files are located. In case of using pip installation the default value is correct (`/usr/share/im/contextualization`) in case of installing from sources set to $IM_PATH/contextualization (e.g. /usr/local/im/contextualization)
+
+**Logfile locations (and management) and other useful audit information:**
+ * *IM log:* The log file is defined in the LOG_FILE variable of the im.cfg file. The default value is `/var/log/im/im.log`.
+
+**Open ports needed:**
+ * Default ports used by the IM:
+ * XML-RPC API:
+ * 8899
+ * REST API:
+ * 8800
+
+**Where is service state held (and can it be rebuilt):**
+ * Configuration information is stored in a data file or data base. Check the configuration section for more info about the files.
+
+**Cron jobs:**
+ * None
+
+**Security information**
+ * Security is disabled by default. Please notice that someone with local network access can "sniff" the traffic and obtain the messages exchanged with the IM, including the authorization data for the cloud providers.
+ * Security can be activated both in the XMLRPC and REST APIs by setting these variables:
+ * XMLRCP_SSL = True
+ or
+ * REST_SSL = True
+
+ And then set the XMLRCP_SSL_* or REST_SSL_* variables to your certificate paths.
+
+**Location of reference documentation:**
+ [IM on Gitbook](https://indigo-dc.gitbooks.io/im/content/)
diff --git a/doc/source/REST.rst b/doc/source/REST.rst
index dfce18c1d..0f84d142f 100644
--- a/doc/source/REST.rst
+++ b/doc/source/REST.rst
@@ -40,7 +40,7 @@ Next tables summaries the resources and the HTTP methods available.
| **GET** | | **Get** the specified property ``property_name`` | | **Get** the specified property ``property_name`` |
| | | associated to the machine ``vmId`` in ``infId``. | | associated to the infrastructure ``infId``. |
| | | It has one special property: ``contmsg``. | | It has three properties: ``contmsg``, ``radl``, |
-| | | | ``state``. |
+| | | | ``state`` and ``outputs``. |
+-------------+-----------------------------------------------------+----------------------------------------------------+
+-------------+-----------------------------------------------+------------------------------------------------+
@@ -121,6 +121,7 @@ GET ``http://imserver.com/infrastructures//``
:fail response: 401, 404, 400, 403
Return property ``property_name`` associated to the infrastructure with ID ``infId``. It has three properties:
+ :``outputs``: in case of TOSCA documents it will return a JSON object with the outputs of the TOSCA document.
:``contmsg``: a string with the contextualization message. In case of ``headeronly`` flag is set to 'yes',
'true' or '1' only the initial part of the infrastructure contextualization log will be
returned (without any VM contextualization log).
@@ -133,7 +134,7 @@ GET ``http://imserver.com/infrastructures//``
The result is JSON format has the following format::
{
- ["radl"|"state"|"contmsg"]:
+ ["radl"|"state"|"contmsg"|"outputs"]:
}
POST ``http://imserver.com/infrastructures/``
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 33b38df15..ef0c86c77 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -231,7 +231,7 @@
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'IMDocumentation.tex', u'IM Documentation',
- u'IM', 'manual'),
+ u'I3M-GRyCAP', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -261,7 +261,7 @@
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'imdocumentation', u'IM Documentation',
- [u'IM'], 1)
+ [u'I3M-GRyCAP'], 1)
]
# If true, show URL addresses after external links.
@@ -275,7 +275,7 @@
# dir menu entry, description, category)
texinfo_documents = [
('index', 'IMDocumentation', u'IM Documentation',
- u'IM', 'IMDocumentation', 'One line description of project.',
+ u'I3M-GRyCAP', 'IMDocumentation', 'One line description of project.',
'Miscellaneous'),
]
diff --git a/docker-devel/README.md b/docker-devel/README.md
new file mode 100644
index 000000000..1565b430b
--- /dev/null
+++ b/docker-devel/README.md
@@ -0,0 +1,32 @@
+ IM - Infrastructure Manager (With TOSCA Support)
+=================================================
+
+IM is a tool that deploys complex and customized virtual infrastructures on IaaS
+Cloud deployments (such as AWS, OpenStack, etc.). It eases the access and the
+usability of IaaS clouds by automating the VMI (Virtual Machine Image)
+selection, deployment, configuration, software installation, monitoring and
+update of the virtual infrastructure. It supports APIs from a large number of virtual
+platforms, making user applications cloud-agnostic. In addition it integrates a
+contextualization system to enable the installation and configuration of all the
+user required applications providing the user with a fully functional
+infrastructure.
+
+This version, evolved in the INDIGO-Datacloud project (https://www.indigo-datacloud.eu/), has
+added support for TOSCA documents as input for the infrastructure creation.
+
+Read the documentation and more at http://www.grycap.upv.es/im.
+
+There is also an Infrastructure Manager YouTube playlist with a set of videos demonstrating
+the functionality of the platform: https://www.youtube.com/playlist?list=PLgPH186Qwh_37AMhEruhVKZSfoYpHkrUp.
+
+DOCKER IMAGE
+=============
+
+A Docker image named `indigodatacloud/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://hub.docker.com/r/indigodatacloud/im/.
+
+How to launch the IM service using docker:
+
+```sh
+sudo docker run -d -p 8899:8899 -p 8800:8800 --name im indigodatacloud/im
+```
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 781988701..82d76426e 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -17,6 +17,7 @@ RUN pip install msrest msrestazure azure-common azure-mgmt-storage azure-mgmt-co
# Install IM
RUN apt-get update && apt-get install --no-install-recommends -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev && \
pip install MySQL-python && \
+ pip install xmltodict && \
pip install IM==1.6.6 && \
apt-get purge -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \
apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 000000000..e8a067a43
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,45 @@
+ IM - Infrastructure Manager (With TOSCA Support)
+=================================================
+
+IM is a tool that deploys complex and customized virtual infrastructures on IaaS
+Cloud deployments (such as AWS, OpenStack, etc.). It eases the access and the
+usability of IaaS clouds by automating the VMI (Virtual Machine Image)
+selection, deployment, configuration, software installation, monitoring and
+update of the virtual infrastructure. It supports APIs from a large number of virtual
+platforms, making user applications cloud-agnostic. In addition it integrates a
+contextualization system to enable the installation and configuration of all the
+user required applications providing the user with a fully functional
+infrastructure.
+
+This version, evolved in the INDIGO-Datacloud project (https://www.indigo-datacloud.eu/), has
+added support for TOSCA documents as input for the infrastructure creation.
+
+Read the documentation and more at http://www.grycap.upv.es/im.
+
+There is also an Infrastructure Manager YouTube playlist with a set of videos demonstrating
+the functionality of the platform: https://www.youtube.com/playlist?list=PLgPH186Qwh_37AMhEruhVKZSfoYpHkrUp.
+
+DOCKER IMAGE
+=============
+
+A Docker image named `indigodatacloud/im` has been created to make easier the deployment of an IM service using the
+default configuration. Information about this image can be found here: https://hub.docker.com/r/indigodatacloud/im/.
+
+How to launch the IM service using docker:
+
+```sh
+sudo docker run -d -p 8899:8899 -p 8800:8800 --name im indigodatacloud/im
+```
+
+You can also specify an external MySQL server to store IM data using the IM_DATA_DB environment variable:
+
+```sh
+sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_DATA_DB=mysql://username:password@server/db_name --name im indigodatacloud/im
+```
+
+You can also use the IM to expose an OpenNebula cloud provider as a TOSCA-compliant endpoint for your site:
+
+```sh
+$ sudo docker run -d -p 8899:8899 -p 8800:8800 -e IM_SINGLE_SITE_ONE_HOST=oneserver.com --name im indigodatacloud/im
+```
+
\ No newline at end of file
diff --git a/etc/im.cfg b/etc/im.cfg
index a41f45f62..8509ed24b 100644
--- a/etc/im.cfg
+++ b/etc/im.cfg
@@ -106,6 +106,26 @@ PLAYBOOK_RETRIES = 3
# This are the default values:
# PRIVATE_NET_MASKS = 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,169.254.0.0/16,100.64.0.0/10,192.0.0.0/24,198.18.0.0/15
+# Flag to use the IM as interface to a single site (OpenNebula or OpenStack site)
+SINGLE_SITE = False
+# Set the type of the single site
+SINGLE_SITE_TYPE = OpenNebula
+# Set the host to be used in the auth line of the single site
+SINGLE_SITE_AUTH_HOST = http://server.com:2633
+# Set the url prefix of the images of the single site
+SINGLE_SITE_IMAGE_URL_PREFIX = one://server.com/
+
+# List of OIDC issuers supported
+OIDC_ISSUERS = https://iam-test.indigo-datacloud.eu/
+# If set the IM will check that the string defined here appear in the "aud" claim of the OpenID access token
+#OIDC_AUDIENCE =
+# OIDC client ID and secret of the IM service
+#OIDC_CLIENT_ID =
+#OIDC_CLIENT_SECRET =
+# List of scopes that must appear in the token request to access the IM service
+# Client ID and Secret must be provided to make it work
+#OIDC_SCOPES =
+
# Time (in seconds) the IM service will maintain the information of an infrastructure
# in memory. Only used in case of IM in HA mode.
#INF_CACHE_TIME = 3600
@@ -119,4 +139,5 @@ TEMPLATE_CONTEXT =
TEMPLATE_OTHER = GRAPHICS = [type="vnc",listen="0.0.0.0", keymap="es"]
# Set the IMAGE_UNAME value in case of using the name of the disk image in the Template
IMAGE_UNAME = oneadmin
-
+# URL of the OpenNebula TTS endpoint (https://www.gitbook.com/book/indigo-dc/token-translation-service)
+TTS_URL = https://localhost:8443
diff --git a/examples/clues_tosca.yml b/examples/clues_tosca.yml
new file mode 100644
index 000000000..41e9e1cc1
--- /dev/null
+++ b/examples/clues_tosca.yml
@@ -0,0 +1,50 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA CLUES test for the IM
+
+topology_template:
+
+ node_templates:
+
+ elastic_cluster:
+ type: tosca.nodes.indigo.ElasticCluster
+ capabilities:
+ lrms:
+ properties:
+ type: torque
+ scalable:
+ properties:
+ max_instances: 5
+ min_instances: 0
+ default_instances: 0
+ requirements:
+ - host: torque_server
+ - wn: wn_node
+
+ torque_server:
+ type: tosca.nodes.indigo.Compute
+ properties:
+ public_ip: yes
+ capabilities:
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ #distribution: scientific
+ #version: 6.6
+
+ wn_node:
+ type: tosca.nodes.indigo.ElasticCluster.WorkerNode
+ capabilities:
+ wn:
+ properties:
+ name: vnode
+ type: tosca.nodes.indigo.LRMS.WorkerNode.Torque
+ host:
+ num_cpus: 1
+ os:
+ type: linux
\ No newline at end of file
diff --git a/examples/galaxy_tosca.yml b/examples/galaxy_tosca.yml
new file mode 100644
index 000000000..7912ea296
--- /dev/null
+++ b/examples/galaxy_tosca.yml
@@ -0,0 +1,38 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA Galaxy test for the IM
+
+topology_template:
+
+ node_templates:
+
+ bowtie2_galaxy_tool:
+ type: tosca.nodes.indigo.GalaxyShedTool
+ properties:
+ name: bowtie2
+ owner: devteam
+ tool_panel_section_id: ngs_mapping
+ requirements:
+ - host: galaxy
+
+ galaxy:
+ type: tosca.nodes.indigo.GalaxyPortal
+ requirements:
+ - host: galaxy_server
+
+ galaxy_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ #distribution: scientific
+ #version: 6.6
+
diff --git a/examples/tosca.yml b/examples/tosca.yml
new file mode 100644
index 000000000..ea0f9689a
--- /dev/null
+++ b/examples/tosca.yml
@@ -0,0 +1,118 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA test for the IM
+
+
+topology_template:
+ inputs:
+ db_name:
+ type: string
+ default: dbname
+ db_user:
+ type: string
+ default: dbuser
+ db_password:
+ type: string
+ default: pass
+ mysql_root_password:
+ type: string
+ default: mypass
+
+ relationship_templates:
+ my_custom_connection:
+ type: HostedOn
+ interfaces:
+ Configure:
+ pre_configure_source: scripts/wp_db_configure.sh
+
+ node_templates:
+ apache:
+ type: tosca.nodes.WebServer.Apache
+ requirements:
+ - host: web_server
+
+ web_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ distribution: scientific
+ version: 6.6
+
+ test_db:
+ type: tosca.nodes.Database.MySQL
+ properties:
+ name: { get_input: db_name }
+ user: { get_input: db_user }
+ password: { get_input: db_password }
+ root_password: { get_input: mysql_root_password }
+ requirements:
+ - host:
+ node: mysql
+ relationship: my_custom_connection
+
+ mysql:
+ type: tosca.nodes.DBMS.MySQL
+ properties:
+ root_password: { get_input: mysql_root_password }
+ requirements:
+ - host:
+ node_filter:
+ capabilities:
+ # Constraints for selecting “host” (Container Capability)
+ - host:
+ properties:
+ - num_cpus: { in_range: [1,4] }
+ - mem_size: { greater_or_equal: 1 GB }
+ # Constraints for selecting “os” (OperatingSystem Capability)
+ - os:
+ properties:
+ - architecture: { equal: x86_64 }
+ - type: linux
+ - distribution: ubuntu
+
+ db_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: ubuntu
+ requirements:
+ # contextually this can only be a relationship type
+ - local_storage:
+ # capability is provided by Compute Node Type
+ node: my_block_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: /mnt/disk
+ # This maps the local requirement name ‘local_storage’ to the
+ # target node’s capability name ‘attachment’
+ device: hdb
+ interfaces:
+ Configure:
+ pre_configure_source: scripts/wp_db_configure.sh
+
+ my_block_storage:
+ type: BlockStorage
+ properties:
+ size: 1 GB
+
+
+
diff --git a/install.sh b/install.sh
index a2f3cbdfe..38350b6de 100755
--- a/install.sh
+++ b/install.sh
@@ -97,7 +97,7 @@ then
echo "ansible_install.yaml file present. Do not download."
else
echo "Downloading ansible_install.yaml file from github."
- wget http://raw.githubusercontent.com/grycap/im/master/ansible_install.yaml
+ wget http://raw.githubusercontent.com/indigo-dc/im/master/ansible_install.yaml
fi
echo "Call Ansible playbook to install the IM."
diff --git a/packages/generate_deb.sh b/packages/generate_deb.sh
index 28a360dc4..6daad14ef 100755
--- a/packages/generate_deb.sh
+++ b/packages/generate_deb.sh
@@ -5,6 +5,8 @@ apt install -y python-stdeb
# remove the ansible requirement as it makes to generate an incorrect dependency python-ansible
# also remove the pysqlite requirement as it makes to generate an incorrect dependency python-pysqlite1.1
sed -i '/install_requires/c\ install_requires=["paramiko >= 1.14", "PyYAML", suds_pkg,' setup.py
-python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests" bdist_deb
+python setup.py --command-packages=stdeb.command sdist_dsc --depends "python-radl, python-mysqldb, python-pysqlite2, ansible, python-paramiko, python-yaml, python-suds, python-boto, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy3, python-requests, python-tosca-parser" bdist_deb
mkdir dist_pkg
cp deb_dist/*.deb dist_pkg
+
+
diff --git a/packages/generate_rpm.sh b/packages/generate_rpm.sh
index 6328bb346..a2cc6790a 100755
--- a/packages/generate_rpm.sh
+++ b/packages/generate_rpm.sh
@@ -2,6 +2,6 @@
yum -y install rpm-build python-setuptools
echo "%_unpackaged_files_terminate_build 0" > ~/.rpmmacros
-python setup.py bdist_rpm --release="$1" --requires="which, MySQL-python, python-sqlite3dbm, RADL, ansible, python-paramiko, PyYAML, python-suds, python-boto >= 2.29, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy, python-requests, python-xmltodict"
+python setup.py bdist_rpm --release="$1" --requires="which, MySQL-python, python-sqlite3dbm, RADL, ansible, python-paramiko, PyYAML, python-suds, python-boto >= 2.29, python-libcloud, python-bottle, python-netaddr, python-scp, python-cherrypy, python-requests, python-xmltodict, tosca-parser"
mkdir dist_pkg
-cp dist/*.noarch.rpm dist_pkg
\ No newline at end of file
+cp dist/*.noarch.rpm dist_pkg
diff --git a/scripts/db_1_5_0_to_1_5_1.py b/scripts/db_1_5_0_to_1_5_1.py
index 0c377d0da..1c4ce1fa9 100644
--- a/scripts/db_1_5_0_to_1_5_1.py
+++ b/scripts/db_1_5_0_to_1_5_1.py
@@ -60,6 +60,8 @@ def deserialize_info(str_data):
dic['auth'] = Authentication.deserialize(dic['auth'])
if dic['radl']:
dic['radl'] = parse_radl_json(dic['radl'])
+ if 'extra_info' in dic and dic['extra_info'] and "TOSCA" in dic['extra_info']:
+ dic['extra_info']['TOSCA'] = Tosca.deserialize(dic['extra_info']['TOSCA'])
newinf.__dict__.update(dic)
newinf.cloud_connector = None
# Set the ConfManager object and the lock to the data loaded
diff --git a/setup.py b/setup.py
index ca1fecf9b..25b90982f 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,8 @@
author='GRyCAP - Universitat Politecnica de Valencia',
author_email='micafer1@upv.es',
url='http://www.grycap.upv.es/im',
- packages=['IM', 'IM.ansible_utils', 'IM.connectors'],
+ include_package_data=True,
+ packages=['IM', 'IM.ansible_utils', 'IM.connectors', 'IM.tosca', 'IM.openid', 'IM.tts'],
scripts=["im_service.py"],
data_files=datafiles,
license="GPL version 3, http://www.gnu.org/licenses/gpl-3.0.txt",
@@ -63,5 +64,5 @@
platforms=["any"],
install_requires=["ansible >= 2.0", "paramiko >= 1.14", "PyYAML", suds_pkg, sqlite_pkg, "cheroot",
"boto >= 2.29", "apache-libcloud >= 0.17", "RADL >= 1.1.0", "bottle", "netaddr",
- "requests", "scp"]
+ "requests", "scp", "tosca-parser"]
)
diff --git a/test/files/data.json b/test/files/data.json
index 047a64ba3..c6a55ea48 100644
--- a/test/files/data.json
+++ b/test/files/data.json
@@ -1 +1 @@
-{"system_counter": 0, "vm_list": ["{\"info\": \"[\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"public\\\",\\n \\\"outbound\\\": \\\"yes\\\",\\n \\\"provider_id\\\": \\\"publica\\\"\\n },\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"privada\\\",\\n \\\"provider_id\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"system\\\",\\n \\\"cpu.arch\\\": \\\"x86_64\\\",\\n \\\"cpu.count\\\": 1,\\n \\\"disk.0.device\\\": \\\"hda\\\",\\n \\\"disk.0.image.name\\\": \\\"OneCloud_Ubuntu_14_04\\\",\\n \\\"disk.0.image.url\\\": \\\"one://onecloud.i3m.upv.es/77\\\",\\n \\\"disk.0.os.credentials.new.password\\\": \\\"N0tan+mala\\\",\\n \\\"disk.0.os.credentials.password\\\": \\\"yoyoyo\\\",\\n \\\"disk.0.os.credentials.username\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.flavour\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.name\\\": \\\"linux\\\",\\n \\\"disk.0.os.version\\\": \\\"14.04\\\",\\n \\\"disk.0.size\\\": 20971520000,\\n \\\"id\\\": \\\"node\\\",\\n \\\"instance_id\\\": \\\"24962\\\",\\n \\\"instance_name\\\": \\\"OneCloud_Ubuntu_14_04\\\",\\n \\\"launch_time\\\": 1479402015,\\n \\\"memory.size\\\": 1073741824,\\n \\\"net_interface.0.connection\\\": \\\"public\\\",\\n \\\"net_interface.0.dns_name\\\": \\\"testnode\\\",\\n \\\"net_interface.0.ip\\\": \\\"158.42.105.16\\\",\\n \\\"net_interface.1.connection\\\": \\\"privada\\\",\\n \\\"net_interface.1.ip\\\": \\\"10.10.2.24\\\",\\n \\\"provider.host\\\": \\\"onecloud.i3m.upv.es\\\",\\n \\\"provider.port\\\": 2633,\\n \\\"provider.type\\\": \\\"OpenNebula\\\",\\n \\\"state\\\": \\\"off\\\",\\n \\\"virtual_system_type\\\": \\\"qemu\\\"\\n },\\n {\\n \\\"class\\\": \\\"configure\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"recipes\\\": \\\"\\\\n\\\\n---\\\\n - tasks:\\\\n - name: test\\\\n command: sleep 30\\\\n\\\\n\\\\n\\\"\\n },\\n {\\n \\\"class\\\": \\\"deploy\\\",\\n \\\"system\\\": \\\"node\\\",\\n \\\"vm_number\\\": 1\\n }\\n]\", \"im_id\": 0, \"requested_radl\": \"[\\n {\\n \\\"class\\\": 
\\\"network\\\",\\n \\\"id\\\": \\\"public\\\",\\n \\\"outbound\\\": \\\"yes\\\"\\n },\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"system\\\",\\n \\\"cpu.count_min\\\": 1,\\n \\\"disk.0.os.credentials.new.password\\\": \\\"N0tan+mala\\\",\\n \\\"disk.0.os.flavour\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.name\\\": \\\"linux\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"memory.size_min\\\": 1073741824,\\n \\\"net_interface.0.connection\\\": \\\"public\\\",\\n \\\"net_interface.0.dns_name\\\": \\\"testnode\\\",\\n \\\"net_interface.1.connection\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"configure\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"recipes\\\": \\\"\\\\n\\\\n---\\\\n - tasks:\\\\n - name: test\\\\n command: sleep 30\\\\n\\\\n\\\\n\\\"\\n },\\n {\\n \\\"class\\\": \\\"deploy\\\",\\n \\\"system\\\": \\\"node\\\",\\n \\\"vm_number\\\": 1\\n }\\n]\", \"ctxt_pid\": null, \"last_update\": 1479401757, \"state\": \"off\", \"cont_out\": \"\", \"id\": \"24962\", \"destroy\": true, \"ssh_connect_errors\": 0, \"configured\": null, \"cloud\": \"{\\\"protocol\\\": \\\"\\\", \\\"id\\\": \\\"onecloud\\\", \\\"path\\\": \\\"\\\", \\\"server\\\": \\\"onecloud.i3m.upv.es\\\", \\\"type\\\": \\\"OpenNebula\\\", \\\"port\\\": 2633}\"}"], "radl": "[\n {\n \"class\": \"network\",\n \"id\": \"public\",\n \"outbound\": \"yes\"\n },\n {\n \"class\": \"network\",\n \"id\": \"privada\"\n },\n {\n \"class\": \"system\",\n \"cpu.count_min\": 1,\n \"disk.0.os.credentials.new.password\": \"N0tan+mala\",\n \"disk.0.os.flavour\": \"ubuntu\",\n \"disk.0.os.name\": \"linux\",\n \"id\": \"node\",\n \"memory.size_min\": 1073741824,\n \"net_interface.0.connection\": \"public\",\n \"net_interface.0.dns_name\": \"testnode\",\n \"net_interface.1.connection\": \"privada\"\n },\n {\n \"class\": \"configure\",\n \"id\": \"node\",\n \"recipes\": \"\\n\\n---\\n - tasks:\\n - name: test\\n command: sleep 30\\n\\n\\n\"\n },\n {\n \"class\": 
\"deploy\",\n \"cloud\": \"onecloud\",\n \"system\": \"node\",\n \"vm_number\": 1\n }\n]", "deleted": true, "configured": true, "vm_master": 0, "auth": "[{\"username\": \"micafer\", \"password\": \"grycap01\", \"type\": \"InfrastructureManager\"}]", "cont_out": "2016-11-17 17:55:57.713062: Select master VM\n2016-11-17 17:55:57.713789: Wait master VM to boot\n", "id": "a091a1d8-ace6-11e6-903b-02421c3eaeeb", "private_networks": {"privada": "onecloud"}, "last_ganglia_update": 0, "vm_id": 1, "ansible_configured": null}
\ No newline at end of file
+{"system_counter": 0, "vm_list": ["{\"info\": \"[\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"public\\\",\\n \\\"outbound\\\": \\\"yes\\\",\\n \\\"provider_id\\\": \\\"publica\\\"\\n },\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"privada\\\",\\n \\\"provider_id\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"system\\\",\\n \\\"cpu.arch\\\": \\\"x86_64\\\",\\n \\\"cpu.count\\\": 1,\\n \\\"disk.0.device\\\": \\\"hda\\\",\\n \\\"disk.0.image.name\\\": \\\"OneCloud_Ubuntu_14_04\\\",\\n \\\"disk.0.image.url\\\": \\\"one://onecloud.i3m.upv.es/77\\\",\\n \\\"disk.0.os.credentials.new.password\\\": \\\"N0tan+mala\\\",\\n \\\"disk.0.os.credentials.password\\\": \\\"yoyoyo\\\",\\n \\\"disk.0.os.credentials.username\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.flavour\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.name\\\": \\\"linux\\\",\\n \\\"disk.0.os.version\\\": \\\"14.04\\\",\\n \\\"disk.0.size\\\": 20971520000,\\n \\\"id\\\": \\\"node\\\",\\n \\\"instance_id\\\": \\\"24962\\\",\\n \\\"instance_name\\\": \\\"OneCloud_Ubuntu_14_04\\\",\\n \\\"launch_time\\\": 1479402015,\\n \\\"memory.size\\\": 1073741824,\\n \\\"net_interface.0.connection\\\": \\\"public\\\",\\n \\\"net_interface.0.dns_name\\\": \\\"testnode\\\",\\n \\\"net_interface.0.ip\\\": \\\"158.42.105.16\\\",\\n \\\"net_interface.1.connection\\\": \\\"privada\\\",\\n \\\"net_interface.1.ip\\\": \\\"10.10.2.24\\\",\\n \\\"provider.host\\\": \\\"onecloud.i3m.upv.es\\\",\\n \\\"provider.port\\\": 2633,\\n \\\"provider.type\\\": \\\"OpenNebula\\\",\\n \\\"state\\\": \\\"off\\\",\\n \\\"virtual_system_type\\\": \\\"qemu\\\"\\n },\\n {\\n \\\"class\\\": \\\"configure\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"recipes\\\": \\\"\\\\n\\\\n---\\\\n - tasks:\\\\n - name: test\\\\n command: sleep 30\\\\n\\\\n\\\\n\\\"\\n },\\n {\\n \\\"class\\\": \\\"deploy\\\",\\n \\\"system\\\": \\\"node\\\",\\n \\\"vm_number\\\": 1\\n }\\n]\", \"im_id\": 0, \"requested_radl\": \"[\\n {\\n \\\"class\\\": 
\\\"network\\\",\\n \\\"id\\\": \\\"public\\\",\\n \\\"outbound\\\": \\\"yes\\\"\\n },\\n {\\n \\\"class\\\": \\\"network\\\",\\n \\\"id\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"system\\\",\\n \\\"cpu.count_min\\\": 1,\\n \\\"disk.0.os.credentials.new.password\\\": \\\"N0tan+mala\\\",\\n \\\"disk.0.os.flavour\\\": \\\"ubuntu\\\",\\n \\\"disk.0.os.name\\\": \\\"linux\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"memory.size_min\\\": 1073741824,\\n \\\"net_interface.0.connection\\\": \\\"public\\\",\\n \\\"net_interface.0.dns_name\\\": \\\"testnode\\\",\\n \\\"net_interface.1.connection\\\": \\\"privada\\\"\\n },\\n {\\n \\\"class\\\": \\\"configure\\\",\\n \\\"id\\\": \\\"node\\\",\\n \\\"recipes\\\": \\\"\\\\n\\\\n---\\\\n - tasks:\\\\n - name: test\\\\n command: sleep 30\\\\n\\\\n\\\\n\\\"\\n },\\n {\\n \\\"class\\\": \\\"deploy\\\",\\n \\\"system\\\": \\\"node\\\",\\n \\\"vm_number\\\": 1\\n }\\n]\", \"ctxt_pid\": null, \"last_update\": 1479401757, \"state\": \"off\", \"cont_out\": \"\", \"id\": \"24962\", \"destroy\": true, \"ssh_connect_errors\": 0, \"configured\": null, \"cloud\": \"{\\\"protocol\\\": \\\"\\\", \\\"id\\\": \\\"onecloud\\\", \\\"path\\\": \\\"\\\", \\\"server\\\": \\\"onecloud.i3m.upv.es\\\", \\\"type\\\": \\\"OpenNebula\\\", \\\"port\\\": 2633}\"}"], "radl": "[\n {\n \"class\": \"network\",\n \"id\": \"public\",\n \"outbound\": \"yes\"\n },\n {\n \"class\": \"network\",\n \"id\": \"privada\"\n },\n {\n \"class\": \"system\",\n \"cpu.count_min\": 1,\n \"disk.0.os.credentials.new.password\": \"N0tan+mala\",\n \"disk.0.os.flavour\": \"ubuntu\",\n \"disk.0.os.name\": \"linux\",\n \"id\": \"node\",\n \"memory.size_min\": 1073741824,\n \"net_interface.0.connection\": \"public\",\n \"net_interface.0.dns_name\": \"testnode\",\n \"net_interface.1.connection\": \"privada\"\n },\n {\n \"class\": \"configure\",\n \"id\": \"node\",\n \"recipes\": \"\\n\\n---\\n - tasks:\\n - name: test\\n command: sleep 30\\n\\n\\n\"\n },\n {\n \"class\": 
\"deploy\",\n \"cloud\": \"onecloud\",\n \"system\": \"node\",\n \"vm_number\": 1\n }\n]", "extra_info":"{}", "deleted": true, "configured": true, "vm_master": 0, "auth": "[{\"username\": \"micafer\", \"password\": \"grycap01\", \"type\": \"InfrastructureManager\"}]", "cont_out": "2016-11-17 17:55:57.713062: Select master VM\n2016-11-17 17:55:57.713789: Wait master VM to boot\n", "id": "a091a1d8-ace6-11e6-903b-02421c3eaeeb", "private_networks": {"privada": "onecloud"}, "last_ganglia_update": 0, "vm_id": 1, "ansible_configured": null}
\ No newline at end of file
diff --git a/test/files/iam_token_info.json b/test/files/iam_token_info.json
new file mode 100644
index 000000000..c58697450
--- /dev/null
+++ b/test/files/iam_token_info.json
@@ -0,0 +1,17 @@
+{
+ "active": true,
+ "scope": "address phone openid profile offline_access email",
+ "expires_at": "2016-02-16T09:17:46+0000",
+ "exp": 1480000000,
+ "sub": "xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxxx",
+ "user_id": "username",
+ "client_id": "cid",
+ "token_type": "Bearer",
+ "groups": [
+ "Users",
+ "Developers"
+ ],
+ "preferred_username": "username",
+ "organisation_name": "indigo-d",
+ "email": "me@server.com"
+}
diff --git a/test/files/iam_user_info.json b/test/files/iam_user_info.json
new file mode 100644
index 000000000..7bc8b105c
--- /dev/null
+++ b/test/files/iam_user_info.json
@@ -0,0 +1,20 @@
+{
+ "sub": "sub",
+ "name": "Miguel",
+ "preferred_username": "micafer",
+ "family_name": "Caballer",
+ "email": "",
+ "email_verified": true,
+ "phone_number_verified": false,
+ "groups": [
+ {
+ "id": "gid",
+ "name": "Users"
+ },
+ {
+ "id": "gid",
+ "name": "Developers"
+ }
+ ],
+ "organisation_name": "indigo-dc"
+}
\ No newline at end of file
diff --git a/test/files/tosca_add.yml b/test/files/tosca_add.yml
new file mode 100644
index 000000000..b394be9a4
--- /dev/null
+++ b/test/files/tosca_add.yml
@@ -0,0 +1,115 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA test for the IM
+
+repositories:
+ indigo_repository:
+ description: INDIGO Custom types repository
+ url: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/
+
+imports:
+ - indigo_custom_types:
+ file: custom_types.yaml
+ repository: indigo_repository
+
+topology_template:
+ inputs:
+ db_name:
+ type: string
+ default: world
+ db_user:
+ type: string
+ default: dbuser
+ db_password:
+ type: string
+ default: pass
+ mysql_root_password:
+ type: string
+ default: mypass
+
+ node_templates:
+
+ apache:
+ type: tosca.nodes.WebServer.Apache
+ requirements:
+ - host: web_server
+
+ web_server:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ endpoint:
+ properties:
+ network_name: PUBLIC
+ ports:
+ ssh_port:
+ protocol: tcp
+ source: 22
+ http_port:
+ protocol: tcp
+ source: 80
+ capabilities:
+ scalable:
+ properties:
+ count: 2
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ distribution: ubuntu
+
+ test_db:
+ type: tosca.nodes.indigo.Database.MySQL
+ properties:
+ name: { get_input: db_name }
+ user: { get_input: db_user }
+ password: { get_input: db_password }
+ root_password: { get_input: mysql_root_password }
+ artifacts:
+ db_content:
+ file: http://downloads.mysql.com/docs/world.sql.gz
+ type: tosca.artifacts.File
+ requirements:
+ - host:
+ node: mysql
+ interfaces:
+ Standard:
+ configure:
+ implementation: mysql/mysql_db_import.yml
+ inputs:
+ db_name: { get_property: [ SELF, name ] }
+ db_data: { get_artifact: [ SELF, db_content ] }
+ # db_name: { get_property: [ SELF, name ] }  # removed: duplicate YAML mapping key (same value as above)
+ db_user: { get_property: [ SELF, user ] }
+
+ mysql:
+ type: tosca.nodes.DBMS.MySQL
+ properties:
+ root_password: { get_input: mysql_root_password }
+ requirements:
+ - host:
+ node: db_server
+
+ db_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: ubuntu
+
+ outputs:
+ server_url:
+ value: { get_attribute: [ web_server, public_address ] }
\ No newline at end of file
diff --git a/test/files/tosca_create.yml b/test/files/tosca_create.yml
new file mode 100644
index 000000000..a4d5d9b8e
--- /dev/null
+++ b/test/files/tosca_create.yml
@@ -0,0 +1,113 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA test for the IM
+
+repositories:
+ indigo_repository:
+ description: INDIGO Custom types repository
+ url: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/
+
+imports:
+ - indigo_custom_types:
+ file: custom_types.yaml
+ repository: indigo_repository
+
+topology_template:
+ inputs:
+ db_name:
+ type: string
+ default: world
+ db_user:
+ type: string
+ default: dbuser
+ db_password:
+ type: string
+ default: pass
+ mysql_root_password:
+ type: string
+ default: mypass
+
+ node_templates:
+
+ apache:
+ type: tosca.nodes.WebServer.Apache
+ requirements:
+ - host: web_server
+
+ web_server:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ endpoint:
+ properties:
+ network_name: PUBLIC
+ ports:
+ ssh_port:
+ protocol: tcp
+ source: 22
+ http_port:
+ protocol: tcp
+ source: 80
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ distribution: ubuntu
+
+ test_db:
+ type: tosca.nodes.indigo.Database.MySQL
+ properties:
+ name: { get_input: db_name }
+ user: { get_input: db_user }
+ password: { get_input: db_password }
+ root_password: { get_input: mysql_root_password }
+ artifacts:
+ db_content:
+ file: http://downloads.mysql.com/docs/world.sql.gz
+ type: tosca.artifacts.File
+ requirements:
+ - host:
+ node: mysql
+ interfaces:
+ Standard:
+ configure:
+ implementation: mysql/mysql_db_import.yml
+ inputs:
+ db_name: { get_property: [ SELF, name ] }
+ db_data: { get_artifact: [ SELF, db_content ] }
+ # db_name: { get_property: [ SELF, name ] }  # removed: duplicate YAML mapping key (same value as above)
+ db_user: { get_property: [ SELF, user ] }
+
+ mysql:
+ type: tosca.nodes.DBMS.MySQL
+ properties:
+ root_password: { get_input: mysql_root_password }
+ requirements:
+ - host:
+ node: db_server
+
+ db_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: ubuntu
+
+ outputs:
+ server_url:
+ value: { get_attribute: [ web_server, public_address ] }
+ server_creds:
+ value: { get_attribute: [ web_server, endpoint, credential, 0 ] }
\ No newline at end of file
diff --git a/test/files/tosca_long.yml b/test/files/tosca_long.yml
new file mode 100644
index 000000000..9c9c792e0
--- /dev/null
+++ b/test/files/tosca_long.yml
@@ -0,0 +1,172 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+ - indigo_custom_types: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/custom_types.yaml
+
+description: >
+ TOSCA test for launching a Virtual Elastic Cluster. It will launch
+ a single front-end that will be in change of managing the elasticity
+ using the specified LRMS (torque, sge, slurm and condor) workload.
+
+topology_template:
+
+ inputs:
+
+ network_name:
+ type: string
+ default: vpc-XX.subnet-XX
+
+ access_key:
+ type: string
+ default: AKXX
+
+ secret_key:
+ type: string
+ default: SKXX
+
+ node_templates:
+
+ elastic_cluster_front_end:
+ type: tosca.nodes.indigo.ElasticCluster
+ properties:
+ # fake value to test token intrinsic functions
+ deployment_id: { token: [ get_attribute: [ lrms_server, public_address, 0 ], ':', 0 ] }
+ # fake value to test concat intrinsic functions
+ orchestrator_url: { concat: [ 'http://', get_attribute: [ lrms_server, public_address, 0 ], ':8080' ] }
+ iam_access_token: iam_access_token
+ requirements:
+ - lrms: lrms_front_end
+ - wn: wn_node
+
+ lrms_front_end:
+ type: tosca.nodes.indigo.LRMS.FrontEnd.Slurm
+ properties:
+ wn_ips: { get_attribute: [ lrms_wn, private_address ] }
+ requirements:
+ - host: lrms_server
+
+ other_server:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ endpoint:
+ properties:
+ network_name: PUBLIC
+ ports:
+ port_range:
+ protocol: tcp
+ source_range: [ 1, 4 ]
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+
+ lrms_server:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ endpoint:
+ properties:
+ dns_name: slurmserver
+ network_name: { concat: [ { get_input: network_name }, ".PUBLIC" ] }
+ ports:
+ http_port:
+ protocol: tcp
+ source: 8080
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ #distribution: scientific
+ #version: 6.6
+ requirements:
+ - local_storage:
+ node: my_onedata_storage
+ relationship:
+ type: AttachesTo
+ properties:
+ location: /mnt/disk
+ interfaces:
+ Configure:
+ pre_configure_source:
+ implementation: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/artifacts/onedata/oneclient_install.yml
+ inputs:
+ onedata_token: { get_property: [ TARGET, credential, token ] }
+ onedata_location: { get_property: [ SELF, location ] }
+
+ my_onedata_storage:
+ type: tosca.nodes.indigo.OneDataStorage
+ properties:
+ oneprovider_host: ["oneprovider.com", "twoprovider.net"]
+ dataspace: ["space1","space2"]
+ onezone_endpoint: http://server.com
+ credential:
+ token: some_token
+ token_type: token
+
+ wn_node:
+ type: tosca.nodes.indigo.LRMS.WorkerNode.Slurm
+ properties:
+ front_end_ip: { get_attribute: [ lrms_server, private_address, 0 ] }
+ capabilities:
+ wn:
+ properties:
+ max_instances: 5
+ min_instances: 0
+ requirements:
+ - host: lrms_wn
+
+ lrms_wn:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ scalable:
+ properties:
+ count: 1
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 2 GB
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ #distribution: scientific
+ #version: 6.6
+
+ mysql:
+ type: tosca.nodes.DBMS
+ requirements:
+ - host:
+ node_filter:
+ capabilities:
+ # Constraints for selecting "host" (Container Capability)
+ - host:
+ properties:
+ - num_cpus: { in_range: [ 1, 4 ] }
+ - mem_size: { greater_or_equal: 2 GB }
+ # Constraints for selecting "os" (OperatingSystem Capability)
+ - os:
+ properties:
+ - type: linux
+
+ outputs:
+ galaxy_url:
+ value: { concat: [ 'http://', get_attribute: [ lrms_server, public_address, 0 ], ':8080' ] }
+
+ groups:
+ my_placement_group:
+ type: tosca.groups.Root
+ members: [ lrms_server, lrms_wn ]
+
+ policies:
+ - deploy_on_cloudid:
+ type: tosca.policies.Placement
+ properties: { cloud_id: cloudid }
+ targets: [ other_server ]
+
+ - deploy_group_on_cloudid:
+ type: tosca.policies.Placement
+ properties: { cloud_id: cloudid }
+ targets: [ my_placement_group ]
\ No newline at end of file
diff --git a/test/files/tosca_remove.yml b/test/files/tosca_remove.yml
new file mode 100644
index 000000000..997af4cef
--- /dev/null
+++ b/test/files/tosca_remove.yml
@@ -0,0 +1,117 @@
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: TOSCA test for the IM
+
+repositories:
+ indigo_repository:
+ description: INDIGO Custom types repository
+ url: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/
+
+imports:
+ - indigo_custom_types:
+ file: custom_types.yaml
+ repository: indigo_repository
+
+topology_template:
+ inputs:
+ db_name:
+ type: string
+ default: world
+ db_user:
+ type: string
+ default: dbuser
+ db_password:
+ type: string
+ default: pass
+ mysql_root_password:
+ type: string
+ default: mypass
+
+ node_templates:
+
+ apache:
+ type: tosca.nodes.WebServer.Apache
+ requirements:
+ - host: web_server
+
+ web_server:
+ type: tosca.nodes.indigo.Compute
+ capabilities:
+ endpoint:
+ properties:
+ network_name: PUBLIC
+ ports:
+ ssh_port:
+ protocol: tcp
+ source: 22
+ http_port:
+ protocol: tcp
+ source: 80
+ capabilities:
+ scalable:
+ properties:
+ count: 1
+ removal_list: ['2']
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ mem_size: 1 GB
+ # Guest Operating System properties
+ os:
+ properties:
+ # host Operating System image properties
+ type: linux
+ distribution: ubuntu
+
+ test_db:
+ type: tosca.nodes.indigo.Database.MySQL
+ properties:
+ name: { get_input: db_name }
+ user: { get_input: db_user }
+ password: { get_input: db_password }
+ root_password: { get_input: mysql_root_password }
+ artifacts:
+ db_content:
+ file: http://downloads.mysql.com/docs/world.sql.gz
+ type: tosca.artifacts.File
+ requirements:
+ - host:
+ node: mysql
+ interfaces:
+ Standard:
+ configure:
+ implementation: mysql/mysql_db_import.yml
+ inputs:
+ db_name: { get_property: [ SELF, name ] }
+ db_data: { get_artifact: [ SELF, db_content ] }
+ # db_name: { get_property: [ SELF, name ] }  # removed: duplicate YAML mapping key (same value as above)
+ db_user: { get_property: [ SELF, user ] }
+
+ mysql:
+ type: tosca.nodes.DBMS.MySQL
+ properties:
+ root_password: { get_input: mysql_root_password }
+ requirements:
+ - host:
+ node: db_server
+
+ db_server:
+ type: tosca.nodes.Compute
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 GB
+ os:
+ properties:
+ architecture: x86_64
+ type: linux
+ distribution: ubuntu
+
+ outputs:
+ server_url:
+ value: { get_attribute: [ web_server, public_address ] }
\ No newline at end of file
diff --git a/test/functional/test_im.py b/test/functional/test_im.py
index 97198b32a..4d35c32ec 100755
--- a/test/functional/test_im.py
+++ b/test/functional/test_im.py
@@ -148,7 +148,7 @@ def test_inf_lifecycle(self):
infId = IM.CreateInfrastructure(str(radl), auth0)
- time.sleep(10)
+ time.sleep(15)
state = IM.GetInfrastructureState(infId, auth0)
self.assertEqual(state["state"], "unconfigured")
diff --git a/test/integration/QuickTestIM.py b/test/integration/QuickTestIM.py
index 9c243762c..3c39df082 100755
--- a/test/integration/QuickTestIM.py
+++ b/test/integration/QuickTestIM.py
@@ -194,18 +194,12 @@ def test_16_get_vm_property(self):
"""
Test the GetVMProperty IM function
"""
- (success, vm_ids) = self.server.GetInfrastructureInfo(
- self.inf_id, self.auth_data)
- self.assertTrue(
- success, msg="ERROR calling GetInfrastructureInfo: " + str(vm_ids))
- (success, info) = self.server.GetVMProperty(
- self.inf_id, vm_ids[0], "state", self.auth_data)
- self.assertTrue(
- success, msg="ERROR calling GetVMProperty: " + str(info))
- self.assertNotEqual(
- info, None, msg="ERROR in the value returned by GetVMProperty: " + info)
- self.assertNotEqual(
- info, "", msg="ERROR in the value returned by GetVMPropert: " + info)
+ (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetInfrastructureInfo: " + str(vm_ids))
+ (success, info) = self.server.GetVMProperty(self.inf_id, vm_ids[0], "state", self.auth_data)
+ self.assertTrue(success, msg="ERROR calling GetVMProperty: " + str(info))
+ self.assertNotEqual(info, None, msg="ERROR in the value returned by GetVMProperty: " + info)
+ self.assertNotEqual(info, "", msg="ERROR in the value returned by GetVMPropert: " + info)
def test_18_error_addresource(self):
"""
@@ -529,7 +523,6 @@ def test_70_create_cloud_init(self):
)
"""
- a = radl_parse.parse_radl(radl)
(success, inf_id) = self.server.CreateInfrastructure(radl, self.auth_data)
self.assertTrue(
success, msg="ERROR calling CreateInfrastructure: " + str(inf_id))
diff --git a/test/integration/TestIM.py b/test/integration/TestIM.py
index 86c8cceba..731ca346d 100755
--- a/test/integration/TestIM.py
+++ b/test/integration/TestIM.py
@@ -669,7 +669,7 @@ def test_90_create(self):
self.__class__.inf_id = [inf_id]
all_configured = self.wait_inf_state(
- inf_id, VirtualMachine.CONFIGURED, 1000)
+ inf_id, VirtualMachine.CONFIGURED, 1200)
self.assertTrue(
all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
diff --git a/test/integration/TestREST.py b/test/integration/TestREST.py
index d95316773..1017c1e90 100755
--- a/test/integration/TestREST.py
+++ b/test/integration/TestREST.py
@@ -135,6 +135,22 @@ def test_10_list(self):
self.assertEqual(resp.status_code, 200,
msg="ERROR listing user infrastructures:" + resp.text)
+ def test_12_list_with_incorrect_token(self):
+ auth_data_lines = read_file_as_string('../auth.dat').split("\n")
+ token = ("eyJraWQiOiJyc2ExIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJkYzVkNWFiNy02ZGI5LTQwNzktOTg1Yy04MGFjMDUwMTcwNjYi"
+ "LCJpc3MiOiJodHRwczpcL1wvaWFtLXRlc3QuaW5kaWdvLWRhdGFjbG91ZC5ldVwvIiwiZXhwIjoxNDYyODY5MjgxLCJpYXQiOjE"
+ "0NjI4NjU2ODEsImp0aSI6Ijc1M2M4ZTI1LWU3MGMtNGI5MS05YWJhLTcxNDI5NTg3MzUzOSJ9.iA9nv7QdkmfgJPSQ_77_eKrvh"
+ "P1xwZ1Z91xzrZ0Bzue0ark4qRMlHCdZvad1tunURaSsHHMsFYQ3H7oQj-ZSYWOfr1KxMaIo4pWaVHrW8qsCMLmqdNfubR54GmTh"
+ "M4cA2ZdNZa8neVT8jUvzR1YX-5cz7sp2gWbW9LAwejoXDtk")
+ auth_data = "type = InfrastructureManager; token = %s\\n" % token
+ for line in auth_data_lines:
+ if line.find("type = InfrastructureManager") == -1:
+ auth_data += line.strip() + "\\n"
+
+ resp = self.create_request("GET", "/infrastructures", headers={'AUTHORIZATION': auth_data})
+ self.assertEqual(resp.status_code, 401,
+ msg="ERROR using an invalid token. A 401 error is expected:" + resp.text)
+
def test_15_get_incorrect_info(self):
resp = self.create_request("GET", "/infrastructures/999999")
self.assertEqual(resp.status_code, 404,
@@ -384,7 +400,79 @@ def test_90_start_vm(self):
self.assertTrue(
all_configured, msg="ERROR waiting the vm to be started (timeout).")
- def test_95_destroy(self):
+ def test_92_destroy(self):
+ resp = self.create_request("DELETE", "/infrastructures/" + self.inf_id)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR destroying the infrastructure:" + resp.text)
+
+ def test_93_create_tosca(self):
+ """
+ Test the CreateInfrastructure IM function with a TOSCA document
+ """
+ tosca = read_file_as_string('../files/tosca_create.yml')
+
+ resp = self.create_request("POST", "/infrastructures", headers={'Content-Type': 'text/yaml'}, body=tosca)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR creating the infrastructure:" + resp.text)
+
+ self.__class__.inf_id = str(os.path.basename(resp.text))
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(
+ all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_94_get_outputs(self):
+ resp = self.create_request("GET", "/infrastructures/" + self.inf_id + "/outputs")
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR getting TOSCA outputs:" + resp.text)
+ res = json.loads(resp.text)
+ server_url = str(res['outputs']['server_url'][0])
+ self.assertRegexpMatches(
+ server_url, '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', msg="Unexpected outputs: " + resp.text)
+
+ def test_95_add_tosca(self):
+ """
+ Test the AddResource IM function with a TOSCA document
+ """
+ tosca = read_file_as_string('../files/tosca_add.yml')
+
+ resp = self.create_request("POST", "/infrastructures/" + self.inf_id,
+ headers={'Content-Type': 'text/yaml'}, body=tosca)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR adding resources:" + resp.text)
+
+ resp = self.create_request("GET", "/infrastructures/" + self.inf_id)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR getting the infrastructure info:" + resp.text)
+ vm_ids = resp.text.split("\n")
+ self.assertEqual(len(vm_ids), 3, msg=("ERROR getting infrastructure info: Incorrect number of VMs(" +
+ str(len(vm_ids)) + "). It must be 3"))
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(
+ all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_96_remove_tosca(self):
+ """
+ Test the RemoveResource IM function with a TOSCA document
+ """
+ tosca = read_file_as_string('../files/tosca_remove.yml')
+
+ resp = self.create_request("POST", "/infrastructures/" + self.inf_id,
+ headers={'Content-Type': 'text/yaml'}, body=tosca)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR removing resources:" + resp.text)
+
+ resp = self.create_request("GET", "/infrastructures/" + self.inf_id)
+ self.assertEqual(resp.status_code, 200,
+ msg="ERROR getting the infrastructure info:" + resp.text)
+ vm_ids = resp.text.split("\n")
+ self.assertEqual(len(vm_ids), 2, msg=("ERROR getting infrastructure info: Incorrect number of VMs(" +
+ str(len(vm_ids)) + "). It must be 2"))
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 600)
+ self.assertTrue(
+ all_configured, msg="ERROR waiting the infrastructure to be configured (timeout).")
+
+ def test_98_destroy(self):
resp = self.create_request("DELETE", "/infrastructures/" + self.inf_id)
self.assertEqual(resp.status_code, 200,
msg="ERROR destroying the infrastructure:" + resp.text)
diff --git a/test/unit/REST.py b/test/unit/REST.py
index c0a7534c7..8e22a7fcd 100755
--- a/test/unit/REST.py
+++ b/test/unit/REST.py
@@ -22,6 +22,7 @@
import sys
from io import BytesIO
from mock import patch, MagicMock
+from IM.InfrastructureInfo import InfrastructureInfo
sys.path.append("..")
sys.path.append(".")
@@ -136,8 +137,9 @@ def test_GetInfrastructureInfo(self, bottle_request, GetInfrastructureInfo):
@patch("IM.InfrastructureManager.InfrastructureManager.GetInfrastructureContMsg")
@patch("IM.InfrastructureManager.InfrastructureManager.GetInfrastructureRADL")
@patch("IM.InfrastructureManager.InfrastructureManager.GetInfrastructureState")
+ @patch("IM.InfrastructureManager.InfrastructureManager.get_infrastructure")
@patch("bottle.request")
- def test_GetInfrastructureProperty(self, bottle_request, GetInfrastructureState,
+ def test_GetInfrastructureProperty(self, bottle_request, get_infrastructure, GetInfrastructureState,
GetInfrastructureRADL, GetInfrastructureContMsg):
"""Test REST GetInfrastructureProperty."""
bottle_request.return_value = MagicMock()
@@ -149,6 +151,12 @@ def test_GetInfrastructureProperty(self, bottle_request, GetInfrastructureState,
GetInfrastructureRADL.return_value = "radl"
GetInfrastructureContMsg.return_value = "contmsg"
+ inf = MagicMock()
+ get_infrastructure.return_value = inf
+ tosca = MagicMock()
+ inf.extra_info = {"TOSCA": tosca}
+ tosca.get_outputs.return_value = "outputs"
+
res = RESTGetInfrastructureProperty("1", "state")
self.assertEqual(json.loads(res)["state"]["state"], "running")
@@ -203,8 +211,9 @@ def test_DestroyInfrastructure(self, bottle_request, DestroyInfrastructure):
self.assertEqual(res, "Error Destroying Inf: Access to this infrastructure not granted.")
@patch("IM.InfrastructureManager.InfrastructureManager.CreateInfrastructure")
+ @patch("IM.InfrastructureManager.InfrastructureManager.get_infrastructure")
@patch("bottle.request")
- def test_CreateInfrastructure(self, bottle_request, CreateInfrastructure):
+ def test_CreateInfrastructure(self, bottle_request, get_infrastructure, CreateInfrastructure):
"""Test REST CreateInfrastructure."""
bottle_request.environ = {'HTTP_HOST': 'imserver.com'}
bottle_request.return_value = MagicMock()
@@ -229,12 +238,23 @@ def test_CreateInfrastructure(self, bottle_request, CreateInfrastructure):
res = RESTCreateInfrastructure()
self.assertEqual(res, "http://imserver.com/infrastructures/1")
- bottle_request.body = read_file_as_bytes("../files/test_simple.json")
+ bottle_request.headers = {"AUTHORIZATION": ("type = InfrastructureManager; username = user; password = pass\n"
+ "id = one; type = OpenNebula; host = onedock.i3m.upv.es:2633; "
+ "username = user; password = pass"),
+ "Content-Type": "text/yaml"}
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
+
+ CreateInfrastructure.return_value = "1"
+
+ res = RESTCreateInfrastructure()
+ self.assertEqual(res, "http://imserver.com/infrastructures/1")
+
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
CreateInfrastructure.side_effect = InvaliddUserException()
res = RESTCreateInfrastructure()
self.assertEqual(res, "Error Getting Inf. info: Invalid InfrastructureManager credentials")
- bottle_request.body = read_file_as_bytes("../files/test_simple.json")
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
CreateInfrastructure.side_effect = UnauthorizedUserException()
res = RESTCreateInfrastructure()
self.assertEqual(res, "Error Creating Inf.: Access to this infrastructure not granted.")
@@ -336,8 +356,9 @@ def test_GetVMProperty(self, bottle_request, GetVMContMsg, GetVMProperty):
self.assertEqual(res, "Error Getting VM. property: Invalid VM ID")
@patch("IM.InfrastructureManager.InfrastructureManager.AddResource")
+ @patch("IM.InfrastructureManager.InfrastructureManager.get_infrastructure")
@patch("bottle.request")
- def test_AddResource(self, bottle_request, AddResource):
+ def test_AddResource(self, bottle_request, get_infrastructure, AddResource):
"""Test REST AddResource."""
bottle_request.environ = {'HTTP_HOST': 'imserver.com'}
bottle_request.return_value = MagicMock()
@@ -361,17 +382,26 @@ def test_AddResource(self, bottle_request, AddResource):
res = RESTAddResource("1")
self.assertEqual(res, "http://imserver.com/infrastructures/1/vms/1")
- bottle_request.body = read_file_as_bytes("../files/test_simple.json")
+ bottle_request.headers = {"AUTHORIZATION": ("type = InfrastructureManager; username = user; password = pass\n"
+ "id = one; type = OpenNebula; host = onedock.i3m.upv.es:2633; "
+ "username = user; password = pass"),
+ "Content-Type": "text/yaml"}
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
+
+ res = RESTAddResource("1")
+ self.assertEqual(res, "http://imserver.com/infrastructures/1/vms/1")
+
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
AddResource.side_effect = DeletedInfrastructureException()
res = RESTAddResource("1")
self.assertEqual(res, "Error Adding resources: Deleted infrastructure.")
- bottle_request.body = read_file_as_bytes("../files/test_simple.json")
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
AddResource.side_effect = IncorrectInfrastructureException()
res = RESTAddResource("1")
self.assertEqual(res, "Error Adding resources: Invalid infrastructure ID or access not granted.")
- bottle_request.body = read_file_as_bytes("../files/test_simple.json")
+ bottle_request.body = read_file_as_bytes("../files/tosca_create.yml")
AddResource.side_effect = UnauthorizedUserException()
res = RESTAddResource("1")
self.assertEqual(res, "Error Adding resources: Access to this infrastructure not granted.")
diff --git a/test/unit/Tosca.py b/test/unit/Tosca.py
new file mode 100755
index 000000000..d8bb45d52
--- /dev/null
+++ b/test/unit/Tosca.py
@@ -0,0 +1,91 @@
+#! /usr/bin/env python
+#
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import os
+import unittest
+import sys
+
+from mock import Mock, patch, MagicMock
+
+sys.path.append("..")
+sys.path.append(".")
+
+from IM.VirtualMachine import VirtualMachine
+from radl.radl_parse import parse_radl
+from IM.InfrastructureInfo import InfrastructureInfo
+from IM.tosca.Tosca import Tosca
+
+
+def read_file_as_string(file_name):
+ tests_path = os.path.dirname(os.path.abspath(__file__))
+ abs_file_path = os.path.join(tests_path, file_name)
+ return open(abs_file_path, 'r').read()
+
+
+class TestTosca(unittest.TestCase):
+
+ def __init__(self, *args):
+ unittest.TestCase.__init__(self, *args)
+
+ def test_tosca_to_radl(self):
+ """Test TOSCA RADL translation"""
+ tosca_data = read_file_as_string('../files/tosca_long.yml')
+ tosca = Tosca(tosca_data)
+ _, radl = tosca.to_radl()
+ radl = parse_radl(str(radl))
+ net = radl.get_network_by_id('public_net')
+ net1 = radl.get_network_by_id('public_net_1')
+ self.assertIn(net.getValue('provider_id'), ['vpc-XX.subnet-XX', None])
+ if net.getValue('provider_id') is None:
+ self.assertEqual(net.getValue("outports"), '1:4/tcp')
+ self.assertEqual(net1.getValue("outports"), '8080/tcp-8080/tcp')
+ else:
+ self.assertEqual(net.getValue('provider_id'), 'vpc-XX.subnet-XX')
+ self.assertEqual(net.getValue("outports"), '8080/tcp-8080/tcp')
+ self.assertEqual(net1.getValue("outports"), '1:4/tcp')
+ lrms_wn = radl.get_system_by_name('lrms_wn')
+ self.assertEqual(lrms_wn.getValue('memory.size'), 2000000000)
+ lrms_server = radl.get_system_by_name('lrms_server')
+ self.assertEqual(lrms_server.getValue('memory.size'), 1000000000)
+ self.assertEqual(lrms_server.getValue('net_interface.0.dns_name'), 'slurmserver')
+ self.assertEqual("cloudid", radl.deploys[0].cloud_id)
+ self.assertEqual("cloudid", radl.deploys[1].cloud_id)
+ self.assertEqual("cloudid", radl.deploys[2].cloud_id)
+
+ def test_tosca_get_outputs(self):
+ """Test TOSCA get_outputs function"""
+ tosca_data = read_file_as_string('../files/tosca_create.yml')
+ tosca = Tosca(tosca_data)
+ _, radl = tosca.to_radl()
+ radl1 = radl.clone()
+ radl1.systems = [radl.get_system_by_name('web_server')]
+ radl1.systems[0].setValue("net_interface.1.ip", "158.42.1.1")
+ radl1.systems[0].setValue("disk.0.os.credentials.username", "ubuntu")
+ radl1.systems[0].setValue("disk.0.os.credentials.password", "pass")
+ inf = InfrastructureInfo()
+ vm = VirtualMachine(inf, "1", None, radl1, radl1, None)
+ vm.requested_radl = radl1
+ inf.vm_list = [vm]
+ outputs = tosca.get_outputs(inf)
+ self.assertEqual(outputs, {'server_url': ['158.42.1.1'],
+ 'server_creds': {'token_type': 'password',
+ 'token': 'pass',
+ 'user': 'ubuntu'}})
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/unit/onetts.py b/test/unit/onetts.py
new file mode 100755
index 000000000..7017ead4b
--- /dev/null
+++ b/test/unit/onetts.py
@@ -0,0 +1,51 @@
+#! /usr/bin/env python
+#
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import unittest
+
+from IM.uriparse import uriparse
+from IM.tts.onetts import ONETTSClient
+from mock import patch, MagicMock
+
+
+class TestONETTSClient(unittest.TestCase):
+ """
+ Class to test the OneTTSClient class
+ """
+ @patch('IM.tts.onetts.TTSClient')
+ def test_list_providers(self, ttscli):
+ tts = MagicMock()
+ ttscli.return_value = tts
+ tts.get_provider.return_value = True, {"id": "iam"}
+ tts.find_service.return_value = True, {"id": "sid"}
+ tts.request_credential.return_value = True, {"credential": {"entries": [
+ {'name': 'Username', 'type': 'text', 'value': 'username'},
+ {'name': 'Password', 'type': 'text', 'value': 'password'}]}}
+
+ token = ("eyJraWQiOiJyc2ExIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJkYzVkNWFiNy02ZGI5LTQwNzktOTg1Yy04MGFjMDUwMTcwNjYi"
+ "LCJpc3MiOiJodHRwczpcL1wvaWFtLXRlc3QuaW5kaWdvLWRhdGFjbG91ZC5ldVwvIiwiZXhwIjoxNDY2MDkzOTE3LCJpYXQiOjE"
+ "0NjYwOTAzMTcsImp0aSI6IjE1OTU2N2U2LTdiYzItNDUzOC1hYzNhLWJjNGU5MmE1NjlhMCJ9.eINKxJa2J--xdGAZWIOKtx9Wi"
+ "0Vz3xHzaSJWWY-UHWy044TQ5xYtt0VTvmY5Af-ngwAMGfyaqAAvNn1VEP-_fMYQZdwMqcXLsND4KkDi1ygiCIwQ3JBz9azBT1o_"
+ "oAHE5BsPsE2BjfDoVRasZxxW5UoXCmBslonYd8HK2tUVjz0")
+ username, password = ONETTSClient.get_auth_from_tts("https://localhost:8443", "oneserver", token)
+
+ self.assertEqual(username, "username", msg="ERROR: getting one auth from TTS, incorrect username.")
+ self.assertEqual(password, "password", msg="ERROR: getting one auth from TTS, incorrect password.")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/openid.py b/test/unit/openid.py
new file mode 100755
index 000000000..70a3061a0
--- /dev/null
+++ b/test/unit/openid.py
@@ -0,0 +1,78 @@
+#! /usr/bin/env python
+#
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+import os
+import json
+
+from IM.openid.OpenIDClient import OpenIDClient
+from mock import patch, MagicMock
+
+
+def read_file_as_string(file_name):
+ tests_path = os.path.dirname(os.path.abspath(__file__))
+ abs_file_path = os.path.join(tests_path, file_name)
+ return open(abs_file_path, 'r').read()
+
+
+class TestOpenIDClient(unittest.TestCase):
+ """
+    Class to test the OpenIDClient class
+ """
+ @classmethod
+ def setUpClass(cls):
+ cls.token = ("eyJraWQiOiJyc2ExIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJkYzVkNWFiNy02ZGI5LTQwNzktOTg1Yy04MGFjMDUwMTcw"
+ "NjYiLCJpc3MiOiJodHRwczpcL1wvaWFtLXRlc3QuaW5kaWdvLWRhdGFjbG91ZC5ldVwvIiwiZXhwIjoxNDY2MDkzOTE3LCJ"
+ "pYXQiOjE0NjYwOTAzMTcsImp0aSI6IjE1OTU2N2U2LTdiYzItNDUzOC1hYzNhLWJjNGU5MmE1NjlhMCJ9.eINKxJa2J--xd"
+ "GAZWIOKtx9Wi0Vz3xHzaSJWWY-UHWy044TQ5xYtt0VTvmY5Af-ngwAMGfyaqAAvNn1VEP-_fMYQZdwMqcXLsND4KkDi1ygiC"
+ "IwQ3JBz9azBT1o_oAHE5BsPsE2BjfDoVRasZxxW5UoXCmBslonYd8HK2tUVjz0")
+
+ def test_is_access_token_expired(self):
+ expired, msg = OpenIDClient.is_access_token_expired(self.token)
+
+ self.assertTrue(expired)
+ self.assertEqual(msg, "Token expired")
+
+ @patch('requests.request')
+ def test_get_user_info_request(self, requests):
+ mock_response = MagicMock()
+ mock_response.status_code = 200
+ user_info = read_file_as_string('../files/iam_user_info.json')
+ mock_response.text = user_info
+ requests.return_value = mock_response
+
+ success, user_info_resp = OpenIDClient.get_user_info_request(self.token)
+
+ self.assertTrue(success)
+ self.assertEqual(json.loads(user_info), user_info_resp)
+
+ @patch('requests.request')
+ def test_get_token_introspection(self, requests):
+ mock_response = MagicMock()
+ mock_response.status_code = 200
+ token_info = read_file_as_string('../files/iam_token_info.json')
+ mock_response.text = token_info
+ requests.return_value = mock_response
+
+ success, token_info_resp = OpenIDClient.get_token_introspection(self.token, "cid", "csec")
+
+ self.assertTrue(success)
+ self.assertEqual(json.loads(token_info), token_info_resp)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/test_im_logic.py b/test/unit/test_im_logic.py
index c4f8dcc19..9ad7f1aa0 100755
--- a/test/unit/test_im_logic.py
+++ b/test/unit/test_im_logic.py
@@ -21,6 +21,9 @@
import logging
import unittest
import sys
+import json
+import base64
+
from mock import Mock, patch, MagicMock
sys.path.append("..")
@@ -125,6 +128,21 @@ def get_cloud_connector_mock(self, name="MyMock0"):
cloud.launch = Mock(side_effect=self.gen_launch_res)
return cloud
+ def gen_token(self, aud=None, exp=None):
+ data = {
+ "sub": "user_sub",
+ "iss": "https://iam-test.indigo-datacloud.eu/",
+ "exp": 1465471354,
+ "iat": 1465467755,
+ "jti": "jti",
+ }
+ if aud:
+ data["aud"] = aud
+ if exp:
+ data["exp"] = int(time.time()) + exp
+ return ("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.%s.ignored" %
+ base64.urlsafe_b64encode(json.dumps(data).encode("utf-8")).decode("utf-8"))
+
def test_inf_creation0(self):
"""Create infrastructure with empty RADL."""
@@ -955,7 +973,7 @@ def test_contextualize(self):
infId = IM.CreateInfrastructure(str(radl), auth0)
- time.sleep(10)
+ time.sleep(15)
state = IM.GetInfrastructureState(infId, auth0)
self.assertEqual(state["state"], "unconfigured")
@@ -976,6 +994,80 @@ def test_contextualize(self):
IM.DestroyInfrastructure(infId, auth0)
+ @patch('requests.request')
+ def test_check_oidc_invalid_token(self, request):
+ im_auth = {"token": self.gen_token()}
+
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth)
+ self.assertEqual(str(ex.exception),
+ 'Invalid InfrastructureManager credentials. OIDC auth Token expired.')
+
+ im_auth_aud = {"token": self.gen_token(aud="test1,test2")}
+
+ Config.OIDC_AUDIENCE = "test"
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth_aud)
+ self.assertEqual(str(ex.exception),
+ 'Invalid InfrastructureManager credentials. Audience not accepted.')
+
+ Config.OIDC_AUDIENCE = "test2"
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth_aud)
+ self.assertEqual(str(ex.exception),
+ 'Invalid InfrastructureManager credentials. OIDC auth Token expired.')
+ Config.OIDC_AUDIENCE = None
+
+ Config.OIDC_SCOPES = ["scope1", "scope2"]
+ Config.OIDC_CLIENT_ID = "client"
+ Config.OIDC_CLIENT_SECRET = "secret"
+ response = MagicMock()
+ response.status_code = 200
+ response.text = '{ "scope": "profile scope1" }'
+ request.return_value = response
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth_aud)
+ self.assertEqual(str(ex.exception),
+ 'Invalid InfrastructureManager credentials. '
+ 'Scopes scope1 scope2 not in introspection scopes: profile scope1')
+
+ response.status_code = 200
+ response.text = '{ "scope": "address profile scope1 scope2" }'
+ request.return_value = response
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth_aud)
+ self.assertEqual(str(ex.exception),
+ 'Invalid InfrastructureManager credentials. '
+ 'OIDC auth Token expired.')
+
+ Config.OIDC_SCOPES = []
+ Config.OIDC_CLIENT_ID = None
+ Config.OIDC_CLIENT_SECRET = None
+
+ Config.OIDC_ISSUERS = ["https://other_issuer"]
+
+ with self.assertRaises(Exception) as ex:
+ IM.check_oidc_token(im_auth)
+ self.assertEqual(str(ex.exception),
+ "Invalid InfrastructureManager credentials. Issuer not accepted.")
+
+ @patch('IM.InfrastructureManager.OpenIDClient')
+ def test_check_oidc_valid_token(self, openidclient):
+ im_auth = {"token": (self.gen_token())}
+
+ user_info = json.loads(read_file_as_string('../files/iam_user_info.json'))
+
+ openidclient.is_access_token_expired.return_value = False, "Valid Token for 100 seconds"
+ openidclient.get_user_info_request.return_value = True, user_info
+
+ Config.OIDC_ISSUERS = ["https://iam-test.indigo-datacloud.eu/"]
+ Config.OIDC_AUDIENCE = None
+
+ IM.check_oidc_token(im_auth)
+
+ self.assertEqual(im_auth['username'], "micafer")
+ self.assertEqual(im_auth['password'], "https://iam-test.indigo-datacloud.eu/sub")
+
def test_db(self):
""" Test DB data access """
inf = InfrastructureInfo()
diff --git a/test/unit/tts.py b/test/unit/tts.py
new file mode 100755
index 000000000..d71efe58e
--- /dev/null
+++ b/test/unit/tts.py
@@ -0,0 +1,112 @@
+#! /usr/bin/env python
+#
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import unittest
+
+from IM.uriparse import uriparse
+from IM.tts.tts import TTSClient
+from mock import patch, MagicMock
+
+
+class TestTTSClient(unittest.TestCase):
+ """
+ Class to test the TTCLient class
+ """
+ @classmethod
+ def setUpClass(cls):
+ token = ("eyJraWQiOiJyc2ExIiwiYWxnIjoiUlMyNTYifQ.eyJzdWIiOiJkYzVkNWFiNy02ZGI5LTQwNzktOTg1Yy04MGFjMDUwMTcwNjYi"
+ "LCJpc3MiOiJodHRwczpcL1wvaWFtLXRlc3QuaW5kaWdvLWRhdGFjbG91ZC5ldVwvIiwiZXhwIjoxNDY2MDkzOTE3LCJpYXQiOjE"
+ "0NjYwOTAzMTcsImp0aSI6IjE1OTU2N2U2LTdiYzItNDUzOC1hYzNhLWJjNGU5MmE1NjlhMCJ9.eINKxJa2J--xdGAZWIOKtx9Wi"
+ "0Vz3xHzaSJWWY-UHWy044TQ5xYtt0VTvmY5Af-ngwAMGfyaqAAvNn1VEP-_fMYQZdwMqcXLsND4KkDi1ygiCIwQ3JBz9azBT1o_"
+ "oAHE5BsPsE2BjfDoVRasZxxW5UoXCmBslonYd8HK2tUVjz0")
+ cls.ttsc = TTSClient(token, "localhost")
+
+ def get_response(self, method, url, verify=False, cert=None, headers={}, data=None):
+ resp = MagicMock()
+ parts = uriparse(url)
+ url = parts[2]
+
+ if method == "GET":
+ if "/api/v2/oidcp" == url:
+ resp.status_code = 200
+ resp.text = '{"openid_provider_list": [{"id": "iam"}]}'
+ elif "/api/v2/iam/service" == url:
+ resp.status_code = 200
+ resp.text = ('{"service_list": [{"id":"sid", "description": "shost"}]}')
+ else:
+ resp.status_code = 400
+ elif method == "POST":
+ if url == "/api/v2/iam/credential":
+ resp.status_code = 200
+ resp.text = ('{ "credential": { "entries": [{"name": "Username", "type": "text", "value": "username"},'
+ '{"name": "Password", "type": "text", "value": "password"}]}}')
+ else:
+ resp.status_code = 401
+ else:
+ resp.status_code = 402
+
+ return resp
+
+ @patch('requests.request')
+ def test_list_providers(self, requests):
+ requests.side_effect = self.get_response
+
+ success, providers = self.ttsc.list_providers()
+
+ expected_providers = {"openid_provider_list": [{"id": "iam"}]}
+
+ self.assertTrue(success, msg="ERROR: getting providers: %s." % providers)
+ self.assertEqual(providers, expected_providers, msg="ERROR: getting providers: Unexpected providers.")
+
+ @patch('requests.request')
+ def test_list_endservices(self, requests):
+ requests.side_effect = self.get_response
+
+ _, provider = self.ttsc.get_provider()
+ success, services = self.ttsc.list_endservices(provider["id"])
+
+ expected_services = {"service_list": [{"id": "sid", "description": "shost"}]}
+
+ self.assertTrue(success, msg="ERROR: getting services: %s." % services)
+ self.assertEqual(services, expected_services, msg="ERROR: getting services: Unexpected services.")
+
+ @patch('requests.request')
+ def test_find_service(self, requests):
+ requests.side_effect = self.get_response
+
+ success, service = self.ttsc.find_service("shost")
+
+ expected_service = {"id": "sid", "description": "shost"}
+
+ self.assertTrue(success)
+ self.assertEqual(service, expected_service)
+
+ @patch('requests.request')
+ def test_request_credential(self, requests):
+ requests.side_effect = self.get_response
+
+ success, cred = self.ttsc.request_credential("sid")
+
+ expected_cred = {"credential": {"entries": [
+ {'name': 'Username', 'type': 'text', 'value': 'username'},
+ {'name': 'Password', 'type': 'text', 'value': 'password'}]}}
+
+ self.assertTrue(success, msg="ERROR: getting credentials: %s." % cred)
+ self.assertEqual(cred, expected_cred, msg="ERROR: getting credentials: Unexpected credetials.")
+if __name__ == '__main__':
+ unittest.main()