', 'exec')
-
- @cached_property
- def code(self):
- source = self.source
- if not source:
- with open(self.filename, 'rb') as f:
- source = f.read()
- try:
- source, encoding = touni(source), 'utf8'
- except UnicodeError:
- depr('Template encodings other than utf8 are no longer supported.') #0.11
- source, encoding = touni(source, 'latin1'), 'latin1'
- parser = StplParser(source, encoding=encoding, syntax=self.syntax)
- code = parser.translate()
- self.encoding = parser.encoding
- return code
-
- def _rebase(self, _env, _name=None, **kwargs):
- _env['_rebase'] = (_name, kwargs)
-
- def _include(self, _env, _name=None, **kwargs):
- env = _env.copy()
- env.update(kwargs)
- if _name not in self.cache:
- self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
- return self.cache[_name].execute(env['_stdout'], env)
-
- def execute(self, _stdout, kwargs):
- env = self.defaults.copy()
- env.update(kwargs)
- env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
- 'include': functools.partial(self._include, env),
- 'rebase': functools.partial(self._rebase, env), '_rebase': None,
- '_str': self._str, '_escape': self._escape, 'get': env.get,
- 'setdefault': env.setdefault, 'defined': env.__contains__ })
- eval(self.co, env)
- if env.get('_rebase'):
- subtpl, rargs = env.pop('_rebase')
- rargs['base'] = ''.join(_stdout) #copy stdout
- del _stdout[:] # clear stdout
- return self._include(env, subtpl, **rargs)
- return env
-
- def render(self, *args, **kwargs):
- """ Render the template using keyword arguments as local variables. """
- env = {}; stdout = []
- for dictarg in args: env.update(dictarg)
- env.update(kwargs)
- self.execute(stdout, env)
- return ''.join(stdout)
-
-
-class StplSyntaxError(TemplateError): pass
-
-
-class StplParser(object):
- """ Parser for stpl templates. """
- _re_cache = {} #: Cache for compiled re patterns
- # This huge pile of voodoo magic splits python code into 8 different tokens.
- # 1: All kinds of python strings (trust me, it works)
- _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
- '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
- '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
- '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
- _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
- # 2: Comments (until end of line, but not the newline itself)
- _re_tok += '|(#.*)'
- # 3,4: Keywords that start or continue a python block (only start of line)
- _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
- '|^([ \\t]*(?:elif|else|except|finally)\\b)'
- # 5: Our special 'end' keyword (but only if it stands alone)
- _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
- # 6: A customizable end-of-code-block template token (only end of line)
- _re_tok += '|(%(block_close)s[ \\t]*(?=$))'
- # 7: And finally, a single newline. The 8th token is 'everything else'
- _re_tok += '|(\\r?\\n)'
- # Match the start tokens of code areas in a template
- _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
- # Match inline statements (may contain python strings)
- _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
-
- default_syntax = '<% %> % {{ }}'
-
- def __init__(self, source, syntax=None, encoding='utf8'):
- self.source, self.encoding = touni(source, encoding), encoding
- self.set_syntax(syntax or self.default_syntax)
- self.code_buffer, self.text_buffer = [], []
- self.lineno, self.offset = 1, 0
- self.indent, self.indent_mod = 0, 0
-
- def get_syntax(self):
- """ Tokens as a space separated string (default: <% %> % {{ }}) """
- return self._syntax
-
- def set_syntax(self, syntax):
- self._syntax = syntax
- self._tokens = syntax.split()
- if not syntax in self._re_cache:
- names = 'block_start block_close line_start inline_start inline_end'
- etokens = map(re.escape, self._tokens)
- pattern_vars = dict(zip(names.split(), etokens))
- patterns = (self._re_split, self._re_tok, self._re_inl)
- patterns = [re.compile(p%pattern_vars) for p in patterns]
- self._re_cache[syntax] = patterns
- self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
-
- syntax = property(get_syntax, set_syntax)
-
- def translate(self):
- if self.offset: raise RuntimeError('Parser is a one time instance.')
- while True:
- m = self.re_split.search(self.source[self.offset:])
- if m:
- text = self.source[self.offset:self.offset+m.start()]
- self.text_buffer.append(text)
- self.offset += m.end()
- if m.group(1): # Escape syntax
- line, sep, _ = self.source[self.offset:].partition('\n')
- self.text_buffer.append(m.group(2)+line+sep)
- self.offset += len(line+sep)+1
- continue
- self.flush_text()
- self.read_code(multiline=bool(m.group(4)))
- else: break
- self.text_buffer.append(self.source[self.offset:])
- self.flush_text()
- return ''.join(self.code_buffer)
-
- def read_code(self, multiline):
- code_line, comment = '', ''
- while True:
- m = self.re_tok.search(self.source[self.offset:])
- if not m:
- code_line += self.source[self.offset:]
- self.offset = len(self.source)
- self.write_code(code_line.strip(), comment)
- return
- code_line += self.source[self.offset:self.offset+m.start()]
- self.offset += m.end()
- _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
- if code_line and (_blk1 or _blk2): # a if b else c
- code_line += _blk1 or _blk2
- continue
- if _str: # Python string
- code_line += _str
- elif _com: # Python comment (up to EOL)
- comment = _com
- if multiline and _com.strip().endswith(self._tokens[1]):
- multiline = False # Allow end-of-block in comments
- elif _blk1: # Start-block keyword (if/for/while/def/try/...)
- code_line, self.indent_mod = _blk1, -1
- self.indent += 1
- elif _blk2: # Continue-block keyword (else/elif/except/...)
- code_line, self.indent_mod = _blk2, -1
- elif _end: # The non-standard 'end'-keyword (ends a block)
- self.indent -= 1
- elif _cend: # The end-code-block template token (usually '%>')
- if multiline: multiline = False
- else: code_line += _cend
- else: # \n
- self.write_code(code_line.strip(), comment)
- self.lineno += 1
- code_line, comment, self.indent_mod = '', '', 0
- if not multiline:
- break
-
- def flush_text(self):
- text = ''.join(self.text_buffer)
- del self.text_buffer[:]
- if not text: return
- parts, pos, nl = [], 0, '\\\n'+' '*self.indent
- for m in self.re_inl.finditer(text):
- prefix, pos = text[pos:m.start()], m.end()
- if prefix:
- parts.append(nl.join(map(repr, prefix.splitlines(True))))
- if prefix.endswith('\n'): parts[-1] += nl
- parts.append(self.process_inline(m.group(1).strip()))
- if pos < len(text):
- prefix = text[pos:]
- lines = prefix.splitlines(True)
- if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
- elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
- parts.append(nl.join(map(repr, lines)))
- code = '_printlist((%s,))' % ', '.join(parts)
- self.lineno += code.count('\n')+1
- self.write_code(code)
-
- @staticmethod
- def process_inline(chunk):
- if chunk[0] == '!': return '_str(%s)' % chunk[1:]
- return '_escape(%s)' % chunk
-
- def write_code(self, line, comment=''):
- code = ' ' * (self.indent+self.indent_mod)
- code += line.lstrip() + comment + '\n'
- self.code_buffer.append(code)
-
-
-def template(*args, **kwargs):
- """
- Get a rendered template as a string iterator.
- You can use a name, a filename or a template string as first parameter.
- Template rendering arguments can be passed as dictionaries
- or directly (as keyword arguments).
- """
- tpl = args[0] if args else None
- adapter = kwargs.pop('template_adapter', SimpleTemplate)
- lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
- tplid = (id(lookup), tpl)
- if tplid not in TEMPLATES or DEBUG:
- settings = kwargs.pop('template_settings', {})
- if isinstance(tpl, adapter):
- TEMPLATES[tplid] = tpl
- if settings: TEMPLATES[tplid].prepare(**settings)
- elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
- TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
- else:
- TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
- if not TEMPLATES[tplid]:
- abort(500, 'Template (%s) not found' % tpl)
- for dictarg in args[1:]: kwargs.update(dictarg)
- return TEMPLATES[tplid].render(kwargs)
-
-mako_template = functools.partial(template, template_adapter=MakoTemplate)
-cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
-jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
-
-
-def view(tpl_name, **defaults):
- """ Decorator: renders a template for a handler.
- The handler can control its behavior like that:
-
- - return a dict of template vars to fill out the template
- - return something other than a dict and the view decorator will not
- process the template, but return the handler result as is.
- This includes returning a HTTPResponse(dict) to get,
- for instance, JSON with autojson or other castfilters.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- result = func(*args, **kwargs)
- if isinstance(result, (dict, DictMixin)):
- tplvars = defaults.copy()
- tplvars.update(result)
- return template(tpl_name, **tplvars)
- elif result is None:
- return template(tpl_name, defaults)
- return result
- return wrapper
- return decorator
-
-mako_view = functools.partial(view, template_adapter=MakoTemplate)
-cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
-jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
-
-
-
-
-
-
-###############################################################################
-# Constants and Globals ########################################################
-###############################################################################
-
-
-TEMPLATE_PATH = ['./', './views/']
-TEMPLATES = {}
-DEBUG = False
-NORUN = False # If set, run() does nothing. Used by load_app()
-
-#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
-HTTP_CODES = httplib.responses
-HTTP_CODES[418] = "I'm a teapot" # RFC 2324
-HTTP_CODES[428] = "Precondition Required"
-HTTP_CODES[429] = "Too Many Requests"
-HTTP_CODES[431] = "Request Header Fields Too Large"
-HTTP_CODES[511] = "Network Authentication Required"
-_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
-
-#: The default template used for error pages. Override with @error()
-ERROR_PAGE_TEMPLATE = """
-%%try:
- %%from %s import DEBUG, request
-
-
-
- Error: {{e.status}}
-
-
-
- Error: {{e.status}}
- Sorry, the requested URL {{repr(request.url)}}
- caused an error:
- {{e.body}}
- %%if DEBUG and e.exception:
- Exception:
- {{repr(e.exception)}}
- %%end
- %%if DEBUG and e.traceback:
- Traceback:
- {{e.traceback}}
- %%end
-
-
-%%except ImportError:
- ImportError: Could not generate the error page. Please add bottle to
- the import path.
-%%end
-""" % __name__
-
-#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
-#: request callback, this instance always refers to the *current* request
-#: (even on a multithreaded server).
-request = LocalRequest()
-
-#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
-#: HTTP response for the *current* request.
-response = LocalResponse()
-
-#: A thread-safe namespace. Not used by Bottle.
-local = threading.local()
-
-# Initialize app stack (create first empty Bottle app)
-# BC: 0.6.4 and needed for run()
-app = default_app = AppStack()
-app.push()
-
-#: A virtual package that redirects import statements.
-#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
-ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
-
-if __name__ == '__main__':
- opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
- if opt.version:
- _stdout('Bottle %s\n'%__version__)
- sys.exit(0)
- if not args:
- parser.print_help()
- _stderr('\nError: No application entry point specified.\n')
- sys.exit(1)
-
- sys.path.insert(0, '.')
- sys.modules.setdefault('bottle', sys.modules['__main__'])
-
- host, port = (opt.bind or 'localhost'), 8080
- if ':' in host and host.rfind(']') < host.rfind(':'):
- host, port = host.rsplit(':', 1)
- host = host.strip('[]')
-
- run(args[0], host=host, port=int(port), server=opt.server,
- reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
-
-
-
-
-# THE END
diff --git a/IM/radl/radl.py b/IM/radl/radl.py
index 0878cd207..92775b902 100644
--- a/IM/radl/radl.py
+++ b/IM/radl/radl.py
@@ -102,7 +102,7 @@ def __init__(self, prop = None, operator = None, value = None, unit = '', line=N
def __str__(self):
return ("{0} {1} ({2})" if self.operator == "contains" else
- "{0} {1} '{2}'" if isinstance(self.value, str) else
+ "{0} {1} '{2}'" if isinstance(self.value, str) or isinstance(self.value, unicode) else
"{0} {1} {2}{3}").format(self.prop, self.operator, self.value,
self.unit if self.unit else "")
@@ -152,12 +152,16 @@ def _check(self, check, radl):
if not isinstance(self.value, int) and not isinstance(self.value, float):
raise RADLParseException("Invalid type; expected %s" % check[0],
line=self.line)
+ elif check[0] == str:
+ if not isinstance(self.value, str) and not isinstance(self.value, unicode):
+ raise RADLParseException("Invalid type; expected %s" % check[0],
+ line=self.line)
else:
if not isinstance(self.value, check[0]):
raise RADLParseException("Invalid type; expected %s" % check[0],
line=self.line)
# Check operator
- if isinstance(self.value, str) and self.prop.find('version') == -1:
+ if (isinstance(self.value, str) or isinstance(self.value, unicode)) and self.prop.find('version') == -1:
if self.operator != "=":
raise RADLParseException("Invalid operator; expected '='",
line=self.line)
@@ -1055,6 +1059,7 @@ def check_app(f, x):
"image.name": (str, None),
"type": (str, ["SWAP", "ISO", "FILESYSTEM"]),
"device": (str, None),
+ "mount_path": (str, None),
"size": (float, positive, mem_units),
"free_size": (float, positive, mem_units),
"os.name": (str, ["LINUX", "WINDOWS", "MAC OS X"]),
diff --git a/IM/retry.py b/IM/retry.py
new file mode 100644
index 000000000..83176a0ee
--- /dev/null
+++ b/IM/retry.py
@@ -0,0 +1,48 @@
+import time
+from functools import wraps
+
+
+def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None, quiet = True):
+ """Retry calling the decorated function using an exponential backoff.
+
+ http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
+ original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
+
+ :param ExceptionToCheck: the exception to check. may be a tuple of
+ exceptions to check
+ :type ExceptionToCheck: Exception or tuple
+ :param tries: number of times to try (not retry) before giving up
+ :type tries: int
+ :param delay: initial delay between retries in seconds
+ :type delay: int
+ :param backoff: backoff multiplier e.g. value of 2 will double the delay
+ each retry
+ :type backoff: int
+ :param logger: logger to use. If None, print
+ :type logger: logging.Logger instance
+ :param quiet: flag to specify not to print any message.
+ :type quiet: bool
+ """
+ def deco_retry(f):
+
+ @wraps(f)
+ def f_retry(*args, **kwargs):
+ mtries, mdelay = tries, delay
+ while mtries > 1:
+ try:
+ return f(*args, **kwargs)
+ except ExceptionToCheck, e:
+ if not quiet:
+ msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
+ if logger:
+ logger.warning(msg)
+ else:
+ print msg
+ time.sleep(mdelay)
+ mtries -= 1
+ mdelay *= backoff
+ return f(*args, **kwargs)
+
+ return f_retry # true decorator
+
+ return deco_retry
diff --git a/changelog b/changelog
index 4b6e3e564..f5c04ec80 100644
--- a/changelog
+++ b/changelog
@@ -113,3 +113,12 @@ IM 1.2.4
* Dynamically refresh the Ctxt output
* Minor bugfix in EC2 connector when deleting a non existing instance
+IM 1.3.0
+ * Bugfix in OCCI, OpenNebula and Docker connectors when using incorrect credentials.
+ * Improve Docker connector code.
+ * Add Kubernetes connector.
+ * Bugfix in FogBow connector with version 1.0
+ * Bugfix in RADL with unicode strings
+ * Add StartVM and StopVM functions to the API
+ * Modify contextualization process to ignore VMs that are not running, enabling configuration of the remaining VMs of an infrastructure.
+ * Enable SSH with retry in all the ctxt steps
diff --git a/connectors/Docker.py b/connectors/Docker.py
index 520bcd1b9..0eed7298f 100644
--- a/connectors/Docker.py
+++ b/connectors/Docker.py
@@ -38,6 +38,8 @@ class DockerCloudConnector(CloudConnector):
""" Base number to assign SSH port on Docker server host."""
_port_counter = 0
""" Counter to assign SSH port on Docker server host."""
+ _root_password = "Aspecial+0ne"
+ """ Default password to set to the root in the container"""
def __init__(self, cloud_info):
self.cert_file = ''
@@ -54,16 +56,20 @@ def get_http_connection(self, auth_data):
"""
self.cert_file or os.path.isfile(self.cert_file)
-
-
- auth = auth_data.getAuthInfo(DockerCloudConnector.type)
+
url = uriparse(self.cloud.server)
+ auths = auth_data.getAuthInfo(DockerCloudConnector.type, url[1])
+ if not auths:
+ self.logger.error("No correct auth data has been specified to Docker.")
+ return None
+ else:
+ auth = auths[0]
if url[0] == 'unix':
socket_path = "/" + url[1] + url[2]
conn = UnixHTTPConnection.UnixHTTPConnection(socket_path)
elif url[0] == 'https':
- if auth and 'cert' in auth[0] and 'key' in auth[0]:
+ if 'cert' in auth and 'key' in auth:
if os.path.isfile(self.cert_file) and os.path.isfile(self.key_file):
cert_file = self.cert_file
key_file = self.key_file
@@ -84,13 +90,13 @@ def get_user_cert_data(self, auth):
"""
Get the Docker private_key and public_key files from the auth data
"""
- certificate = auth[0]['cert']
+ certificate = auth['cert']
fd, cert_file = tempfile.mkstemp()
os.write(fd, certificate)
os.close(fd)
os.chmod(cert_file,0644)
- private_key = auth[0]['key']
+ private_key = auth['key']
fd, key_file = tempfile.mkstemp()
os.write(fd, private_key)
os.close(fd)
@@ -111,6 +117,9 @@ def concreteSystem(self, radl_system, auth_data):
res_system.getFeature("cpu.count").operator = "="
res_system.getFeature("memory.size").operator = "="
+ res_system.setValue('disk.0.os.credentials.username', 'root')
+ res_system.setValue('disk.0.os.credentials.password', self._root_password)
+
res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other")
res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other")
res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other")
@@ -143,57 +152,90 @@ def setIPs(self, vm, cont_info):
vm.setIps(public_ips, private_ips)
- def _generate_volumes(self, system):
- volumes = ',"Volumes":{'
+ def _generate_create_request_data(self, outports, system, vm, ssh_port):
+ cont_data = {}
+
+ cpu = int(system.getValue('cpu.count')) - 1
+ memory = system.getFeature('memory.size').getValue('B')
+ #name = system.getValue("disk.0.image.name")
+ # The URI has this format: docker://image_name
+ image_name = system.getValue("disk.0.image.url")[9:]
+
+ (nodename, nodedom) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN)
+
+ volumes = self._generate_volumes(system)
+
+ cont_data['Hostname'] = nodename
+ cont_data['Domainname'] = nodedom
+ cont_data['Cmd'] = ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"]
+ cont_data['Image'] = image_name
+ cont_data['ExposedPorts'] = self._generate_exposed_ports(outports)
+ if volumes:
+ cont_data['Volumes'] = volumes
+
+ HostConfig = {}
+ HostConfig['CpusetCpus'] = "0-%d" % cpu
+ HostConfig['Memory'] = memory
+ HostConfig['PortBindings'] = self._generate_port_bindings(outports, ssh_port)
+ HostConfig['Binds'] = self._generate_volumes_binds(system)
+ cont_data['HostConfig'] = HostConfig
+
+ return cont_data
+
+ def _generate_volumes_binds(self, system):
+ binds = []
cont = 1
- while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".device"):
- # Use the device as the volume dir
+ while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." + str(cont) + ".device"):
+ disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path")
+ # Use the device as volume host path to bind
disk_device = system.getValue("disk." + str(cont) + ".device")
+ if not disk_mount_path.startswith('/'):
+ disk_mount_path = '/' + disk_mount_path
if not disk_device.startswith('/'):
disk_device = '/' + disk_device
- self.logger.debug("Attaching a volume in %s" % disk_device)
- if cont > 1:
- volumes += ','
- volumes += '"' + disk_device + '":{}'
+ self.logger.debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
+ binds.append(disk_device + ":" + disk_mount_path)
cont += 1
+
+ return binds
+
+ def _generate_volumes(self, system):
+ volumes = {}
- if cont == 1:
- volumes = ""
- else:
- volumes += "}"
+ cont = 1
+ while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path"):
+ # Use the mount_path as the volume dir
+ disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path")
+ if not disk_mount_path.startswith('/'):
+ disk_mount_path = '/' + disk_mount_path
+ self.logger.debug("Attaching a volume in %s" % disk_mount_path)
+ volumes[disk_mount_path] = {}
+ cont += 1
return volumes
-
- def _generate_port_bindings(self, outports, ssh_port):
- port_bindings = ""
- ssh_found = False
+
+ def _generate_exposed_ports(self, outports):
+ exposed_ports = {"22/tcp": {}}
+ if outports:
+ for _,_,local_port,local_protocol in outports:
+ if local_port != 22:
+ exposed_ports[str(local_port) + '/' + local_protocol.lower()] = {}
+ return exposed_ports
+
+ def _generate_port_bindings(self, outports, ssh_port):
+ res = {}
+ res["22/tcp"] = [{"HostPort":ssh_port}]
if outports:
- num = 0
for remote_port,_,local_port,local_protocol in outports:
- if num > 0:
- port_bindings = port_bindings + ",\n"
- port_bindings = port_bindings + '"PortBindings":{ "' + str(local_port) + '/' + local_protocol + '": [{ "HostPort": "' + str(remote_port) + '" }] }'
- num += 1
-
- if not ssh_found:
- if port_bindings:
- port_bindings += ",\n"
- port_bindings = port_bindings + '"PortBindings":{ "22/tcp": [{ "HostPort": "' + str(ssh_port) + '" }] }\n'
+ if local_port != 22:
+ res[str(local_port) + '/' + local_protocol] = [{"HostPort":remote_port}]
- return port_bindings
+ return res
def launch(self, inf, radl, requested_radl, num_vm, auth_data):
system = radl.systems[0]
- cpu = int(system.getValue('cpu.count'))
- memory = system.getFeature('memory.size').getValue('B')
- #name = system.getValue("disk.0.image.name")
- # The URI has this format: docker://image_name
- image_name = system.getValue("disk.0.image.url")[9:]
-
- volumes = self._generate_volumes(system)
-
public_net = None
for net in radl.networks:
if net.isPublic():
@@ -203,12 +245,6 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
if public_net:
outports = public_net.getOutPorts()
- exposed_ports = '"22/tcp": {}'
- if outports:
- for _,_,local_port,local_protocol in outports:
- if local_port != 22:
- exposed_ports = exposed_ports + ', "' + str(local_port) + '/' + local_protocol + '": {}'
-
conn = self.get_http_connection(auth_data)
res = []
i = 0
@@ -216,30 +252,21 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
try:
i += 1
+ ssh_port = 22
+ if public_net:
+ ssh_port = (DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter) % 65535
+ DockerCloudConnector._port_counter += 1
+
# Create the VM to get the nodename
vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl, self)
- (nodename, nodedom) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN)
-
- create_request_json = """ {
- "Hostname":"%s",
- "Domainname": "%s",
- "Cpuset": "0-%d",
- "Memory":%s,
- "Cmd":[
- "/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:yoyoyo' | chpasswd ; /usr/sbin/sshd -D"
- ],
- "Image":"%s",
- "ExposedPorts":{
- %s
- }
- %s
- }""" % (nodename, nodedom, cpu-1, memory,image_name,exposed_ports,volumes)
# Create the container
conn.putrequest('POST', "/containers/create")
conn.putheader('Content-Type', 'application/json')
- body = create_request_json
+ cont_data = self._generate_create_request_data(outports, system, vm, ssh_port)
+ body = json.dumps(cont_data)
+
conn.putheader('Content-Length', len(body))
conn.endheaders(body)
@@ -250,41 +277,22 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
continue
output = json.loads(output)
- docker_vm_id = output["Id"]
-
- # Now start it
- conn.putrequest('POST', "/containers/" + docker_vm_id + "/start")
- conn.putheader('Content-Type', 'application/json')
-
- start_request_json = "{}"
- # If the user requested a public IP, specify the PortBindings
- ssh_port = 22
- if public_net:
- start_request_json = " { "
-
- ssh_port = DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter
- DockerCloudConnector._port_counter += 1
-
- start_request_json += self._generate_port_bindings(outports, ssh_port)
-
- start_request_json += "}"
-
- body = start_request_json
- conn.putheader('Content-Length', len(body))
- conn.endheaders(body)
+ # Set the cloud id to the VM
+ vm.id = output["Id"]
- resp = conn.getresponse()
- output = resp.read()
- if resp.status != 204:
- res.append((False, "Error staring the Container: " + output))
+ # Now start it
+ success, _ = self.start(vm, auth_data)
+ if not success:
+ res.append((False, "Error starting the Container: " + output))
# Delete the container
- conn.request('DELETE', "/containers/" + docker_vm_id)
+ conn.request('DELETE', "/containers/" + vm.id)
resp = conn.getresponse()
resp.read()
continue
- # Now set the cloud id to the VM
- vm.id = docker_vm_id
+ # Set the default user and password to access the container
+ vm.info.systems[0].setValue('disk.0.os.credentials.username', 'root')
+ vm.info.systems[0].setValue('disk.0.os.credentials.password', self._root_password)
# Set ssh port in the RADL info of the VM
vm.setSSHPort(ssh_port)
@@ -347,7 +355,6 @@ def finalize(self, vm, auth_data):
self.logger.exception("Error connecting with Docker server")
return (False, "Error connecting with Docker server")
-
def stop(self, vm, auth_data):
try:
conn = self.get_http_connection(auth_data)
diff --git a/connectors/EC2.py b/connectors/EC2.py
index 392d9e153..65c0dac19 100644
--- a/connectors/EC2.py
+++ b/connectors/EC2.py
@@ -864,8 +864,8 @@ def updateVMInfo(self, vm, auth_data):
self.logger.exception("Error updating the instance " + instance_id)
return (False, "Error updating the instance " + instance_id + ": " + str(ex))
- vm.info.systems[0].setValue("virtual_system_type", "'" + instance.virtualization_type + "'")
- vm.info.systems[0].setValue("availability_zone", "'" + instance.placement + "'")
+ vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type)
+ vm.info.systems[0].setValue("availability_zone", instance.placement)
vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN)
diff --git a/connectors/FogBow.py b/connectors/FogBow.py
index 38e19116e..c2f4933d0 100644
--- a/connectors/FogBow.py
+++ b/connectors/FogBow.py
@@ -101,7 +101,7 @@ def get_occi_attribute_value(self, occi_res, attr_name):
lines = occi_res.split("\n")
for l in lines:
if l.find('X-OCCI-Attribute: ' + attr_name + '=') != -1:
- return l.split('=')[1].strip('"')
+ return str(l.split('=')[1].strip().strip('"'))
return None
"""
diff --git a/connectors/GCE.py b/connectors/GCE.py
index 59612c422..24a2c3f06 100644
--- a/connectors/GCE.py
+++ b/connectors/GCE.py
@@ -54,7 +54,9 @@ def get_driver(self, auth_data):
auth = auth_data.getAuthInfo(self.type)
if auth and 'username' in auth[0] and 'password' in auth[0] and 'project' in auth[0]:
- cls = get_driver(Provider.GCE)
+ cls = get_driver(Provider.GCE)
+ # Patch to solve some client problems with \\n
+ auth[0]['password'] = auth[0]['password'].replace('\\n','\n')
lines = len(auth[0]['password'].replace(" ","").split())
if lines < 2:
raise Exception("The certificate provided to the GCE plugin has an incorrect format. Check that it has more than one line.")
diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py
new file mode 100644
index 000000000..92b5f6220
--- /dev/null
+++ b/connectors/Kubernetes.py
@@ -0,0 +1,519 @@
+# IM - Infrastructure Manager
+# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import time
+import string
+import base64
+import json
+import httplib
+from IM.uriparse import uriparse
+from IM.VirtualMachine import VirtualMachine
+from CloudConnector import CloudConnector
+from IM.radl.radl import Feature
+from IM.config import Config
+
+
+class KubernetesCloudConnector(CloudConnector):
+ """
+ Cloud Launcher to Kubernetes platform
+ """
+
+ type = "Kubernetes"
+
+ _port_base_num = 35000
+ """ Base number to assign SSH port on Kubernetes node."""
+ _port_counter = 0
+ """ Counter to assign SSH port on Kubernetes node."""
+ _root_password = "Aspecial+0ne"
+ """ Default password to set to the root in the container"""
+ _apiVersions = ["v1", "v1beta3"]
+ """ Supported API versions"""
+
+ VM_STATE_MAP = {
+ 'Pending': VirtualMachine.PENDING,
+ 'Running': VirtualMachine.RUNNING,
+ 'Succeeded': VirtualMachine.OFF,
+ 'Failed': VirtualMachine.FAILED
+ }
+ """Dictionary with a map with the Kubernetes POD states to the IM states."""
+
+ def get_http_connection(self):
+ """
+ Get the HTTPConnection object to contact the Kubernetes API
+
+ Returns(HTTPConnection or HTTPSConnection): HTTPConnection connection object
+ """
+
+ url = uriparse(self.cloud.server)
+
+ if url[0] == 'https':
+ conn = httplib.HTTPSConnection(url[1], self.cloud.port)
+ elif url[0] == 'http':
+ self.logger.warn("Using a unsecure connection to Kubernetes API!")
+ conn = httplib.HTTPConnection(url[1], self.cloud.port)
+
+ return conn
+
+ def get_auth_header(self, auth_data):
+ """
+ Generate the auth header needed to contact with the Kubernetes API server.
+ """
+ url = uriparse(self.cloud.server)
+ auths = auth_data.getAuthInfo(self.type, url[1])
+ if not auths:
+ self.logger.error("No correct auth data has been specified to Kubernetes.")
+ return None
+ else:
+ auth = auths[0]
+
+ auth_header = None
+
+ if 'username' in auth and 'password' in auth:
+ passwd = auth['password']
+ user = auth['username']
+ auth_header = { 'Authorization' : 'Basic ' + string.strip(base64.encodestring(user + ':' + passwd))}
+ elif 'token' in auth:
+ token = auth['token']
+ auth_header = { 'Authorization' : 'Bearer ' + token }
+
+ return auth_header
+
+ def get_api_version(self, auth_data):
+ """
+ Return the API version to use to connect with kubernetes API server
+ """
+ version = self._apiVersions[0]
+
+ try:
+ auth = self.get_auth_header(auth_data)
+ headers = {}
+ if auth:
+ headers.update(auth)
+ conn = self.get_http_connection()
+
+ conn.request('GET', "/api/", headers = headers)
+ resp = conn.getresponse()
+
+ output = resp.read()
+
+ if resp.status == 200:
+ output = json.loads(output)
+ for v in self._apiVersions:
+ if v in output["versions"]:
+ return v
+
+ except Exception:
+ self.logger.exception("Error connecting with Kubernetes API server")
+
+ self.logger.warn("Error getting a compatible API version. Setting the default one.")
+ self.logger.debug("Using %s API version." % version)
+ return version
+
+
+ def concreteSystem(self, radl_system, auth_data):
+ if radl_system.getValue("disk.0.image.url"):
+ url = uriparse(radl_system.getValue("disk.0.image.url"))
+ protocol = url[0]
+ if protocol == 'docker' and url[1]:
+ res_system = radl_system.clone()
+
+ res_system.addFeature(Feature("virtual_system_type", "=", "docker"), conflict="other", missing="other")
+
+ res_system.getFeature("cpu.count").operator = "="
+ res_system.getFeature("memory.size").operator = "="
+
+ res_system.setValue('disk.0.os.credentials.username', 'root')
+ res_system.setValue('disk.0.os.credentials.password', self._root_password)
+
+ res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other")
+ res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other")
+ res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other")
+
+ return [res_system]
+ else:
+ return []
+ else:
+ return [radl_system.clone()]
+
+ def _delete_volume_claim(self, namespace, vc_name, auth_data):
+ try:
+ auth = self.get_auth_header(auth_data)
+ headers = {}
+ if auth:
+ headers.update(auth)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ conn.request('DELETE', "/api/" + apiVersion + "/namespaces/" + namespace + "/persistentvolumeclaims/" + vc_name, headers = headers)
+ resp = conn.getresponse()
+ output = str(resp.read())
+ if resp.status == 404:
+ self.logger.warn("Trying to remove a non existing PersistentVolumeClaim: " + vc_name)
+ return True
+ elif resp.status != 200:
+ self.logger.error("Error deleting the PersistentVolumeClaim: " + output)
+ return False
+ else:
+ return True
+ except Exception:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ return False
+
+ def _delete_volume_claims(self, pod_data, auth_data):
+ if 'volumes' in pod_data['spec']:
+ for volume in pod_data['spec']['volumes']:
+ if 'persistentVolumeClaim' in volume and 'claimName' in volume['persistentVolumeClaim']:
+ vc_name = volume['persistentVolumeClaim']['claimName']
+ success = self._delete_volume_claim(pod_data["metadata"]["namespace"], vc_name, auth_data)
+ if not success:
+ self.logger.error("Error deleting PersistentVolumeClaim:" + vc_name)
+
+ def _create_volume_claim(self, claim_data, auth_data):
+ try:
+ auth_header = self.get_auth_header(auth_data)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ conn.putrequest('POST', "/api/" + apiVersion + "/namespaces/" + claim_data['metadata']['namespace'] + "/persistentvolumeclaims")
+ conn.putheader('Content-Type', 'application/json')
+ if auth_header:
+ conn.putheader(auth_header.keys()[0], auth_header.values()[0])
+
+ body = json.dumps(claim_data)
+ conn.putheader('Content-Length', len(body))
+ conn.endheaders(body)
+ resp = conn.getresponse()
+
+ output = str(resp.read())
+ if resp.status != 201:
+ self.logger.error("Error creating the PersistentVolumeClaim: " + output)
+ return False
+ else:
+ return True
+ except Exception:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ return False
+
+ def _create_volumes(self, apiVersion, namespace, system, pod_name, auth_data, persistent = False):
+ res = []
+ cont = 1
+ while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." + str(cont) + ".device"):
+ disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path")
+ # Use the device as volume host path to bind
+ disk_device = system.getValue("disk." + str(cont) + ".device")
+ disk_size = system.getFeature("disk." + str(cont) + ".size").getValue('B')
+ if not disk_mount_path.startswith('/'):
+ disk_mount_path = '/' + disk_mount_path
+ if not disk_device.startswith('/'):
+ disk_device = '/' + disk_device
+ self.logger.debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path))
+ name = "%s-%d" % (pod_name, cont)
+
+ if persistent:
+ claim_data = { 'apiVersion': apiVersion, 'kind': 'PersistentVolumeClaim' }
+ claim_data['metadata'] = { 'name': name, 'namespace': namespace }
+ claim_data['spec'] = { 'accessModes' : ['ReadWriteOnce'], 'resources' : {'requests' : {'storage' : disk_size} } }
+
+ success = self._create_volume_claim(claim_data, auth_data)
+ if success:
+ res.append((name, disk_device, disk_size, disk_mount_path, persistent))
+ else:
+ self.logger.error("Error creating PersistentVolumeClaim:" + name)
+ else:
+ res.append((name, disk_device, disk_size, disk_mount_path, persistent))
+
+ cont += 1
+
+ return res
+
+ def _generate_pod_data(self, apiVersion, namespace, name, outports, system, ssh_port, volumes):
+ cpu = str(system.getValue('cpu.count'))
+ memory = "%s" % system.getFeature('memory.size').getValue('B')
+ # The URI has this format: docker://image_name
+ image_name = system.getValue("disk.0.image.url")[9:]
+
+ ports = [{'containerPort': 22, 'protocol': 'TCP', 'hostPort':ssh_port}]
+ if outports:
+ for remote_port,_,local_port,local_protocol in outports:
+ if local_port != 22:
+ ports.append({'containerPort':local_port, 'protocol': local_protocol.upper(), 'hostPort': remote_port})
+
+ pod_data = { 'apiVersion': apiVersion, 'kind': 'Pod' }
+ pod_data['metadata'] = {
+ 'name': name,
+ 'namespace': namespace,
+ 'labels': {'name': name}
+ }
+ containers = [{
+ 'name': name,
+ 'image': image_name,
+ 'command': ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"],
+ 'imagePullPolicy': 'IfNotPresent',
+ 'ports': ports,
+ 'resources': {'limits': {'cpu': cpu, 'memory': memory}}
+ }]
+
+ if volumes:
+ containers[0]['volumeMounts'] = []
+ for (v_name, _, _, v_mount_path, _) in volumes:
+ containers[0]['volumeMounts'].append({'name':v_name, 'mountPath':v_mount_path})
+
+ pod_data['spec'] = {'containers' : containers, 'restartPolicy': 'Never'}
+
+ if volumes:
+ pod_data['spec']['volumes'] = []
+ for (v_name, v_device, _, _, persistent) in volumes:
+ if persistent:
+ pod_data['spec']['volumes'].append({'name': v_name, 'persistentVolumeClaim': {'claimName': v_name}})
+ else:
+ if v_device:
+ # Use the device as volume host path to bind
+ pod_data['spec']['volumes'].append({'name': v_name, 'hostPath': {'path': v_device}})
+ else:
+ pod_data['spec']['volumes'].append({'name': v_name, 'emptyDir': {}})
+
+ return pod_data
+
+ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
+ system = radl.systems[0]
+
+ public_net = None
+ for net in radl.networks:
+ if net.isPublic():
+ public_net = net
+
+ outports = None
+ if public_net:
+ outports = public_net.getOutPorts()
+
+ auth_header = self.get_auth_header(auth_data)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ res = []
+ i = 0
+ while i < num_vm:
+ try:
+ i += 1
+
+ namespace = "im%d" % int(time.time()*100)
+ vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl, self)
+ (nodename, _) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN)
+ pod_name = nodename
+
+ # Do not use the Persistent volumes yet
+ volumes = self._create_volumes(apiVersion, namespace, system, pod_name, auth_data)
+
+ # Create the pod
+ conn.putrequest('POST', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods")
+ conn.putheader('Content-Type', 'application/json')
+ if auth_header:
+ conn.putheader(auth_header.keys()[0], auth_header.values()[0])
+
+ ssh_port = (KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter) % 65535
+ KubernetesCloudConnector._port_counter += 1
+ pod_data = self._generate_pod_data(apiVersion, namespace, pod_name, outports, system, ssh_port, volumes)
+ body = json.dumps(pod_data)
+ conn.putheader('Content-Length', len(body))
+ conn.endheaders(body)
+
+ resp = conn.getresponse()
+ output = resp.read()
+ if resp.status != 201:
+ res.append((False, "Error creating the Container: " + output))
+ else:
+ output = json.loads(output)
+ vm.id = output["metadata"]["namespace"] + "/" + output["metadata"]["name"]
+ # Set SSH port in the RADL info of the VM
+ vm.setSSHPort(ssh_port)
+ # Set the default user and password to access the container
+ vm.info.systems[0].setValue('disk.0.os.credentials.username', 'root')
+ vm.info.systems[0].setValue('disk.0.os.credentials.password', self._root_password)
+
+ res.append((True, vm))
+
+ except Exception, ex:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ res.append((False, "ERROR: " + str(ex)))
+
+ return res
+
+ def _get_pod(self, vm_id, auth_data):
+ try:
+ namespace = vm_id.split("/")[0]
+ pod_name = vm_id.split("/")[1]
+
+ auth = self.get_auth_header(auth_data)
+ headers = {}
+ if auth:
+ headers.update(auth)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ conn.request('GET', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers)
+ resp = conn.getresponse()
+
+ output = resp.read()
+
+ if resp.status == 404:
+ return (True, resp.status, output)
+ elif resp.status != 200:
+ return (False, resp.status, output)
+ else:
+ return (True, resp.status, output)
+
+ except Exception, ex:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ return (False, None, "Error connecting with Kubernetes API server: " + str(ex))
+
+ def updateVMInfo(self, vm, auth_data):
+ success, status, output = self._get_pod(vm.id, auth_data)
+ if success:
+ if status == 404:
+ # If the container does not exist, set state to OFF
+ vm.state = VirtualMachine.OFF
+ return (True, vm)
+ else:
+ output = json.loads(output)
+ vm.state = self.VM_STATE_MAP.get(output["status"]["phase"], VirtualMachine.UNKNOWN)
+
+ # Update the network info
+ self.setIPs(vm,output)
+ return (True, vm)
+ else:
+ self.logger.error("Error getting info about the POD: " + output)
+ return (False, "Error getting info about the POD: " + output)
+
+ def setIPs(self, vm, pod_info):
+ """
+ Adapt the RADL information of the VM to the real IPs assigned by the cloud provider
+
+ Arguments:
+ - vm(:py:class:`IM.VirtualMachine`): VM information.
+ - pod_info(dict): JSON information about the POD
+ """
+
+ public_ips = []
+ private_ips = []
+ if 'hostIP' in pod_info["status"]:
+ public_ips = [str(pod_info["status"]["hostIP"])]
+ if 'podIP' in pod_info["status"]:
+ private_ips = [str(pod_info["status"]["podIP"])]
+
+ vm.setIps(public_ips, private_ips)
+
+ def finalize(self, vm, auth_data):
+ success, status, output = self._get_pod(vm.id, auth_data)
+ if success:
+ if status == 404:
+ self.logger.warn("Trying to remove a non existing POD id: " + vm.id)
+ return (True, vm.id)
+ else:
+ pod_data = json.loads(output)
+ self._delete_volume_claims(pod_data, auth_data)
+
+ return self._delete_pod(vm.id, auth_data)
+
+ def _delete_pod(self, vm_id, auth_data):
+ try:
+ namespace = vm_id.split("/")[0]
+ pod_name = vm_id.split("/")[1]
+
+ auth = self.get_auth_header(auth_data)
+ headers = {}
+ if auth:
+ headers.update(auth)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ conn.request('DELETE', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers)
+ resp = conn.getresponse()
+ output = str(resp.read())
+ if resp.status == 404:
+ self.logger.warn("Trying to remove a non existing POD id: " + pod_name)
+ return (True, pod_name)
+ elif resp.status != 200:
+ return (False, "Error deleting the POD: " + output)
+ else:
+ return (True, pod_name)
+ except Exception:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ return (False, "Error connecting with Kubernetes API server")
+
+ def stop(self, vm, auth_data):
+ return (False, "Not supported")
+
+ def start(self, vm, auth_data):
+ return (False, "Not supported")
+
+ def alterVM(self, vm, radl, auth_data):
+ # This function is correctly implemented
+ # But kubernetes does not permit cpu to be updated yet
+ system = radl.systems[0]
+
+ auth_header = self.get_auth_header(auth_data)
+ conn = self.get_http_connection()
+ apiVersion = self.get_api_version(auth_data)
+
+ try:
+ pod_data = []
+
+ cpu = vm.info.systems[0].getValue('cpu.count')
+ memory = vm.info.systems[0].getFeature('memory.size').getValue('B')
+
+ new_cpu = system.getValue('cpu.count')
+ new_memory = system.getFeature('memory.size').getValue('B')
+
+ changed = False
+ if new_cpu and new_cpu != cpu:
+ pod_data.append({"op": "replace", "path": "/spec/containers/0/resources/limits/cpu", "value": new_cpu})
+ changed = True
+ if new_memory and new_memory != memory:
+ pod_data.append({"op": "replace", "path": "/spec/containers/0/resources/limits/memory", "value": new_memory})
+ changed = True
+
+ if not changed:
+ self.logger.debug("Nothing changes in the kubernetes pod: " + str(vm.id))
+ return (True, vm)
+
+ # Create the container
+ namespace = vm.id.split("/")[0]
+ pod_name = vm.id.split("/")[1]
+ conn.putrequest('PATCH', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name)
+ conn.putheader('Content-Type', 'application/json-patch+json')
+ if auth_header:
+ conn.putheader(auth_header.keys()[0], auth_header.values()[0])
+ body = json.dumps(pod_data)
+ conn.putheader('Content-Length', len(body))
+ conn.endheaders(body)
+
+ resp = conn.getresponse()
+ output = resp.read()
+ if resp.status != 201:
+ return (False, "Error updating the Pod: " + output)
+ else:
+ if new_cpu:
+ vm.info.systems[0].setValue('cpu.count', new_cpu)
+ if new_memory:
+ vm.info.systems[0].addFeature(Feature("memory.size", "=", new_memory, 'B'), conflict="other", missing="other")
+ return (True, self.updateVMInfo(vm, auth_data))
+
+ except Exception, ex:
+ self.logger.exception("Error connecting with Kubernetes API server")
+ return (False, "ERROR: " + str(ex))
+
+
+ return (False, "Not supported")
diff --git a/connectors/OCCI.py b/connectors/OCCI.py
index 315212a00..ea4246610 100644
--- a/connectors/OCCI.py
+++ b/connectors/OCCI.py
@@ -43,7 +43,7 @@ class OCCICloudConnector(CloudConnector):
'active': VirtualMachine.RUNNING,
'inactive': VirtualMachine.OFF,
'error': VirtualMachine.FAILED,
- 'suspended': VirtualMachine.OFF
+ 'suspended': VirtualMachine.STOPPED
}
"""Dictionary with a map with the OCCI VM states to the IM states."""
@@ -56,7 +56,7 @@ def get_https_connection(self, auth, server, port):
Get a HTTPS connection with the specified server.
It uses a proxy file if it has been specified in the auth credentials
"""
- if 'proxy' in auth:
+ if auth and 'proxy' in auth:
if self.proxy_filename and os.path.isfile(self.proxy_filename):
proxy_filename = self.proxy_filename
else:
@@ -78,6 +78,7 @@ def get_http_connection(self, auth_data):
auths = auth_data.getAuthInfo(self.type, self.cloud.server)
if not auths:
self.logger.error("No correct auth data has been specified to OCCI.")
+ auth = None
else:
auth = auths[0]
@@ -227,7 +228,15 @@ def updateVMInfo(self, vm, auth_data):
elif resp.status != 200:
return (False, resp.reason + "\n" + output)
else:
- vm.state = self.VM_STATE_MAP.get(self.get_occi_attribute_value(output, 'occi.compute.state'), VirtualMachine.UNKNOWN)
+ old_state = vm.state
+ occi_state = self.get_occi_attribute_value(output, 'occi.compute.state')
+
+ # I have to do that because OCCI returns 'inactive' when a VM is starting
+ # to distinguish from the OFF state
+ if old_state == VirtualMachine.PENDING and occi_state == 'inactive':
+ vm.state = VirtualMachine.PENDING
+ else:
+ vm.state = self.VM_STATE_MAP.get(occi_state, VirtualMachine.UNKNOWN)
cores = self.get_occi_attribute_value(output, 'occi.compute.cores')
if cores:
@@ -345,10 +354,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data):
(public_key, private_key) = self.keygen()
system.setValue('disk.0.os.credentials.private_key', private_key)
- user = system.getValue('disk.os.credentials.username')
+ user = system.getValue('disk.0.os.credentials.username')
if not user:
user = "cloudadm"
- system.setValue('disk.os.credentials.username', user)
+ system.setValue('disk.0.os.credentials.username', user)
cloud_config = self.gen_cloud_config(public_key, user)
user_data = base64.b64encode(cloud_config).replace("\n","")
diff --git a/connectors/OpenNebula.py b/connectors/OpenNebula.py
index b9d052538..f88ed0a50 100644
--- a/connectors/OpenNebula.py
+++ b/connectors/OpenNebula.py
@@ -147,11 +147,15 @@ def getSessionID(self, auth_data, hash_password = None):
Returns: str with the Session ID
"""
if self.session_id:
+ # TODO: known issue: If the IM service is restarted, the first attempt to access this VM
+ # will set this session_id. If the credentials are not correct the session_id will be always
+ # incorrect until the IM service is restarted again ...
return self.session_id
else:
auths = auth_data.getAuthInfo(self.type, self.cloud.server)
if not auths:
self.logger.error("No correct auth data has been specified to OpenNebula.")
+ return None
else:
auth = auths[0]
@@ -211,6 +215,8 @@ def updateVMInfo(self, vm, auth_data):
if res_vm.STATE == 3:
if res_vm.LCM_STATE == 3:
res_state = VirtualMachine.RUNNING
+ elif res_vm.LCM_STATE == 5 or res_vm.LCM_STATE == 6:
+ res_state = VirtualMachine.STOPPED
else:
res_state = VirtualMachine.PENDING
elif res_vm.STATE < 3 :
diff --git a/connectors/__init__.py b/connectors/__init__.py
index 8f4529c07..801dc15d4 100644
--- a/connectors/__init__.py
+++ b/connectors/__init__.py
@@ -15,4 +15,4 @@
# along with this program. If not, see .
-__all__ = ['CloudConnector','EC2','OCCI','OpenNebula','OpenStack','LibVirt','LibCloud','Docker','GCE','FogBow', 'Azure', 'DeployedNode']
+__all__ = ['CloudConnector','EC2','OCCI','OpenNebula','OpenStack','LibVirt','LibCloud','Docker','GCE','FogBow', 'Azure', 'DeployedNode','Kubernetes']
diff --git a/doc/source/REST.rst b/doc/source/REST.rst
index 08f49a367..ed4e85fa2 100644
--- a/doc/source/REST.rst
+++ b/doc/source/REST.rst
@@ -12,39 +12,45 @@ password are not valid, it is returned the HTTP error code 401.
Next tables summaries the resources and the HTTP methods available.
-+-------------+------------------------+-------------------------------+-----------------------------------------+
-| HTTP method | /infrastructures | /infrastructures/ | /infrastructures//vms/ |
-+=============+========================+===============================+=========================================+
-| **GET** | **List** the | **List** the virtual machines | **Get** information associated to the |
-| | infrastructure | in the infrastructure | virtual machine ``vmId`` in ``infId``. |
-| | IDs. | ``infId`` | |
-+-------------+------------------------+-------------------------------+-----------------------------------------+
-| **POST** | **Create** a new | **Create** a new virtual | |
-| | infrastructure | machine based on the RADL | |
-| | based on the RADL | posted. | |
-| | posted. | | |
-+-------------+------------------------+-------------------------------+-----------------------------------------+
-| **PUT** | | | **Modify** the virtual machine based on |
-| | | | the RADL posted. |
-+-------------+------------------------+-------------------------------+-----------------------------------------+
-| **DELETE** | | **Undeploy** all the virtual | **Undeploy** the virtual machine. |
-| | | machines in the | |
-| | | infrastructure. | |
-+-------------+------------------------+-------------------------------+-----------------------------------------+
++-------------+-------------------+-------------------------------+-----------------------------------------+
+| HTTP method | /infrastructures | /infrastructures/ | /infrastructures//vms/ |
++=============+===================+===============================+=========================================+
+| **GET** | **List** the | **List** the virtual machines | **Get** information associated to the |
+| | infrastructure | in the infrastructure | virtual machine ``vmId`` in ``infId``. |
+| | IDs. | ``infId`` | |
++-------------+-------------------+-------------------------------+-----------------------------------------+
+| **POST** | **Create** a new | **Create** a new virtual | |
+| | infrastructure | machine based on the RADL | |
+| | based on the RADL | posted. | |
+| | posted. | | |
++-------------+-------------------+-------------------------------+-----------------------------------------+
+| **PUT** | | | **Modify** the virtual machine based on |
+| | | | the RADL posted. |
++-------------+-------------------+-------------------------------+-----------------------------------------+
+| **DELETE** | | **Undeploy** all the virtual | **Undeploy** the virtual machine. |
+| | | machines in the | |
+| | | infrastructure. | |
++-------------+-------------------+-------------------------------+-----------------------------------------+
-+-------------+--------------------------------+---------------------------------+----------------------------------------+
-| HTTP method | /infrastructures//stop | /infrastructures//start | /infrastructures//reconfigure |
-+=============+================================+=================================+========================================+
-| **PUT** | **Stop** the infrastructure. | **Start** the infrastructure. | **Reconfigure** the infrastructure. |
-+-------------+--------------------------------+---------------------------------+----------------------------------------+
-
-+-------------+--------------------------------------------------------+--------------------------------------------------+
-| HTTP method | /infrastructures//vms// | /infrastructures// |
-+=============+========================================================+==================================================+
-| **GET** | **Get** the specified property ``property_name`` | **Get** the specified property ``property_name`` |
-| | associated to the machine ``vmId`` in ``infId`` | associated to the infrastructure ``infId``. |
-| | | It has two properties: ``contmsg`` and ``radl`` |
-+-------------+--------------------------------------------------------+--------------------------------------------------+
++-------------+-------------------------------+--------------------------------+--------------------------------------+
+| HTTP method | /infrastructures//stop | /infrastructures//start | /infrastructures//reconfigure |
++=============+===============================+================================+======================================+
+| **PUT** | **Stop** the infrastructure. | **Start** the infrastructure. | **Reconfigure** the infrastructure. |
++-------------+-------------------------------+--------------------------------+--------------------------------------+
+
++-------------+-----------------------------------------------------+--------------------------------------------------+
+| HTTP method | /infrastructures//vms// | /infrastructures// |
++=============+=====================================================+==================================================+
+| **GET** | **Get** the specified property ``property_name`` | **Get** the specified property ``property_name`` |
+| | associated to the machine ``vmId`` in ``infId`` | associated to the infrastructure ``infId``. |
+| | | It has two properties: ``contmsg`` and ``radl`` |
++-------------+-----------------------------------------------------+--------------------------------------------------+
+
++-------------+--------------------------------------------+---------------------------------------------+
+| HTTP method | /infrastructures//vms//stop | /infrastructures//start |
++=============+============================================+=============================================+
+| **PUT** | **Stop** the machine ``vmId`` in ``infId`` | **Start** the machine ``vmId`` in ``infId`` |
++-------------+--------------------------------------------+---------------------------------------------+
GET ``http://imserver.com/infrastructures``
:Content-type: text/uri-list
@@ -160,3 +166,19 @@ DELETE ``http://imserver.com/infrastructures//vms/``
Undeploy the virtual machine with ID ``vmId`` associated to the
infrastructure with ID ``infId``.
+
+PUT ``http://imserver.com/infrastructures//vms//start``
+ :Content-type: text/plain
+ :ok response: 200 OK
+ :fail response: 401, 404, 400
+
+ Perform the ``start`` action in the virtual machine with ID
+ ``vmId`` associated to the infrastructure with ID ``infId``.
+
+PUT ``http://imserver.com/infrastructures//vms//stop``
+ :Content-type: text/plain
+ :ok response: 200 OK
+ :fail response: 401, 404, 400
+
+ Perform the ``stop`` action in the virtual machine with ID
+ ``vmId`` associated to the infrastructure with ID ``infId``.
\ No newline at end of file
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
index f875ccfe0..4b09af2b9 100644
--- a/doc/source/manual.rst
+++ b/doc/source/manual.rst
@@ -61,6 +61,8 @@ Optional Packages
if the access to XML-RPC API is secured with SSL certificates (see
:confval:`XMLRCP_SSL`).
The Debian package is named ``python-springpython``.
+* `Bottle `_ is needed if needed to use the REST API
+ (see :confval:`ACTIVATE_REST`). The Debian package is named ``python-bottle``.
* `CherryPy `_ is needed if needed to secure the REST API
with SSL certificates (see :confval:`REST_SSL`).
The Debian package is named ``python-cherrypy3``.
diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst
index 36a483100..06a8c4cb0 100644
--- a/doc/source/xmlrpc.rst
+++ b/doc/source/xmlrpc.rst
@@ -175,12 +175,13 @@ This is the list of method names:
:parameter 0: ``infId``: integer
:parameter 1: ``vmIds``: string
:parameter 2: ``auth``: array of structs
- :ok response: [true, ``infId``: integer]
+ :ok response: [true, integer]
:fail response: [false, ``error``: string]
Updeploy the virtual machines with IDs in ``vmIds`` associated to the
infrastructure with ID ``infId``. The different virtual machine IDs in
- ``vmIds`` are separated by commas.
+ ``vmIds`` are separated by commas. On success it returns the number of
+ VMs that have been undeployed.
.. _StopInfrastructure-xmlrpc:
@@ -194,6 +195,19 @@ This is the list of method names:
infrastructure with ID ``infId``. They can resume by
:ref:`StartInfrastructure `.
+.. _StopVM-xmlrpc:
+
+``StopVM``
+ :parameter 0: ``infId``: integer
+ :parameter 1: ``vmId``: integer
+ :parameter 2: ``auth``: array of structs
+ :ok response: [true, string of length zero]
+ :fail response: [false, ``error``: string]
+
+ Stop (but do not undeploy) the specified virtual machine with ID ``vmId``
+ associated to the infrastructure with ID ``infId``. They can resume by
+ :ref:`StartVM `.
+
.. _StartInfrastructure-xmlrpc:
``StartInfrastructure``
@@ -206,6 +220,20 @@ This is the list of method names:
infrastructure with ID ``infId``, previously stopped by
:ref:`StopInfrastructure `.
+.. _StartVM-xmlrpc:
+
+``StartVM``
+ :parameter 0: ``infId``: integer
+ :parameter 1: ``vmId``: integer
+ :parameter 2: ``auth``: array of structs
+ :ok response: [true, string of length zero]
+ :fail response: [false, ``error``: string]
+
+ Resume the specified virtual machine with ID ``vmId`` associated to the
+ infrastructure with ID ``infId``, previously stopped by
+ :ref:`StopInfrastructure ` or
+ :ref:`StopVM `
+
.. _Reconfigure-xmlrpc:
``Reconfigure``
diff --git a/im_service.py b/im_service.py
index da43fa788..3190788a7 100755
--- a/im_service.py
+++ b/im_service.py
@@ -79,7 +79,8 @@ def StartInfrastructure(inf_id, auth_data):
return WaitRequest(request)
def DestroyInfrastructure(inf_id, auth_data):
- request = IMBaseRequest.create_request(IMBaseRequest.DESTROY_INFRASTRUCTURE,(inf_id, auth_data))
+ request = IMBaseRequest.create_request(IMBaseRequest.DESTROY_INFRASTRUCTURE,(inf_id, auth_data))
+ # This function take a lot of time in some connectors. We can make it async: return (True, "")
return WaitRequest(request)
def CreateInfrastructure(radl_data, auth_data):
@@ -114,6 +115,14 @@ def GetInfrastructureContMsg(inf_id, auth_data):
request = IMBaseRequest.create_request(IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG,(inf_id, auth_data))
return WaitRequest(request)
+def StopVM(inf_id, vm_id, auth_data):
+ request = IMBaseRequest.create_request(IMBaseRequest.STOP_VM,(inf_id, vm_id, auth_data))
+ return WaitRequest(request)
+
+def StartVM(inf_id, vm_id, auth_data):
+ request = IMBaseRequest.create_request(IMBaseRequest.START_VM,(inf_id, vm_id, auth_data))
+ return WaitRequest(request)
+
def launch_daemon():
"""
Launch the IM daemon
@@ -150,6 +159,8 @@ def launch_daemon():
server.register_function(GetInfrastructureRADL)
server.register_function(GetInfrastructureContMsg)
server.register_function(GetVMContMsg)
+ server.register_function(StartVM)
+ server.register_function(StopVM)
InfrastructureManager.logger.info('************ Start Infrastructure Manager daemon (v.%s) ************' % version)
diff --git a/setup.py b/setup.py
index bde1304c2..95ccecff3 100644
--- a/setup.py
+++ b/setup.py
@@ -42,5 +42,5 @@
long_description="IM is a tool that ease the access and the usability of IaaS clouds by automating the VMI selection, deployment, configuration, software installation, monitoring and update of Virtual Appliances. It supports APIs from a large number of virtual platforms, making user applications cloud-agnostic. In addition it integrates a contextualization system to enable the installation and configuration of all the user required applications providing the user with a fully functional infrastructure.",
description="IM is a tool to manage virtual infrastructures on Cloud deployments",
platforms=["any"],
- install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply"]
+ install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply", "bottle"]
)
diff --git a/test/TestIM.py b/test/TestIM.py
index a42ca333a..e2dc94fc6 100755
--- a/test/TestIM.py
+++ b/test/TestIM.py
@@ -53,12 +53,13 @@ def tearDownClass(cls):
except Exception:
pass
- def wait_inf_state(self, state, timeout, incorrect_states = []):
+ def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None):
"""
Wait for an infrastructure to have a specific state
"""
- (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
- self.assertTrue(success, msg="ERROR calling the GetInfrastructureInfo function:" + str(vm_ids))
+ if not vm_ids:
+ (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling the GetInfrastructureInfo function:" + str(vm_ids))
err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED]
err_states.extend(incorrect_states)
@@ -229,12 +230,36 @@ def test_22_start(self):
"""
Test StartInfrastructure function
"""
+ # Ensure the VM is stopped before starting it
+ time.sleep(10)
(success, res) = self.server.StartInfrastructure(self.inf_id, self.auth_data)
self.assertTrue(success, msg="ERROR calling StartInfrastructure: " + str(res))
- all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING])
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING])
self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).")
+ def test_23_stop_vm(self):
+ """
+ Test StopVM function
+ """
+ (success, res) = self.server.StopVM(self.inf_id, 0, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling StopVM: " + str(res))
+
+ all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0])
+ self.assertTrue(all_stopped, msg="ERROR waiting the vm to be stopped (timeout).")
+
+ def test_24_start_vm(self):
+ """
+ Test StartVM function
+ """
+ # Ensure the VM is stopped before starting it
+ time.sleep(10)
+ (success, res) = self.server.StartVM(self.inf_id, 0, self.auth_data)
+ self.assertTrue(success, msg="ERROR calling StartVM: " + str(res))
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING], [0])
+ self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).")
+
def test_50_destroy(self):
"""
Test DestroyInfrastructure function
diff --git a/test/TestREST.py b/test/TestREST.py
index b42f973e0..5b3cbed17 100755
--- a/test/TestREST.py
+++ b/test/TestREST.py
@@ -60,16 +60,17 @@ def tearDownClass(cls):
except Exception:
pass
- def wait_inf_state(self, state, timeout, incorrect_states = []):
+ def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None):
"""
Wait for an infrastructure to have a specific state
"""
- self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data})
- resp = self.server.getresponse()
- output = str(resp.read())
- self.assertEqual(resp.status, 200, msg="ERROR getting infrastructure info:" + output)
-
- vm_ids = output.split("\n")
+ if not vm_ids:
+ self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR getting infrastructure info:" + output)
+
+ vm_ids = output.split("\n")
err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED]
err_states.extend(incorrect_states)
@@ -243,10 +244,28 @@ def test_70_start(self):
output = str(resp.read())
self.assertEqual(resp.status, 200, msg="ERROR starting the infrastructure:" + output)
- all_stopped = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING])
- self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be started (timeout).")
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING])
+ self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).")
+
+ def test_80_stop_vm(self):
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/stop", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR stopping the vm:" + output)
+
+ all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0])
+ self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be stopped (timeout).")
+
+ def test_90_start_vm(self):
+ self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/start", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data})
+ resp = self.server.getresponse()
+ output = str(resp.read())
+ self.assertEqual(resp.status, 200, msg="ERROR starting the vm:" + output)
+
+ all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING], [0])
+ self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).")
- def test_80_destroy(self):
+ def test_100_destroy(self):
self.server.request('DELETE', "/infrastructures/" + self.inf_id, headers = {'Authorization' : self.auth_data})
resp = self.server.getresponse()
output = str(resp.read())