From 5294cc385fc15e2de846a71b6bd8151483fc76cc Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 26 May 2015 10:13:25 +0200 Subject: [PATCH 01/23] Bugfix in Docker connector in case of multiple docker servers in auth data --- connectors/Docker.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/connectors/Docker.py b/connectors/Docker.py index 520bcd1b9..c4fef8d64 100644 --- a/connectors/Docker.py +++ b/connectors/Docker.py @@ -54,16 +54,19 @@ def get_http_connection(self, auth_data): """ self.cert_file or os.path.isfile(self.cert_file) - - - auth = auth_data.getAuthInfo(DockerCloudConnector.type) + url = uriparse(self.cloud.server) + auths = auth_data.getAuthInfo(DockerCloudConnector.type, url[1]) + if not auths: + self.logger.error("No correct auth data has been specified to Docker.") + else: + auth = auths[0] if url[0] == 'unix': socket_path = "/" + url[1] + url[2] conn = UnixHTTPConnection.UnixHTTPConnection(socket_path) elif url[0] == 'https': - if auth and 'cert' in auth[0] and 'key' in auth[0]: + if 'cert' in auth and 'key' in auth: if os.path.isfile(self.cert_file) and os.path.isfile(self.key_file): cert_file = self.cert_file key_file = self.key_file @@ -84,13 +87,13 @@ def get_user_cert_data(self, auth): """ Get the Docker private_key and public_key files from the auth data """ - certificate = auth[0]['cert'] + certificate = auth['cert'] fd, cert_file = tempfile.mkstemp() os.write(fd, certificate) os.close(fd) os.chmod(cert_file,0644) - private_key = auth[0]['key'] + private_key = auth['key'] fd, key_file = tempfile.mkstemp() os.write(fd, private_key) os.close(fd) From f1a0b2045af2b3b7a9c7f4ca42f21329316fc89d Mon Sep 17 00:00:00 2001 From: micafer Date: Tue, 26 May 2015 18:48:54 +0200 Subject: [PATCH 02/23] Add the first version of the kubernetes connector --- connectors/Kubernetes.py | 275 +++++++++++++++++++++++++++++++++++++++ connectors/__init__.py | 2 +- 2 files changed, 276 insertions(+), 1 deletion(-) create mode 
100644 connectors/Kubernetes.py diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py new file mode 100644 index 000000000..c38f8533e --- /dev/null +++ b/connectors/Kubernetes.py @@ -0,0 +1,275 @@ +# IM - Infrastructure Manager +# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import time +import string +import base64 +import json +import httplib +from IM.uriparse import uriparse +from IM.VirtualMachine import VirtualMachine +from CloudConnector import CloudConnector +from IM.radl.radl import Feature + + +class KubernetesCloudConnector(CloudConnector): + """ + Cloud Launcher to Kubernetes platform + """ + + type = "Kubernetes" + + _port_base_num = 35000 + """ Base number to assign SSH port on Docker server host.""" + _port_counter = 0 + """ Counter to assign SSH port on Docker server host.""" + _namespace = "default" + _apiVersion = "v1beta3" + + VM_STATE_MAP = { + 'Pending': VirtualMachine.PENDING, + 'Running': VirtualMachine.RUNNING, + 'Succeeded': VirtualMachine.OFF, + 'Failed': VirtualMachine.FAILED + } + """Dictionary with a map with the Kubernetes POD states to the IM states.""" + + def get_http_connection(self): + """ + Get the HTTPConnection object to contact the Kubernetes API + + Returns(HTTPConnection or HTTPSConnection): HTTPConnection connection object + """ + + url = uriparse(self.cloud.server) + + if url[0] == 
'https': + conn = httplib.HTTPSConnection(url[1], self.cloud.port) + elif url[0] == 'http': + self.logger.warn("Using a unsecure connection to Kubernetes API!") + conn = httplib.HTTPConnection(url[1], self.cloud.port) + + return conn + + def get_auth_header(self, auth_data): + """ + Generate the auth header needed to contact with the Kubernetes API server. + """ + url = uriparse(self.cloud.server) + auths = auth_data.getAuthInfo(self.type, url[1]) + if not auths: + self.logger.error("No correct auth data has been specified to Kubernetes.") + return None + else: + auth = auths[0] + + auth_header = None + + if 'username' in auth and 'password' in auth: + passwd = auth['password'] + user = auth['username'] + auth_header = { 'Authorization' : 'Basic ' + string.strip(base64.encodestring(user + ':' + passwd))} + elif 'token' in auth: + token = auth['token'] + auth_header = { 'Authorization' : 'Bearer ' + token } + + return auth_header + + + def concreteSystem(self, radl_system, auth_data): + if radl_system.getValue("disk.0.image.url"): + url = uriparse(radl_system.getValue("disk.0.image.url")) + protocol = url[0] + if protocol == 'docker' and url[1]: + res_system = radl_system.clone() + + res_system.addFeature(Feature("virtual_system_type", "=", "docker"), conflict="other", missing="other") + + res_system.getFeature("cpu.count").operator = "=" + res_system.getFeature("memory.size").operator = "=" + + res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other") + res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other") + res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other") + + return [res_system] + else: + return [] + else: + return [radl_system.clone()] + + def _generate_pod_data(self, outports, system, ssh_port): + cpu = str(system.getValue('cpu.count')) + memory = "%s" % system.getFeature('memory.size').getValue('B') + # The URI 
has this format: docker://image_name + image_name = system.getValue("disk.0.image.url")[9:] + name = "im%d" % int(time.time()*100) + + ports = [{'containerPort': 22, 'protocol': 'TCP', 'hostPort':ssh_port}] + if outports: + for remote_port,_,local_port,local_protocol in outports: + if local_port != 22: + ports.append({'containerPort':local_port, 'protocol': local_protocol.upper(), 'hostPort': remote_port}) + + pod_data = { 'apiVersion': self._apiVersion, 'kind': 'Pod' } + pod_data['metadata'] = { + 'name': name, + 'namespace': self._namespace, + 'labels': {'name': name} + } + containers = [{ + 'name': name, + 'image': image_name, + 'imagePullPolicy': 'IfNotPresent', + 'restartPolicy': 'Always', + 'ports': ports, + 'resources': {'limits': {'cpu': cpu, 'memory': memory}} + }] + + pod_data['spec'] = {'containers' : containers} + + return pod_data + + def launch(self, inf, radl, requested_radl, num_vm, auth_data): + system = radl.systems[0] + + public_net = None + for net in radl.networks: + if net.isPublic(): + public_net = net + + outports = None + if public_net: + outports = public_net.getOutPorts() + + auth_header = self.get_auth_header(auth_data) + conn = self.get_http_connection() + + res = [] + i = 0 + while i < num_vm: + try: + i += 1 + # Create the container + conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods") + conn.putheader('Content-Type', 'application/json') + if auth_header: + conn.putheader(auth_header.keys()[0], auth_header.values()[0]) + + ssh_port = KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter + KubernetesCloudConnector._port_counter += 1 + pod_data = self._generate_pod_data(outports, system, ssh_port) + body = json.dumps(pod_data) + conn.putheader('Content-Length', len(body)) + conn.endheaders(body) + + resp = conn.getresponse() + output = resp.read() + if resp.status != 201: + res.append((False, "Error creating the Container: " + output)) + else: + output = 
json.loads(output) + vm = VirtualMachine(inf, output["metadata"]["name"], self.cloud, radl, requested_radl, self) + # Set SSH port in the RADL info of the VM + vm.setSSHPort(ssh_port) + res.append((True, vm)) + + except Exception, ex: + self.logger.exception("Error connecting with Kubernetes API server") + res.append((False, "ERROR: " + str(ex))) + + return res + + def updateVMInfo(self, vm, auth_data): + try: + auth = self.get_auth_header(auth_data) + headers = {} + if auth: + headers.update(auth) + conn = self.get_http_connection() + + conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + vm.id, headers = headers) + resp = conn.getresponse() + + output = resp.read() + if resp.status == 404: + # If the container does not exist, set state to OFF + vm.state = VirtualMachine.OFF + return (True, vm) + elif resp.status != 200: + return (False, "Error getting info about the POD: " + output) + else: + output = json.loads(output) + vm.state = self.VM_STATE_MAP.get(output["status"]["phase"], VirtualMachine.UNKNOWN) + + # Update the network info + self.setIPs(vm,output) + return (True, vm) + + except Exception, ex: + self.logger.exception("Error connecting with Kubernetes API server") + self.logger.error(ex) + return (False, "Error connecting with Kubernetes API server") + + + def setIPs(self, vm, pod_info): + """ + Adapt the RADL information of the VM to the real IPs assigned by the cloud provider + + Arguments: + - vm(:py:class:`IM.VirtualMachine`): VM information. 
+ - pod_info(dict): JSON information about the POD + """ + + public_ips = [] + private_ips = [] + if 'hostIP' in pod_info["status"]: + public_ips = [pod_info["status"]["hostIP"]] + if 'podIP' in pod_info["status"]: + private_ips = [pod_info["status"]["podIP"]] + + vm.setIps(public_ips, private_ips) + + def finalize(self, vm, auth_data): + try: + auth = self.get_auth_header(auth_data) + headers = {} + if auth: + headers.update(auth) + conn = self.get_http_connection() + + conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + vm.id, headers = headers) + resp = conn.getresponse() + output = str(resp.read()) + if resp.status == 404: + self.logger.warn("Trying to remove a non existing POD id: " + vm.id) + return (True, vm.id) + elif resp.status != 200: + return (False, "Error deleting the POD: " + output) + else: + return (True, vm.id) + except Exception: + self.logger.exception("Error connecting with Kubernetes API server") + return (False, "Error connecting with Kubernetes API server") + + def stop(self, vm, auth_data): + return (False, "Not supported") + + def start(self, vm, auth_data): + return (False, "Not supported") + + def alterVM(self, vm, radl, auth_data): + return (False, "Not supported") diff --git a/connectors/__init__.py b/connectors/__init__.py index 8f4529c07..801dc15d4 100644 --- a/connectors/__init__.py +++ b/connectors/__init__.py @@ -15,4 +15,4 @@ # along with this program. If not, see . 
-__all__ = ['CloudConnector','EC2','OCCI','OpenNebula','OpenStack','LibVirt','LibCloud','Docker','GCE','FogBow', 'Azure', 'DeployedNode'] +__all__ = ['CloudConnector','EC2','OCCI','OpenNebula','OpenStack','LibVirt','LibCloud','Docker','GCE','FogBow', 'Azure', 'DeployedNode','Kubernetes'] From a5f60b0d14aa2884d486e94dbff77d396f261576 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 27 May 2015 10:43:34 +0200 Subject: [PATCH 03/23] Improve Docker connector code --- connectors/Docker.py | 111 +++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 62 deletions(-) diff --git a/connectors/Docker.py b/connectors/Docker.py index c4fef8d64..326439de9 100644 --- a/connectors/Docker.py +++ b/connectors/Docker.py @@ -59,6 +59,7 @@ def get_http_connection(self, auth_data): auths = auth_data.getAuthInfo(DockerCloudConnector.type, url[1]) if not auths: self.logger.error("No correct auth data has been specified to Docker.") + return None else: auth = auths[0] @@ -147,7 +148,7 @@ def setIPs(self, vm, cont_info): vm.setIps(public_ips, private_ips) def _generate_volumes(self, system): - volumes = ',"Volumes":{' + volumes = {} cont = 1 while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." 
+ str(cont) + ".device"): @@ -156,46 +157,56 @@ def _generate_volumes(self, system): if not disk_device.startswith('/'): disk_device = '/' + disk_device self.logger.debug("Attaching a volume in %s" % disk_device) - if cont > 1: - volumes += ',' - volumes += '"' + disk_device + '":{}' + volumes[disk_device] = {} cont += 1 - - if cont == 1: - volumes = "" - else: - volumes += "}" return volumes - - def _generate_port_bindings(self, outports, ssh_port): - port_bindings = "" - ssh_found = False - if outports: - num = 0 - for remote_port,_,local_port,local_protocol in outports: - if num > 0: - port_bindings = port_bindings + ",\n" - port_bindings = port_bindings + '"PortBindings":{ "' + str(local_port) + '/' + local_protocol + '": [{ "HostPort": "' + str(remote_port) + '" }] }' - num += 1 - - if not ssh_found: - if port_bindings: - port_bindings += ",\n" - port_bindings = port_bindings + '"PortBindings":{ "22/tcp": [{ "HostPort": "' + str(ssh_port) + '" }] }\n' - - return port_bindings - def launch(self, inf, radl, requested_radl, num_vm, auth_data): - system = radl.systems[0] + def _generate_create_request_data(self, outports, system, vm): + cont_data = {} - cpu = int(system.getValue('cpu.count')) + cpu = int(system.getValue('cpu.count')) - 1 memory = system.getFeature('memory.size').getValue('B') #name = system.getValue("disk.0.image.name") # The URI has this format: docker://image_name image_name = system.getValue("disk.0.image.url")[9:] + (nodename, nodedom) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN) + volumes = self._generate_volumes(system) + + exposed_ports = {"22/tcp": {}} + if outports: + for _,_,local_port,local_protocol in outports: + if local_port != 22: + exposed_ports[str(local_port) + '/' + local_protocol.lower()] = {} + + cont_data['Hostname'] = nodename + cont_data['Domainname'] = nodedom + cont_data['Cpuset'] = "0-%d" % cpu + cont_data['Memory'] = memory + cont_data['Cmd'] = ["/bin/bash", 
"-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:yoyoyo' | chpasswd ; /usr/sbin/sshd -D"] + cont_data['Image'] = image_name + cont_data['ExposedPorts'] = exposed_ports + if volumes: + cont_data['Volumes'] = volumes + + return cont_data + + def _generate_start_request_data(self, outports, ssh_port): + data = {} + + data["PortBindings"] = {} + data["PortBindings"]["22/tcp"] = [{"HostPort":ssh_port}] + if outports: + for remote_port,_,local_port,local_protocol in outports: + if local_port != 22: + data["PortBindings"][str(local_port) + '/' + local_protocol] = [{"HostPort":remote_port}] + + return data + + def launch(self, inf, radl, requested_radl, num_vm, auth_data): + system = radl.systems[0] public_net = None for net in radl.networks: @@ -206,12 +217,6 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if public_net: outports = public_net.getOutPorts() - exposed_ports = '"22/tcp": {}' - if outports: - for _,_,local_port,local_protocol in outports: - if local_port != 22: - exposed_ports = exposed_ports + ', "' + str(local_port) + '/' + local_protocol + '": {}' - conn = self.get_http_connection(auth_data) res = [] i = 0 @@ -221,28 +226,14 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): # Create the VM to get the nodename vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl, self) - (nodename, nodedom) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN) - - create_request_json = """ { - "Hostname":"%s", - "Domainname": "%s", - "Cpuset": "0-%d", - "Memory":%s, - "Cmd":[ - "/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && 
service ssh stop ; echo 'root:yoyoyo' | chpasswd ; /usr/sbin/sshd -D" - ], - "Image":"%s", - "ExposedPorts":{ - %s - } - %s - }""" % (nodename, nodedom, cpu-1, memory,image_name,exposed_ports,volumes) # Create the container conn.putrequest('POST', "/containers/create") conn.putheader('Content-Type', 'application/json') - body = create_request_json + cont_data = self._generate_create_request_data(outports, system, vm) + body = json.dumps(cont_data) + conn.putheader('Content-Length', len(body)) conn.endheaders(body) @@ -259,20 +250,16 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): conn.putrequest('POST', "/containers/" + docker_vm_id + "/start") conn.putheader('Content-Type', 'application/json') - start_request_json = "{}" + start_req = {} # If the user requested a public IP, specify the PortBindings ssh_port = 22 if public_net: - start_request_json = " { " - - ssh_port = DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter + ssh_port = (DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter) % 65535 DockerCloudConnector._port_counter += 1 + start_req = self._generate_start_request_data(outports, ssh_port) + + body = json.dumps(start_req) - start_request_json += self._generate_port_bindings(outports, ssh_port) - - start_request_json += "}" - - body = start_request_json conn.putheader('Content-Length', len(body)) conn.endheaders(body) From 7fa2f6df949c45e668d126ff4b7c48bb2c906b28 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 27 May 2015 18:49:10 +0200 Subject: [PATCH 04/23] Minor bugfixes in connectors --- connectors/Docker.py | 7 +++- connectors/Kubernetes.py | 70 +++++++++++++++++++++++++++++++++++++--- connectors/OCCI.py | 7 ++-- connectors/OpenNebula.py | 4 +++ 4 files changed, 80 insertions(+), 8 deletions(-) diff --git a/connectors/Docker.py b/connectors/Docker.py index 326439de9..08d5b8620 100644 --- a/connectors/Docker.py +++ b/connectors/Docker.py @@ -38,6 +38,8 @@ class 
DockerCloudConnector(CloudConnector): """ Base number to assign SSH port on Docker server host.""" _port_counter = 0 """ Counter to assign SSH port on Docker server host.""" + _root_password = "Aspecial+0ne" + """ Default password to set to the root in the container""" def __init__(self, cloud_info): self.cert_file = '' @@ -185,7 +187,7 @@ def _generate_create_request_data(self, outports, system, vm): cont_data['Domainname'] = nodedom cont_data['Cpuset'] = "0-%d" % cpu cont_data['Memory'] = memory - cont_data['Cmd'] = ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:yoyoyo' | chpasswd ; /usr/sbin/sshd -D"] + cont_data['Cmd'] = ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"] cont_data['Image'] = image_name cont_data['ExposedPorts'] = exposed_ports if volumes: @@ -275,6 +277,9 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): # Now set the cloud id to the VM vm.id = docker_vm_id + # Set the default user and password to access the container + vm.info.systems[0].setValue('disk.0.os.credentials.username', 'root') + vm.info.systems[0].setValue('disk.0.os.credentials.password', self._root_password) # Set ssh port in the RADL info of the VM vm.setSSHPort(ssh_port) diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index c38f8533e..4551fd21f 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -33,11 +33,15 @@ class KubernetesCloudConnector(CloudConnector): type = "Kubernetes" _port_base_num = 35000 - """ Base number to assign SSH port on Docker server host.""" + """ 
Base number to assign SSH port on Kubernetes node.""" _port_counter = 0 - """ Counter to assign SSH port on Docker server host.""" + """ Counter to assign SSH port on Kubernetes node.""" + _root_password = "Aspecial+0ne" + """ Default password to set to the root in the container""" _namespace = "default" + """ Default namespace""" _apiVersion = "v1beta3" + """ API version to access""" VM_STATE_MAP = { 'Pending': VirtualMachine.PENDING, @@ -133,6 +137,7 @@ def _generate_pod_data(self, outports, system, ssh_port): containers = [{ 'name': name, 'image': image_name, + 'command': ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"], 'imagePullPolicy': 'IfNotPresent', 'restartPolicy': 'Always', 'ports': ports, @@ -169,7 +174,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) - ssh_port = KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter + ssh_port = (KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter) % 65535 KubernetesCloudConnector._port_counter += 1 pod_data = self._generate_pod_data(outports, system, ssh_port) body = json.dumps(pod_data) @@ -184,7 +189,11 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): output = json.loads(output) vm = VirtualMachine(inf, output["metadata"]["name"], self.cloud, radl, requested_radl, self) # Set SSH port in the RADL info of the VM - vm.setSSHPort(ssh_port) + vm.setSSHPort(ssh_port) + # Set the default user and password to access the container + vm.info.systems[0].setValue('disk.0.os.credentials.username', 'root') + vm.info.systems[0].setValue('disk.0.os.credentials.password', self._root_password) + 
res.append((True, vm)) except Exception, ex: @@ -272,4 +281,57 @@ def start(self, vm, auth_data): return (False, "Not supported") def alterVM(self, vm, radl, auth_data): + # This function is correctly implemented + # But kubernetes does not permit cpu to be updated yet + system = radl.systems[0] + + auth_header = self.get_auth_header(auth_data) + conn = self.get_http_connection() + + try: + pod_data = [] + + cpu = vm.info.systems[0].getValue('cpu.count') + memory = vm.info.systems[0].getFeature('memory.size').getValue('B') + + new_cpu = system.getValue('cpu.count') + new_memory = system.getFeature('memory.size').getValue('B') + + changed = False + if new_cpu and new_cpu != cpu: + pod_data.append({"op": "replace", "path": "/spec/containers/0/resources/limits/cpu", "value": new_cpu}) + changed = True + if new_memory and new_memory != memory: + pod_data.append({"op": "replace", "path": "/spec/containers/0/resources/limits/memory", "value": new_memory}) + changed = True + + if not changed: + self.logger.debug("Nothing changes in the kubernetes pod: " + str(vm.id)) + return (True, vm) + + # Create the container + conn.putrequest('PATCH', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + str(vm.id)) + conn.putheader('Content-Type', 'application/json-patch+json') + if auth_header: + conn.putheader(auth_header.keys()[0], auth_header.values()[0]) + body = json.dumps(pod_data) + conn.putheader('Content-Length', len(body)) + conn.endheaders(body) + + resp = conn.getresponse() + output = resp.read() + if resp.status != 201: + return (False, "Error updating the Pod: " + output) + else: + if new_cpu: + vm.info.systems[0].setValue('cpu.count', new_cpu) + if new_memory: + vm.info.systems[0].addFeature(Feature("memory.size", "=", new_memory, 'B'), conflict="other", missing="other") + return (True, self.updateVMInfo(vm, auth_data)) + + except Exception, ex: + self.logger.exception("Error connecting with Kubernetes API server") + return (False, "ERROR: " + 
str(ex)) + + return (False, "Not supported") diff --git a/connectors/OCCI.py b/connectors/OCCI.py index 315212a00..200ae9adb 100644 --- a/connectors/OCCI.py +++ b/connectors/OCCI.py @@ -56,7 +56,7 @@ def get_https_connection(self, auth, server, port): Get a HTTPS connection with the specified server. It uses a proxy file if it has been specified in the auth credentials """ - if 'proxy' in auth: + if auth and 'proxy' in auth: if self.proxy_filename and os.path.isfile(self.proxy_filename): proxy_filename = self.proxy_filename else: @@ -78,6 +78,7 @@ def get_http_connection(self, auth_data): auths = auth_data.getAuthInfo(self.type, self.cloud.server) if not auths: self.logger.error("No correct auth data has been specified to OCCI.") + auth = None else: auth = auths[0] @@ -345,10 +346,10 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): (public_key, private_key) = self.keygen() system.setValue('disk.0.os.credentials.private_key', private_key) - user = system.getValue('disk.os.credentials.username') + user = system.getValue('disk.0.os.credentials.username') if not user: user = "cloudadm" - system.setValue('disk.os.credentials.username', user) + system.setValue('disk.0.os.credentials.username', user) cloud_config = self.gen_cloud_config(public_key, user) user_data = base64.b64encode(cloud_config).replace("\n","") diff --git a/connectors/OpenNebula.py b/connectors/OpenNebula.py index b9d052538..9164f7c6a 100644 --- a/connectors/OpenNebula.py +++ b/connectors/OpenNebula.py @@ -147,11 +147,15 @@ def getSessionID(self, auth_data, hash_password = None): Returns: str with the Session ID """ if self.session_id: + # TODO: known issue: If the IM service is restarted, the first attempt to access this VM + # will set this session_id. If the credentials are not correct the session_id will be always + # incorrect until the IM service is restarted again ... 
return self.session_id else: auths = auth_data.getAuthInfo(self.type, self.cloud.server) if not auths: self.logger.error("No correct auth data has been specified to OpenNebula.") + return None else: auth = auths[0] From 8dd90d1761a73bebf36acc8da64de379a7fe1647 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 27 May 2015 18:50:59 +0200 Subject: [PATCH 05/23] Set num version to 1.2.5 --- IM/__init__.py | 2 +- changelog | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/IM/__init__.py b/IM/__init__.py index d829a0fcd..9355a4b64 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -16,6 +16,6 @@ __all__ = ['auth','bottle','CloudManager','config','ConfManager','db','ganglia','HTTPHeaderTransport','ImageManager','InfrastructureInfo','InfrastructureManager','parsetab','radl','recipe','request','REST', 'ServiceRequests','SSH','timedcall','uriparse','VMRC','xmlobject'] -__version__ = '1.2.4' +__version__ = '1.2.5' __author__ = 'Miguel Caballer' diff --git a/changelog b/changelog index 4b6e3e564..88fd677c3 100644 --- a/changelog +++ b/changelog @@ -113,3 +113,8 @@ IM 1.2.4 * Dynamically refresh the Ctxt output * Minor bugfix in EC2 connector when deleting a non existing instance +IM 1.2.5 + * Bugfix in OCCI, OpenNebula and Docker connectors when using incorrect credentials. + * Improve Docker connector code. + * Add Kubernetes connector. 
+ From 5715d021ff9126ecf16a0bada9d0c22a92b6267c Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 28 May 2015 09:14:28 +0200 Subject: [PATCH 06/23] Bugfix in getting a correct VM state --- connectors/OCCI.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/connectors/OCCI.py b/connectors/OCCI.py index 200ae9adb..ea4246610 100644 --- a/connectors/OCCI.py +++ b/connectors/OCCI.py @@ -43,7 +43,7 @@ class OCCICloudConnector(CloudConnector): 'active': VirtualMachine.RUNNING, 'inactive': VirtualMachine.OFF, 'error': VirtualMachine.FAILED, - 'suspended': VirtualMachine.OFF + 'suspended': VirtualMachine.STOPPED } """Dictionary with a map with the OCCI VM states to the IM states.""" @@ -228,7 +228,15 @@ def updateVMInfo(self, vm, auth_data): elif resp.status != 200: return (False, resp.reason + "\n" + output) else: - vm.state = self.VM_STATE_MAP.get(self.get_occi_attribute_value(output, 'occi.compute.state'), VirtualMachine.UNKNOWN) + old_state = vm.state + occi_state = self.get_occi_attribute_value(output, 'occi.compute.state') + + # I have to do that because OCCI returns 'inactive' when a VM is starting + # to distinguish from the OFF state + if old_state == VirtualMachine.PENDING and occi_state == 'inactive': + vm.state = VirtualMachine.PENDING + else: + vm.state = self.VM_STATE_MAP.get(occi_state, VirtualMachine.UNKNOWN) cores = self.get_occi_attribute_value(output, 'occi.compute.cores') if cores: From b863b32e6ab08d484d8b737ef6cde7acde657835 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 28 May 2015 10:59:05 +0200 Subject: [PATCH 07/23] Update Docker conn to API 1.18 --- connectors/Docker.py | 127 +++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 59 deletions(-) diff --git a/connectors/Docker.py b/connectors/Docker.py index 08d5b8620..bce2aeb6b 100644 --- a/connectors/Docker.py +++ b/connectors/Docker.py @@ -149,22 +149,7 @@ def setIPs(self, vm, cont_info): vm.setIps(public_ips, private_ips) - def 
_generate_volumes(self, system): - volumes = {} - - cont = 1 - while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".device"): - # Use the device as the volume dir - disk_device = system.getValue("disk." + str(cont) + ".device") - if not disk_device.startswith('/'): - disk_device = '/' + disk_device - self.logger.debug("Attaching a volume in %s" % disk_device) - volumes[disk_device] = {} - cont += 1 - - return volumes - - def _generate_create_request_data(self, outports, system, vm): + def _generate_create_request_data(self, outports, system, vm, ssh_port): cont_data = {} cpu = int(system.getValue('cpu.count')) - 1 @@ -176,36 +161,74 @@ def _generate_create_request_data(self, outports, system, vm): (nodename, nodedom) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN) volumes = self._generate_volumes(system) - - exposed_ports = {"22/tcp": {}} - if outports: - for _,_,local_port,local_protocol in outports: - if local_port != 22: - exposed_ports[str(local_port) + '/' + local_protocol.lower()] = {} cont_data['Hostname'] = nodename cont_data['Domainname'] = nodedom - cont_data['Cpuset'] = "0-%d" % cpu - cont_data['Memory'] = memory cont_data['Cmd'] = ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"] cont_data['Image'] = image_name - cont_data['ExposedPorts'] = exposed_ports + cont_data['ExposedPorts'] = self._generate_exposed_ports(outports) if volumes: cont_data['Volumes'] = volumes - return cont_data + HostConfig = {} + HostConfig['CpusetCpus'] = "0-%d" % cpu + HostConfig['Memory'] = memory + HostConfig['PortBindings'] = self._generate_port_bindings(outports, ssh_port) + HostConfig['Binds'] = 
self._generate_volumes_binds(system) + cont_data['HostConfig'] = HostConfig + + return cont_data + + def _generate_volumes_binds(self, system): + binds = [] + + cont = 1 + while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." + str(cont) + ".device"): + disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path") + # Use the device as volume host path to bind + disk_device = system.getValue("disk." + str(cont) + ".device") + if not disk_mount_path.startswith('/'): + disk_mount_path = '/' + disk_mount_path + if not disk_device.startswith('/'): + disk_device = '/' + disk_device + self.logger.debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path)) + binds.append(disk_device + ":" + disk_mount_path) + cont += 1 - def _generate_start_request_data(self, outports, ssh_port): - data = {} + return binds + + def _generate_volumes(self, system): + volumes = {} - data["PortBindings"] = {} - data["PortBindings"]["22/tcp"] = [{"HostPort":ssh_port}] + cont = 1 + while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path"): + # Use the mount_path as the volume dir + disk_mount_path = system.getValue("disk." 
+ str(cont) + ".mount_path") + if not disk_mount_path.startswith('/'): + disk_mount_path = '/' + disk_mount_path + self.logger.debug("Attaching a volume in %s" % disk_mount_path) + volumes[disk_mount_path] = {} + cont += 1 + + return volumes + + def _generate_exposed_ports(self, outports): + exposed_ports = {"22/tcp": {}} + if outports: + for _,_,local_port,local_protocol in outports: + if local_port != 22: + exposed_ports[str(local_port) + '/' + local_protocol.lower()] = {} + return exposed_ports + + def _generate_port_bindings(self, outports, ssh_port): + res = {} + res["22/tcp"] = [{"HostPort":ssh_port}] if outports: for remote_port,_,local_port,local_protocol in outports: if local_port != 22: - data["PortBindings"][str(local_port) + '/' + local_protocol] = [{"HostPort":remote_port}] + res[str(local_port) + '/' + local_protocol] = [{"HostPort":remote_port}] - return data + return res def launch(self, inf, radl, requested_radl, num_vm, auth_data): system = radl.systems[0] @@ -226,6 +249,11 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): try: i += 1 + ssh_port = 22 + if public_net: + ssh_port = (DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter) % 65535 + DockerCloudConnector._port_counter += 1 + # Create the VM to get the nodename vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl, self) @@ -233,7 +261,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): conn.putrequest('POST', "/containers/create") conn.putheader('Content-Type', 'application/json') - cont_data = self._generate_create_request_data(outports, system, vm) + cont_data = self._generate_create_request_data(outports, system, vm, ssh_port) body = json.dumps(cont_data) conn.putheader('Content-Length', len(body)) @@ -246,37 +274,19 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): continue output = json.loads(output) - docker_vm_id = output["Id"] - - # Now start it - conn.putrequest('POST', "/containers/" + docker_vm_id 
+ "/start") - conn.putheader('Content-Type', 'application/json') - - start_req = {} - # If the user requested a public IP, specify the PortBindings - ssh_port = 22 - if public_net: - ssh_port = (DockerCloudConnector._port_base_num + DockerCloudConnector._port_counter) % 65535 - DockerCloudConnector._port_counter += 1 - start_req = self._generate_start_request_data(outports, ssh_port) - - body = json.dumps(start_req) + # Set the cloud id to the VM + vm.id = output["Id"] - conn.putheader('Content-Length', len(body)) - conn.endheaders(body) - - resp = conn.getresponse() - output = resp.read() - if resp.status != 204: - res.append((False, "Error staring the Container: " + output)) + # Now start it + success, _ = self.start(vm, auth_data) + if not success: + res.append((False, "Error starting the Container: " + output)) # Delete the container - conn.request('DELETE', "/containers/" + docker_vm_id) + conn.request('DELETE', "/containers/" + vm.id) resp = conn.getresponse() resp.read() continue - # Now set the cloud id to the VM - vm.id = docker_vm_id # Set the default user and password to access the container vm.info.systems[0].setValue('disk.0.os.credentials.username', 'root') vm.info.systems[0].setValue('disk.0.os.credentials.password', self._root_password) @@ -342,7 +352,6 @@ def finalize(self, vm, auth_data): self.logger.exception("Error connecting with Docker server") return (False, "Error connecting with Docker server") - def stop(self, vm, auth_data): try: conn = self.get_http_connection(auth_data) From f40bc0c2a70c093a2b4916293920d1211ef2d91d Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 28 May 2015 14:28:32 +0200 Subject: [PATCH 08/23] add mount_path attribute to disks --- IM/radl/radl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/IM/radl/radl.py b/IM/radl/radl.py index 0878cd207..853eeafd1 100644 --- a/IM/radl/radl.py +++ b/IM/radl/radl.py @@ -1055,6 +1055,7 @@ def check_app(f, x): "image.name": (str, None), "type": (str, ["SWAP", "ISO", 
"FILESYSTEM"]), "device": (str, None), + "mount_path": (str, None), "size": (float, positive, mem_units), "free_size": (float, positive, mem_units), "os.name": (str, ["LINUX", "WINDOWS", "MAC OS X"]), From 19f1ea75c0f56ee6e60351ca7d58d4e4c7ce41e8 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 28 May 2015 14:28:57 +0200 Subject: [PATCH 09/23] Complete the kubernetes conn --- connectors/Kubernetes.py | 166 ++++++++++++++++++++++++++++++++++----- 1 file changed, 146 insertions(+), 20 deletions(-) diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index 4551fd21f..5ce448b8a 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -92,7 +92,6 @@ def get_auth_header(self, auth_data): return auth_header - def concreteSystem(self, radl_system, auth_data): if radl_system.getValue("disk.0.image.url"): url = uriparse(radl_system.getValue("disk.0.image.url")) @@ -114,13 +113,101 @@ def concreteSystem(self, radl_system, auth_data): return [] else: return [radl_system.clone()] - - def _generate_pod_data(self, outports, system, ssh_port): + + def _delete_volume_claim(self, vc_name, auth_data): + try: + auth = self.get_auth_header(auth_data) + headers = {} + if auth: + headers.update(auth) + conn = self.get_http_connection() + + conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/persistentvolumeclaims/" + vc_name, headers = headers) + resp = conn.getresponse() + output = str(resp.read()) + if resp.status == 404: + self.logger.warn("Trying to remove a non existing PersistentVolumeClaim: " + vc_name) + return True + elif resp.status != 200: + self.logger.error("Error deleting the PersistentVolumeClaim: " + output) + return False + else: + return True + except Exception: + self.logger.exception("Error connecting with Kubernetes API server") + return False + + def _delete_volume_claims(self, pod_data, auth_data): + if 'volumes' in pod_data['spec']: + for volume in pod_data['spec']['volumes']: + if 
'persistentVolumeClaim' in volume and 'claimName' in volume['persistentVolumeClaim']: + vc_name = volume['persistentVolumeClaim']['claimName'] + success = self._delete_volume_claim(vc_name, auth_data) + if not success: + self.logger.error("Error deleting PersistentVolumeClaim:" + vc_name) + + def _create_volume_claim(self, claim_data, auth_data): + try: + auth_header = self.get_auth_header(auth_data) + conn = self.get_http_connection() + + conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/persistentvolumeclaims") + conn.putheader('Content-Type', 'application/json') + if auth_header: + conn.putheader(auth_header.keys()[0], auth_header.values()[0]) + + body = json.dumps(claim_data) + conn.putheader('Content-Length', len(body)) + conn.endheaders(body) + resp = conn.getresponse() + + output = str(resp.read()) + if resp.status != 201: + self.logger.error("Error deleting the POD: " + output) + return False + else: + return True + except Exception: + self.logger.exception("Error connecting with Kubernetes API server") + return False + + def _create_volumes(self, system, pod_name, auth_data, persistent = False): + res = [] + cont = 1 + while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." + str(cont) + ".device"): + disk_mount_path = system.getValue("disk." + str(cont) + ".mount_path") + # Use the device as volume host path to bind + disk_device = system.getValue("disk." + str(cont) + ".device") + disk_size = system.getFeature("disk." 
+ str(cont) + ".size").getValue('B') + if not disk_mount_path.startswith('/'): + disk_mount_path = '/' + disk_mount_path + if not disk_device.startswith('/'): + disk_device = '/' + disk_device + self.logger.debug("Binding a volume in %s to %s" % (disk_device, disk_mount_path)) + name = "%s-%d" % (pod_name, cont) + + if persistent: + claim_data = { 'apiVersion': self._apiVersion, 'kind': 'PersistentVolumeClaim' } + claim_data['metadata'] = { 'name': name, 'namespace': self._namespace } + claim_data['spec'] = { 'accessModes' : ['ReadWriteOnce'], 'resources' : {'requests' : {'storage' : disk_size} } } + + success = self._create_volume_claim(claim_data, auth_data) + if success: + res.append((name, disk_size, disk_mount_path, persistent)) + else: + self.logger.error("Error creating PersistentVolumeClaim:" + name) + else: + res.append((name, disk_size, disk_mount_path, persistent)) + + cont += 1 + + return res + + def _generate_pod_data(self, name, outports, system, ssh_port, volumes): cpu = str(system.getValue('cpu.count')) memory = "%s" % system.getFeature('memory.size').getValue('B') # The URI has this format: docker://image_name image_name = system.getValue("disk.0.image.url")[9:] - name = "im%d" % int(time.time()*100) ports = [{'containerPort': 22, 'protocol': 'TCP', 'hostPort':ssh_port}] if outports: @@ -144,8 +231,21 @@ def _generate_pod_data(self, outports, system, ssh_port): 'resources': {'limits': {'cpu': cpu, 'memory': memory}} }] + if volumes: + containers[0]['volumeMounts'] = [] + for (v_name, _, v_mount_path, _) in volumes: + containers[0]['volumeMounts'].append({'name':v_name, 'mountPath':v_mount_path}) + pod_data['spec'] = {'containers' : containers} + if volumes: + pod_data['spec']['volumes'] = [] + for (v_name, _, _, persistent) in volumes: + if persistent: + pod_data['spec']['volumes'].append({'name': v_name, 'persistentVolumeClaim': {'claimName': v_name}}) + else: + pod_data['spec']['volumes'].append({'name': v_name, 'emptyDir:': {}}) + return 
pod_data def launch(self, inf, radl, requested_radl, num_vm, auth_data): @@ -168,7 +268,11 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): while i < num_vm: try: i += 1 - # Create the container + pod_name = "im%d" % int(time.time()*100) + # Do not use the Persistent volumes yet + volumes = self._create_volumes(system, pod_name, auth_data) + + # Create the pod conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods") conn.putheader('Content-Type', 'application/json') if auth_header: @@ -176,7 +280,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): ssh_port = (KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter) % 65535 KubernetesCloudConnector._port_counter += 1 - pod_data = self._generate_pod_data(outports, system, ssh_port) + pod_data = self._generate_pod_data(pod_name, outports, system, ssh_port, volumes) body = json.dumps(pod_data) conn.putheader('Content-Length', len(body)) conn.endheaders(body) @@ -202,7 +306,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): return res - def updateVMInfo(self, vm, auth_data): + def _get_pod(self, pod_name, auth_data): try: auth = self.get_auth_header(auth_data) headers = {} @@ -210,16 +314,29 @@ def updateVMInfo(self, vm, auth_data): headers.update(auth) conn = self.get_http_connection() - conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + vm.id, headers = headers) + conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = resp.read() + if resp.status == 404: + return (True, resp.status, output) + elif resp.status != 200: + return (False, resp.status, output) + else: + return (True, resp.status, output) + + except Exception, ex: + self.logger.exception("Error connecting with Kubernetes API server") + return (False, None, "Error connecting with Kubernetes API 
server: " + str(ex)) + + def updateVMInfo(self, vm, auth_data): + success, status, output = self._get_pod(vm.id, auth_data) + if success: + if status == 404: # If the container does not exist, set state to OFF vm.state = VirtualMachine.OFF return (True, vm) - elif resp.status != 200: - return (False, "Error getting info about the POD: " + output) else: output = json.loads(output) vm.state = self.VM_STATE_MAP.get(output["status"]["phase"], VirtualMachine.UNKNOWN) @@ -227,12 +344,9 @@ def updateVMInfo(self, vm, auth_data): # Update the network info self.setIPs(vm,output) return (True, vm) - - except Exception, ex: - self.logger.exception("Error connecting with Kubernetes API server") - self.logger.error(ex) - return (False, "Error connecting with Kubernetes API server") - + else: + self.logger.error("Error getting info about the POD: " + output) + return (False, "Error getting info about the POD: " + output) def setIPs(self, vm, pod_info): """ @@ -253,6 +367,18 @@ def setIPs(self, vm, pod_info): vm.setIps(public_ips, private_ips) def finalize(self, vm, auth_data): + success, status, output = self._get_pod(vm.id, auth_data) + if success: + if status == 404: + self.logger.warn("Trying to remove a non existing POD id: " + vm.id) + return (True, vm.id) + else: + pod_data = json.loads(output) + self._delete_volume_claims(pod_data, auth_data) + + return self._delete_pod(vm.id, auth_data) + + def _delete_pod(self, pod_name, auth_data): try: auth = self.get_auth_header(auth_data) headers = {} @@ -260,16 +386,16 @@ def finalize(self, vm, auth_data): headers.update(auth) conn = self.get_http_connection() - conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + vm.id, headers = headers) + conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = str(resp.read()) if resp.status == 404: - self.logger.warn("Trying to remove a non 
existing POD id: " + vm.id) - return (True, vm.id) + self.logger.warn("Trying to remove a non existing POD id: " + pod_name) + return (True, pod_name) elif resp.status != 200: return (False, "Error deleting the POD: " + output) else: - return (True, vm.id) + return (True, pod_name) except Exception: self.logger.exception("Error connecting with Kubernetes API server") return (False, "Error connecting with Kubernetes API server") From 4bd13fa2e8fc17a9b28498da6c206544df8db2cf Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 28 May 2015 14:39:14 +0200 Subject: [PATCH 10/23] Complete the kubernetes conn --- connectors/Kubernetes.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index 5ce448b8a..41bc3180a 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -193,11 +193,11 @@ def _create_volumes(self, system, pod_name, auth_data, persistent = False): success = self._create_volume_claim(claim_data, auth_data) if success: - res.append((name, disk_size, disk_mount_path, persistent)) + res.append((name, disk_device, disk_size, disk_mount_path, persistent)) else: self.logger.error("Error creating PersistentVolumeClaim:" + name) else: - res.append((name, disk_size, disk_mount_path, persistent)) + res.append((name, disk_device, disk_size, disk_mount_path, persistent)) cont += 1 @@ -233,18 +233,22 @@ def _generate_pod_data(self, name, outports, system, ssh_port, volumes): if volumes: containers[0]['volumeMounts'] = [] - for (v_name, _, v_mount_path, _) in volumes: + for (v_name, _, _, v_mount_path, _) in volumes: containers[0]['volumeMounts'].append({'name':v_name, 'mountPath':v_mount_path}) pod_data['spec'] = {'containers' : containers} if volumes: pod_data['spec']['volumes'] = [] - for (v_name, _, _, persistent) in volumes: + for (v_name, v_device, _, _, persistent) in volumes: if persistent: pod_data['spec']['volumes'].append({'name': v_name, 'persistentVolumeClaim': 
{'claimName': v_name}}) else: - pod_data['spec']['volumes'].append({'name': v_name, 'emptyDir:': {}}) + if v_device: + # Use the device as volume host path to bind + pod_data['spec']['volumes'].append({'name': v_name, 'hostPath:': {'path': v_device}}) + else: + pod_data['spec']['volumes'].append({'name': v_name, 'emptyDir:': {}}) return pod_data From 57a2f1419970b19e01627edbf4b508f12a49887f Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 29 May 2015 09:44:19 +0200 Subject: [PATCH 11/23] Minor Bugfixes --- connectors/Docker.py | 3 +++ connectors/Kubernetes.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/connectors/Docker.py b/connectors/Docker.py index bce2aeb6b..0eed7298f 100644 --- a/connectors/Docker.py +++ b/connectors/Docker.py @@ -117,6 +117,9 @@ def concreteSystem(self, radl_system, auth_data): res_system.getFeature("cpu.count").operator = "=" res_system.getFeature("memory.size").operator = "=" + res_system.setValue('disk.0.os.credentials.username', 'root') + res_system.setValue('disk.0.os.credentials.password', self._root_password) + res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other") res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other") res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other") diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index 41bc3180a..1ba916462 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -104,6 +104,9 @@ def concreteSystem(self, radl_system, auth_data): res_system.getFeature("cpu.count").operator = "=" res_system.getFeature("memory.size").operator = "=" + res_system.setValue('disk.0.os.credentials.username', 'root') + res_system.setValue('disk.0.os.credentials.password', self._root_password) + res_system.addFeature(Feature("provider.type", "=", self.type), conflict="other", missing="other") 
res_system.addFeature(Feature("provider.host", "=", self.cloud.server), conflict="other", missing="other") res_system.addFeature(Feature("provider.port", "=", self.cloud.port), conflict="other", missing="other") @@ -226,7 +229,7 @@ def _generate_pod_data(self, name, outports, system, ssh_port, volumes): 'image': image_name, 'command': ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"], 'imagePullPolicy': 'IfNotPresent', - 'restartPolicy': 'Always', + 'restartPolicy': 'Never', 'ports': ports, 'resources': {'limits': {'cpu': cpu, 'memory': memory}} }] From 410dd9f78bf2afa7bff484caae6c64535743e3d0 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 29 May 2015 13:24:51 +0200 Subject: [PATCH 12/23] Improve Kubernetes connector --- connectors/Kubernetes.py | 55 ++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index 1ba916462..e3dbd0d42 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -23,6 +23,7 @@ from IM.VirtualMachine import VirtualMachine from CloudConnector import CloudConnector from IM.radl.radl import Feature +from IM.config import Config class KubernetesCloudConnector(CloudConnector): @@ -38,8 +39,6 @@ class KubernetesCloudConnector(CloudConnector): """ Counter to assign SSH port on Kubernetes node.""" _root_password = "Aspecial+0ne" """ Default password to set to the root in the container""" - _namespace = "default" - """ Default namespace""" _apiVersion = "v1beta3" """ API version to access""" @@ -117,7 +116,7 @@ def concreteSystem(self, radl_system, auth_data): else: return [radl_system.clone()] - def _delete_volume_claim(self, vc_name, auth_data): + def 
_delete_volume_claim(self, namespace, vc_name, auth_data): try: auth = self.get_auth_header(auth_data) headers = {} @@ -125,7 +124,7 @@ def _delete_volume_claim(self, vc_name, auth_data): headers.update(auth) conn = self.get_http_connection() - conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/persistentvolumeclaims/" + vc_name, headers = headers) + conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/persistentvolumeclaims/" + vc_name, headers = headers) resp = conn.getresponse() output = str(resp.read()) if resp.status == 404: @@ -145,7 +144,7 @@ def _delete_volume_claims(self, pod_data, auth_data): for volume in pod_data['spec']['volumes']: if 'persistentVolumeClaim' in volume and 'claimName' in volume['persistentVolumeClaim']: vc_name = volume['persistentVolumeClaim']['claimName'] - success = self._delete_volume_claim(vc_name, auth_data) + success = self._delete_volume_claim(pod_data["metadata"]["namespace"], vc_name, auth_data) if not success: self.logger.error("Error deleting PersistentVolumeClaim:" + vc_name) @@ -154,7 +153,7 @@ def _create_volume_claim(self, claim_data, auth_data): auth_header = self.get_auth_header(auth_data) conn = self.get_http_connection() - conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/persistentvolumeclaims") + conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + claim_data['metadata']['namespace'] + "/persistentvolumeclaims") conn.putheader('Content-Type', 'application/json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) @@ -174,7 +173,7 @@ def _create_volume_claim(self, claim_data, auth_data): self.logger.exception("Error connecting with Kubernetes API server") return False - def _create_volumes(self, system, pod_name, auth_data, persistent = False): + def _create_volumes(self, namespace, system, pod_name, auth_data, persistent = False): res = [] cont = 1 while 
system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." + str(cont) + ".device"): @@ -191,7 +190,7 @@ def _create_volumes(self, system, pod_name, auth_data, persistent = False): if persistent: claim_data = { 'apiVersion': self._apiVersion, 'kind': 'PersistentVolumeClaim' } - claim_data['metadata'] = { 'name': name, 'namespace': self._namespace } + claim_data['metadata'] = { 'name': name, 'namespace': namespace } claim_data['spec'] = { 'accessModes' : ['ReadWriteOnce'], 'resources' : {'requests' : {'storage' : disk_size} } } success = self._create_volume_claim(claim_data, auth_data) @@ -206,7 +205,7 @@ def _create_volumes(self, system, pod_name, auth_data, persistent = False): return res - def _generate_pod_data(self, name, outports, system, ssh_port, volumes): + def _generate_pod_data(self, namespace, name, outports, system, ssh_port, volumes): cpu = str(system.getValue('cpu.count')) memory = "%s" % system.getFeature('memory.size').getValue('B') # The URI has this format: docker://image_name @@ -221,7 +220,7 @@ def _generate_pod_data(self, name, outports, system, ssh_port, volumes): pod_data = { 'apiVersion': self._apiVersion, 'kind': 'Pod' } pod_data['metadata'] = { 'name': name, - 'namespace': self._namespace, + 'namespace': namespace, 'labels': {'name': name} } containers = [{ @@ -229,7 +228,6 @@ def _generate_pod_data(self, name, outports, system, ssh_port, volumes): 'image': image_name, 'command': ["/bin/bash", "-c", "yum install -y openssh-server ; apt-get update && apt-get install -y openssh-server && sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/g' /etc/ssh/sshd_config && service ssh start && service ssh stop ; echo 'root:" + self._root_password + "' | chpasswd ; /usr/sbin/sshd -D"], 'imagePullPolicy': 'IfNotPresent', - 'restartPolicy': 'Never', 'ports': ports, 'resources': {'limits': {'cpu': cpu, 'memory': memory}} }] @@ -239,7 +237,7 @@ def 
_generate_pod_data(self, name, outports, system, ssh_port, volumes): for (v_name, _, _, v_mount_path, _) in volumes: containers[0]['volumeMounts'].append({'name':v_name, 'mountPath':v_mount_path}) - pod_data['spec'] = {'containers' : containers} + pod_data['spec'] = {'containers' : containers, 'restartPolicy': 'Never'} if volumes: pod_data['spec']['volumes'] = [] @@ -275,19 +273,24 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): while i < num_vm: try: i += 1 - pod_name = "im%d" % int(time.time()*100) + + namespace = "im%d" % int(time.time()*100) + vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl, self) + (nodename, _) = vm.getRequestedName(default_hostname = Config.DEFAULT_VM_NAME, default_domain = Config.DEFAULT_DOMAIN) + pod_name = nodename + # Do not use the Persistent volumes yet - volumes = self._create_volumes(system, pod_name, auth_data) + volumes = self._create_volumes(namespace, system, pod_name, auth_data) # Create the pod - conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods") + conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods") conn.putheader('Content-Type', 'application/json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) ssh_port = (KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter) % 65535 KubernetesCloudConnector._port_counter += 1 - pod_data = self._generate_pod_data(pod_name, outports, system, ssh_port, volumes) + pod_data = self._generate_pod_data(namespace, pod_name, outports, system, ssh_port, volumes) body = json.dumps(pod_data) conn.putheader('Content-Length', len(body)) conn.endheaders(body) @@ -298,7 +301,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): res.append((False, "Error creating the Container: " + output)) else: output = json.loads(output) - vm = VirtualMachine(inf, output["metadata"]["name"], self.cloud, radl, requested_radl, self) + 
vm.id = output["metadata"]["namespace"] + "/" + output["metadata"]["name"] # Set SSH port in the RADL info of the VM vm.setSSHPort(ssh_port) # Set the default user and password to access the container @@ -313,15 +316,18 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): return res - def _get_pod(self, pod_name, auth_data): + def _get_pod(self, vm_id, auth_data): try: + namespace = vm_id.split("/")[0] + pod_name = vm_id.split("/")[1] + auth = self.get_auth_header(auth_data) headers = {} if auth: headers.update(auth) conn = self.get_http_connection() - conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + pod_name, headers = headers) + conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = resp.read() @@ -385,15 +391,18 @@ def finalize(self, vm, auth_data): return self._delete_pod(vm.id, auth_data) - def _delete_pod(self, pod_name, auth_data): + def _delete_pod(self, vm_id, auth_data): try: + namespace = vm_id.split("/")[0] + pod_name = vm_id.split("/")[1] + auth = self.get_auth_header(auth_data) headers = {} if auth: headers.update(auth) conn = self.get_http_connection() - conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + pod_name, headers = headers) + conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = str(resp.read()) if resp.status == 404: @@ -443,7 +452,9 @@ def alterVM(self, vm, radl, auth_data): return (True, vm) # Create the container - conn.putrequest('PATCH', "/api/" + self._apiVersion + "/namespaces/" + self._namespace + "/pods/" + str(vm.id)) + namespace = vm.id.split("/")[0] + pod_name = vm.id.split("/")[1] + conn.putrequest('PATCH', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name) conn.putheader('Content-Type', 
'application/json-patch+json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) From bb84a2a6c609a8e751038883a27efe45b34b23dd Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 3 Jun 2015 10:30:28 +0200 Subject: [PATCH 13/23] Bugfix in RADL with unicode strings --- IM/InfrastructureManager.py | 8 +++++++- IM/radl/radl.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index 07434b879..ab49eb550 100755 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -589,7 +589,13 @@ def GetVMProperty(inf_id, vm_id, property_name, auth): Return: a str with the property value """ radl_data = InfrastructureManager.GetVMInfo(inf_id, vm_id, auth) - radl = radl_parse.parse_radl(radl_data) + + try: + radl = radl_parse.parse_radl(radl_data) + except Exception, ex: + InfrastructureManager.logger.exception("Error parsing the RADL: " + radl_data) + raise ex + res = None if radl.systems: res = radl.systems[0].getValue(property_name) diff --git a/IM/radl/radl.py b/IM/radl/radl.py index 853eeafd1..9dd9277df 100644 --- a/IM/radl/radl.py +++ b/IM/radl/radl.py @@ -102,7 +102,7 @@ def __init__(self, prop = None, operator = None, value = None, unit = '', line=N def __str__(self): return ("{0} {1} ({2})" if self.operator == "contains" else - "{0} {1} '{2}'" if isinstance(self.value, str) else + "{0} {1} '{2}'" if isinstance(self.value, str) or isinstance(self.value, unicode) else "{0} {1} {2}{3}").format(self.prop, self.operator, self.value, self.unit if self.unit else "") From f0d3d3f6f0b43aa31c91a55da9af6ceabdfb5c2a Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 3 Jun 2015 10:31:22 +0200 Subject: [PATCH 14/23] Bugfix in RADL with unicode strings --- changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/changelog b/changelog index 88fd677c3..0459bf62a 100644 --- a/changelog +++ b/changelog @@ -117,4 +117,5 @@ IM 1.2.5 * Bugfix in OCCI, OpenNebula and Docker 
connectors when using incorrect credentials. * Improve Docker connector code. * Add Kubernetes connector. + * Bugfix in RADL with unicode strings From 80d3115aeaa74bd7f16a7e1ff09121fdb20c988f Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 3 Jun 2015 13:26:30 +0200 Subject: [PATCH 15/23] Bugfix in check RADL functions with unicode strings --- IM/radl/radl.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/IM/radl/radl.py b/IM/radl/radl.py index 9dd9277df..92775b902 100644 --- a/IM/radl/radl.py +++ b/IM/radl/radl.py @@ -152,12 +152,16 @@ def _check(self, check, radl): if not isinstance(self.value, int) and not isinstance(self.value, float): raise RADLParseException("Invalid type; expected %s" % check[0], line=self.line) + elif check[0] == str: + if not isinstance(self.value, str) and not isinstance(self.value, unicode): + raise RADLParseException("Invalid type; expected %s" % check[0], + line=self.line) else: if not isinstance(self.value, check[0]): raise RADLParseException("Invalid type; expected %s" % check[0], line=self.line) # Check operator - if isinstance(self.value, str) and self.prop.find('version') == -1: + if (isinstance(self.value, str) or isinstance(self.value, unicode)) and self.prop.find('version') == -1: if self.operator != "=": raise RADLParseException("Invalid operator; expected '='", line=self.line) From 822ada9ecf76845e2029e5633b97606d5b7a8d92 Mon Sep 17 00:00:00 2001 From: micafer Date: Wed, 3 Jun 2015 18:30:49 +0200 Subject: [PATCH 16/23] Code improvements --- IM/InfrastructureManager.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index ab49eb550..d61e1f35e 100755 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -450,7 +450,12 @@ def AddResource(inf_id, radl_data, auth, context = True, failed_clouds = []): if len(suggested_cloud_ids) > 1: raise Exception("Two deployments that have to be launched in 
the same cloud provider are asked to be deployed in different cloud providers: %s" % deploy_group) elif len(suggested_cloud_ids) == 1: - cloud_list0 = [ (suggested_cloud_ids[0], cloud_list[suggested_cloud_ids[0]]) ] + if suggested_cloud_ids[0] not in cloud_list: + InfrastructureManager.logger.debug("Cloud Provider list:") + InfrastructureManager.logger.debug(cloud_list) + raise Exception("No auth data for cloud with ID: %s" % suggested_cloud_ids[0]) + else: + cloud_list0 = [ (suggested_cloud_ids[0], cloud_list[suggested_cloud_ids[0]]) ] else: cloud_list0 = cloud_list.items() if d.vm_number: @@ -536,7 +541,7 @@ def RemoveResource(inf_id, vm_list, auth): Args: - inf_id(int): infrastructure id. - - vm_list(list of int): list of virtual machine ids. + - vm_list(str, int or list of str): list of virtual machine ids. - auth(Authentication): parsed authentication tokens. Return(int): number of undeployed virtual machines. @@ -546,7 +551,14 @@ def RemoveResource(inf_id, vm_list, auth): sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth) - vm_ids = vm_list.split(",") + if isinstance(vm_list, str): + vm_ids = vm_list.split(",") + elif isinstance(vm_list, int): + vm_ids = [str(vm_list)] + elif isinstance(vm_list, list): + vm_ids = vm_list + else: + raise Exception('Incorrect parameter type to RemoveResource function: expected: str, int or list of str.') cont = 0 exceptions = [] From 699b8102d647b314809148137f3f57f870c56139 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 4 Jun 2015 18:23:31 +0200 Subject: [PATCH 17/23] Correct documentation --- doc/source/xmlrpc.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst index 36a483100..4b09f4285 100644 --- a/doc/source/xmlrpc.rst +++ b/doc/source/xmlrpc.rst @@ -175,12 +175,13 @@ This is the list of method names: :parameter 0: ``infId``: integer :parameter 1: ``vmIds``: string :parameter 2: ``auth``: array of structs - :ok response: [true, ``infId``: 
integer] + :ok response: [true, integer] :fail response: [false, ``error``: string] Undeploy the virtual machines with IDs in ``vmIds`` associated to the infrastructure with ID ``infId``. The different virtual machine IDs in - ``vmIds`` are separated by commas. + ``vmIds`` are separated by commas. On success it returns the number of + VMs that have been undeployed. .. _StopInfrastructure-xmlrpc: From 214f8c91a8bffe528cbcf412fd17680cf6dbf847 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 4 Jun 2015 18:55:44 +0200 Subject: [PATCH 18/23] Bugfix if private IP of a VM is not in the well known ranges --- IM/VirtualMachine.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 4e663156f..3e7f975c4 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -462,6 +462,11 @@ def setIps(self,public_ips,private_ips): private_net_mask = mask break + if not private_net_mask: + parts = private_ip.split(".") + private_net_mask = "%s.0.0.0/8" % parts[0] + VirtualMachine.logger.warn("%s is not in known private net groups.
Using mask: %s" % (private_ip, private_net_mask)) + # Search in previous user private ips private_net = None for net_mask, net in private_net_map.iteritems(): From dfe9a1326ef727232fc2143c789eb45b7626fa68 Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 8 Jun 2015 11:31:36 +0200 Subject: [PATCH 19/23] Add different versions support in kubernetes connector --- connectors/Kubernetes.py | 70 +++++++++++++++++++++++++++++++--------- 1 file changed, 54 insertions(+), 16 deletions(-) diff --git a/connectors/Kubernetes.py b/connectors/Kubernetes.py index e3dbd0d42..92b5f6220 100644 --- a/connectors/Kubernetes.py +++ b/connectors/Kubernetes.py @@ -39,8 +39,8 @@ class KubernetesCloudConnector(CloudConnector): """ Counter to assign SSH port on Kubernetes node.""" _root_password = "Aspecial+0ne" """ Default password to set to the root in the container""" - _apiVersion = "v1beta3" - """ API version to access""" + _apiVersions = ["v1", "v1beta3"] + """ Supported API versions""" VM_STATE_MAP = { 'Pending': VirtualMachine.PENDING, @@ -91,6 +91,38 @@ def get_auth_header(self, auth_data): return auth_header + def get_api_version(self, auth_data): + """ + Return the API version to use to connect with kubernetes API server + """ + version = self._apiVersions[0] + + try: + auth = self.get_auth_header(auth_data) + headers = {} + if auth: + headers.update(auth) + conn = self.get_http_connection() + + conn.request('GET', "/api/", headers = headers) + resp = conn.getresponse() + + output = resp.read() + + if resp.status == 200: + output = json.loads(output) + for v in self._apiVersions: + if v in output["versions"]: + return v + + except Exception: + self.logger.exception("Error connecting with Kubernetes API server") + + self.logger.warn("Error getting a compatible API version. Setting the default one.") + self.logger.debug("Using %s API version."
% version) + return version + + def concreteSystem(self, radl_system, auth_data): if radl_system.getValue("disk.0.image.url"): url = uriparse(radl_system.getValue("disk.0.image.url")) @@ -123,8 +155,9 @@ def _delete_volume_claim(self, namespace, vc_name, auth_data): if auth: headers.update(auth) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) - conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/persistentvolumeclaims/" + vc_name, headers = headers) + conn.request('DELETE', "/api/" + apiVersion + "/namespaces/" + namespace + "/persistentvolumeclaims/" + vc_name, headers = headers) resp = conn.getresponse() output = str(resp.read()) if resp.status == 404: @@ -152,8 +185,9 @@ def _create_volume_claim(self, claim_data, auth_data): try: auth_header = self.get_auth_header(auth_data) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) - conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + claim_data['metadata']['namespace'] + "/persistentvolumeclaims") + conn.putrequest('POST', "/api/" + apiVersion + "/namespaces/" + claim_data['metadata']['namespace'] + "/persistentvolumeclaims") conn.putheader('Content-Type', 'application/json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) @@ -173,7 +207,7 @@ def _create_volume_claim(self, claim_data, auth_data): self.logger.exception("Error connecting with Kubernetes API server") return False - def _create_volumes(self, namespace, system, pod_name, auth_data, persistent = False): + def _create_volumes(self, apiVersion, namespace, system, pod_name, auth_data, persistent = False): res = [] cont = 1 while system.getValue("disk." + str(cont) + ".size") and system.getValue("disk." + str(cont) + ".mount_path") and system.getValue("disk." 
+ str(cont) + ".device"): @@ -189,7 +223,7 @@ def _create_volumes(self, namespace, system, pod_name, auth_data, persistent = F name = "%s-%d" % (pod_name, cont) if persistent: - claim_data = { 'apiVersion': self._apiVersion, 'kind': 'PersistentVolumeClaim' } + claim_data = { 'apiVersion': apiVersion, 'kind': 'PersistentVolumeClaim' } claim_data['metadata'] = { 'name': name, 'namespace': namespace } claim_data['spec'] = { 'accessModes' : ['ReadWriteOnce'], 'resources' : {'requests' : {'storage' : disk_size} } } @@ -205,7 +239,7 @@ def _create_volumes(self, namespace, system, pod_name, auth_data, persistent = F return res - def _generate_pod_data(self, namespace, name, outports, system, ssh_port, volumes): + def _generate_pod_data(self, apiVersion, namespace, name, outports, system, ssh_port, volumes): cpu = str(system.getValue('cpu.count')) memory = "%s" % system.getFeature('memory.size').getValue('B') # The URI has this format: docker://image_name @@ -217,7 +251,7 @@ def _generate_pod_data(self, namespace, name, outports, system, ssh_port, volume if local_port != 22: ports.append({'containerPort':local_port, 'protocol': local_protocol.upper(), 'hostPort': remote_port}) - pod_data = { 'apiVersion': self._apiVersion, 'kind': 'Pod' } + pod_data = { 'apiVersion': apiVersion, 'kind': 'Pod' } pod_data['metadata'] = { 'name': name, 'namespace': namespace, @@ -267,6 +301,7 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): auth_header = self.get_auth_header(auth_data) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) res = [] i = 0 @@ -280,17 +315,17 @@ def launch(self, inf, radl, requested_radl, num_vm, auth_data): pod_name = nodename # Do not use the Persistent volumes yet - volumes = self._create_volumes(namespace, system, pod_name, auth_data) + volumes = self._create_volumes(apiVersion, namespace, system, pod_name, auth_data) # Create the pod - conn.putrequest('POST', "/api/" + self._apiVersion + "/namespaces/" + namespace 
+ "/pods") + conn.putrequest('POST', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods") conn.putheader('Content-Type', 'application/json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) ssh_port = (KubernetesCloudConnector._port_base_num + KubernetesCloudConnector._port_counter) % 65535 KubernetesCloudConnector._port_counter += 1 - pod_data = self._generate_pod_data(namespace, pod_name, outports, system, ssh_port, volumes) + pod_data = self._generate_pod_data(apiVersion, namespace, pod_name, outports, system, ssh_port, volumes) body = json.dumps(pod_data) conn.putheader('Content-Length', len(body)) conn.endheaders(body) @@ -326,8 +361,9 @@ def _get_pod(self, vm_id, auth_data): if auth: headers.update(auth) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) - conn.request('GET', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) + conn.request('GET', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = resp.read() @@ -373,9 +409,9 @@ def setIPs(self, vm, pod_info): public_ips = [] private_ips = [] if 'hostIP' in pod_info["status"]: - public_ips = [pod_info["status"]["hostIP"]] + public_ips = [str(pod_info["status"]["hostIP"])] if 'podIP' in pod_info["status"]: - private_ips = [pod_info["status"]["podIP"]] + private_ips = [str(pod_info["status"]["podIP"])] vm.setIps(public_ips, private_ips) @@ -401,8 +437,9 @@ def _delete_pod(self, vm_id, auth_data): if auth: headers.update(auth) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) - conn.request('DELETE', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) + conn.request('DELETE', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name, headers = headers) resp = conn.getresponse() output = str(resp.read()) if resp.status == 404: @@ -429,6 
+466,7 @@ def alterVM(self, vm, radl, auth_data): auth_header = self.get_auth_header(auth_data) conn = self.get_http_connection() + apiVersion = self.get_api_version(auth_data) try: pod_data = [] @@ -454,7 +492,7 @@ def alterVM(self, vm, radl, auth_data): # Create the container namespace = vm.id.split("/")[0] pod_name = vm.id.split("/")[1] - conn.putrequest('PATCH', "/api/" + self._apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name) + conn.putrequest('PATCH', "/api/" + apiVersion + "/namespaces/" + namespace + "/pods/" + pod_name) conn.putheader('Content-Type', 'application/json-patch+json') if auth_header: conn.putheader(auth_header.keys()[0], auth_header.values()[0]) From 929d285c308e1ac7fa6464319ab1f7a47015e3d9 Mon Sep 17 00:00:00 2001 From: micafer Date: Mon, 8 Jun 2015 17:55:55 +0200 Subject: [PATCH 20/23] Bugfix in EC2 setting the availability_zone with quotes --- connectors/EC2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/connectors/EC2.py b/connectors/EC2.py index 392d9e153..65c0dac19 100644 --- a/connectors/EC2.py +++ b/connectors/EC2.py @@ -864,8 +864,8 @@ def updateVMInfo(self, vm, auth_data): self.logger.exception("Error updating the instance " + instance_id) return (False, "Error updating the instance " + instance_id + ": " + str(ex)) - vm.info.systems[0].setValue("virtual_system_type", "'" + instance.virtualization_type + "'") - vm.info.systems[0].setValue("availability_zone", "'" + instance.placement + "'") + vm.info.systems[0].setValue("virtual_system_type", instance.virtualization_type) + vm.info.systems[0].setValue("availability_zone", instance.placement) vm.state = self.VM_STATE_MAP.get(instance.state, VirtualMachine.UNKNOWN) From 62b2330c7367d26be941becc1cb45c46c7906aff Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 11 Jun 2015 12:01:00 +0200 Subject: [PATCH 21/23] Add StarVM and StopVM functions to the API and Modify contextualziation process to ignore not running VMs enabling to configure the rest of 
VMs of an Inf --- IM/ConfManager.py | 95 ++++++++++++++++++++++--------------- IM/InfrastructureInfo.py | 7 ++- IM/InfrastructureManager.py | 60 +++++++++++++++++++++++ IM/REST.py | 56 ++++++++++++++++++++++ IM/ServiceRequests.py | 28 ++++++++++- IM/VirtualMachine.py | 4 +- IM/__init__.py | 2 +- changelog | 6 ++- connectors/FogBow.py | 2 +- connectors/GCE.py | 4 +- connectors/OpenNebula.py | 2 + doc/source/REST.rst | 86 ++++++++++++++++++++------------- doc/source/xmlrpc.rst | 27 +++++++++++ im_service.py | 13 ++++- test/TestIM.py | 29 +++++++++-- test/TestREST.py | 39 +++++++++++---- 16 files changed, 367 insertions(+), 93 deletions(-) diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 0705c0858..080f406f2 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -115,11 +115,10 @@ def check_vm_ips(self, timeout = Config.WAIT_RUNNING_VM_TIMEOUT): # If the IP is not Available try to update the info vm.update_status(self.auth) - # If the VM is not in a "running" state, return false - if vm.state in [VirtualMachine.OFF, VirtualMachine.FAILED, VirtualMachine.STOPPED]: - ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": Error waiting all the VMs to have a correct IP. VM ID: " + str(vm.id) + " is not running.") - self.inf.set_configured(False) - return False + # If the VM is not in a "running" state, ignore it + if vm.state in VirtualMachine.NOT_RUNNING_STATES: + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " is not running, do not wait it to have an IP.") + continue if vm.hasPublicNet(): ip = vm.getPublicIP() @@ -306,6 +305,19 @@ def generate_inventory(self, tmp_dir): for vm in vm_group[group]: if not vm.destroy: + # first try to use the public IP + ip = vm.getPublicIP() + if not ip: + ip = vm.getPrivateIP() + + if not ip: + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " does not have an IP. 
It will not be included in the inventory file.") + continue + + if vm.state in VirtualMachine.NOT_RUNNING_STATES: + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " is not running. It will not be included in the inventory file.") + continue + ifaces_im_vars = '' for i in range(vm.getNumNetworkIfaces()): iface_ip = vm.getIfaceIP(i) @@ -317,10 +329,7 @@ def generate_inventory(self, tmp_dir): ifaces_im_vars += ' IM_NODE_NET_' + str(i) + '_DOMAIN=' + nodedom ifaces_im_vars += ' IM_NODE_NET_' + str(i) + '_FQDN=' + nodename + "." + nodedom - # first try to use the public IP - ip = vm.getPublicIP() - if not ip: - ip = vm.getPrivateIP() + # the master node # TODO: Known issue: the master VM must set the public network in the iface 0 @@ -379,6 +388,15 @@ def generate_etc_hosts(self, tmp_dir): vm = vm_group[group][0] for vm in vm_group[group]: + # first try to use the public IP + ip = vm.getPublicIP() + if not ip: + ip = vm.getPrivateIP() + + if not ip: + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " does not have an IP. It will not be included in the /etc/hosts file.") + continue + for i in range(vm.getNumNetworkIfaces()): if vm.getRequestedNameIface(i): if vm.getIfaceIP(i): @@ -386,11 +404,6 @@ def generate_etc_hosts(self, tmp_dir): hosts_out.write(vm.getIfaceIP(i) + " " + nodename + "." 
+ nodedom + " " + nodename + "\r\n") else: ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": Net interface " + str(i) + " request a name, but it does not have an IP.") - - # first try to use the public IP - ip = vm.getPublicIP() - if not ip: - ip = vm.getPrivateIP() # the master node # TODO: Known issue: the master VM must set the public network in the iface 0 @@ -509,13 +522,10 @@ def configure_master(self): # Activate tty mode to avoid some problems with sudo in REL ssh.tty = True - # Get the groups for the different VM types - vm_group = self.inf.get_vm_list_by_system_name() - # configuration dir of the master node to copy all the contextualization files tmp_dir = tempfile.mkdtemp() # Now call the ansible installation process on the master node - configured_ok = self.configure_ansible(vm_group, ssh, tmp_dir) + configured_ok = self.configure_ansible(ssh, tmp_dir) if not configured_ok: ConfManager.logger.error("Inf ID: " + str(self.inf.id) + ": Error in the ansible installation process") @@ -951,7 +961,7 @@ def create_all_recipe(self, tmp_dir, filename): conf_all_out.close() return all_filename - def configure_ansible(self, vm_group, ssh, tmp_dir): + def configure_ansible(self, ssh, tmp_dir): """ Install ansible in the master node @@ -960,6 +970,8 @@ def configure_ansible(self, ssh, tmp_dir): - tmp_dir(str): Temp directory where all the playbook files will be stored. Returns: True if the process finished successfully, False otherwise.
""" + # Get the groups for the different VM types + vm_group = self.inf.get_vm_list_by_system_name() # Create the ansible inventory file with open(tmp_dir + "/inventory.cfg", 'w') as inv_out: @@ -1031,26 +1043,33 @@ def create_general_conf_file(self, conf_file, vm_list): conf_data['playbook_retries'] = Config.PLAYBOOK_RETRIES conf_data['vms'] = [] for vm in vm_list: - vm_conf_data = {} - vm_conf_data['id'] = vm.im_id - if vm.im_id == self.inf.vm_master.im_id: - vm_conf_data['master'] = True + if vm.state in VirtualMachine.NOT_RUNNING_STATES: + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " is not running, do not include in the general conf file.") else: - vm_conf_data['master'] = False - # first try to use the public IP - vm_conf_data['ip'] = vm.getPublicIP() - if not vm_conf_data['ip']: - vm_conf_data['ip'] = vm.getPrivateIP() - vm_conf_data['ssh_port'] = vm.getSSHPort() - creds = vm.getCredentialValues() - new_creds = vm.getCredentialValues(new=True) - (vm_conf_data['user'], vm_conf_data['passwd'], _, vm_conf_data['private_key']) = creds - # If there are new creds to set to the VM - if len(list(set(new_creds))) > 1 or list(set(new_creds))[0] != None: - if cmp(new_creds,creds) != 0: - (_, vm_conf_data['new_passwd'], vm_conf_data['new_public_key'], vm_conf_data['new_private_key']) = new_creds - - conf_data['vms'].append(vm_conf_data) + vm_conf_data = {} + vm_conf_data['id'] = vm.im_id + if vm.im_id == self.inf.vm_master.im_id: + vm_conf_data['master'] = True + else: + vm_conf_data['master'] = False + # first try to use the public IP + vm_conf_data['ip'] = vm.getPublicIP() + if not vm_conf_data['ip']: + vm_conf_data['ip'] = vm.getPrivateIP() + vm_conf_data['ssh_port'] = vm.getSSHPort() + creds = vm.getCredentialValues() + new_creds = vm.getCredentialValues(new=True) + (vm_conf_data['user'], vm_conf_data['passwd'], _, vm_conf_data['private_key']) = creds + # If there are new creds to set to the VM + if 
len(list(set(new_creds))) > 1 or list(set(new_creds))[0] != None: + if cmp(new_creds,creds) != 0: + (_, vm_conf_data['new_passwd'], vm_conf_data['new_public_key'], vm_conf_data['new_private_key']) = new_creds + + if not vm_conf_data['ip']: + # if the vm does not have an IP, do not include it to avoid errors configuring other VMs + ConfManager.logger.warn("Inf ID: " + str(self.inf.id) + ": The VM ID: " + str(vm.id) + " does not have an IP, do not include in the general conf file.") + else: + conf_data['vms'].append(vm_conf_data) conf_data['conf_dir'] = Config.REMOTE_CONF_DIR diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py index 393f868c2..fb70a9afb 100644 --- a/IM/InfrastructureInfo.py +++ b/IM/InfrastructureInfo.py @@ -152,7 +152,7 @@ def add_cont_msg(self, msg): def get_vm_list(self): """ - Get the list of not destroyed VMs + Get the list of not destroyed VMs. """ with self._lock: res = [vm for vm in self.vm_list if not vm.destroy] @@ -176,6 +176,9 @@ def get_vm(self, str_vm_id): raise IncorrectVMException() def get_vm_list_by_system_name(self): + """ + Get the list of not destroyed VMs grouped by the name of system.
+ """ groups = {} for vm in self.get_vm_list(): if vm.getRequestedSystem().name in groups: @@ -385,6 +388,8 @@ def Contextualize(self, auth): ctxt_task.append((-1,0,self,['configure_master', 'generate_playbooks_and_hosts'])) for vm in self.get_vm_list(): + # Assure to update the VM status before running the ctxt process + vm.update_status(self.auth) vm.cont_out = "" vm.configured = None tasks = {} diff --git a/IM/InfrastructureManager.py b/IM/InfrastructureManager.py index d61e1f35e..e21a81a20 100755 --- a/IM/InfrastructureManager.py +++ b/IM/InfrastructureManager.py @@ -848,6 +848,66 @@ def StartInfrastructure(inf_id, auth): InfrastructureManager.logger.info("Infrastructure successfully restarted") return "" + @staticmethod + def StartVM(inf_id, vm_id, auth): + """ + Start the specified virtual machine in an infrastructure previously stopped. + + Args: + + - inf_id(int): infrastructure id. + - vm_id(int): virtual machine id. + - auth(Authentication): parsed authentication tokens. + + Return(str): error messages; empty string means all was ok. + """ + + InfrastructureManager.logger.info("Starting the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + + vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) + success = False + try: + (success, msg) = vm.start(auth) + except Exception, e: + msg = str(e) + + if not success: + InfrastructureManager.logger.info("The VM %s cannot be restarted: %s" % (vm_id, msg)) + raise Exception("Error starting the VM: %s" % msg) + else: + InfrastructureManager.logger.info("The VM %s successfully restarted" % vm_id) + return "" + + @staticmethod + def StopVM(inf_id, vm_id, auth): + """ + Stop the specified virtual machine in an infrastructure + + Args: + + - inf_id(int): infrastructure id. + - vm_id(int): virtual machine id. + - auth(Authentication): parsed authentication tokens. + + Return(str): error messages; empty string means all was ok. 
+ """ + + InfrastructureManager.logger.info("Stopping the VM id %s from the infrastructure id: %s" % (vm_id, inf_id)) + + vm = InfrastructureManager.get_vm_from_inf(inf_id, vm_id, auth) + success = False + try: + (success, msg) = vm.stop(auth) + except Exception, e: + msg = str(e) + + if not success: + InfrastructureManager.logger.info("The VM %s cannot be stopped: %s" % (vm_id, msg)) + raise Exception("Error stopping the VM: %s" % msg) + else: + InfrastructureManager.logger.info("The VM %s successfully stopped" % vm_id) + return "" + @staticmethod def remove_old_inf(): """Remove destroyed infrastructure.""" diff --git a/IM/REST.py b/IM/REST.py index a12baa210..ac74af97e 100644 --- a/IM/REST.py +++ b/IM/REST.py @@ -388,4 +388,60 @@ def RESTStopInfrastructure(id=None): return False except Exception, ex: bottle.abort(400, "Error stopping infrastructure: " + str(ex)) + return False + +@app.route('/infrastructures/:infid/vms/:vmid/start', method='PUT') +def RESTStartVM(infid=None, vmid=None, prop=None): + try: + auth_data = bottle.request.headers['AUTHORIZATION'].split(AUTH_LINE_SEPARATOR) + auth = Authentication(Authentication.read_auth_data(auth_data)) + except: + bottle.abort(401, "No authentication data provided") + + try: + info = InfrastructureManager.StartVM(int(infid), vmid, auth) + bottle.response.content_type = "text/plain" + return info + except DeletedInfrastructureException, ex: + bottle.abort(404, "Error starting VM: " + str(ex)) + return False + except IncorrectInfrastructureException, ex: + bottle.abort(404, "Error starting VM: " + str(ex)) + return False + except DeletedVMException, ex: + bottle.abort(404, "Error starting VM: " + str(ex)) + return False + except IncorrectVMException, ex: + bottle.abort(404, "Error starting VM: " + str(ex)) + return False + except Exception, ex: + bottle.abort(400, "Error starting VM: " + str(ex)) + return False + +@app.route('/infrastructures/:infid/vms/:vmid/stop', method='PUT') +def RESTStopVM(infid=None, vmid=None, 
prop=None): + try: + auth_data = bottle.request.headers['AUTHORIZATION'].split(AUTH_LINE_SEPARATOR) + auth = Authentication(Authentication.read_auth_data(auth_data)) + except: + bottle.abort(401, "No authentication data provided") + + try: + info = InfrastructureManager.StopVM(int(infid), vmid, auth) + bottle.response.content_type = "text/plain" + return info + except DeletedInfrastructureException, ex: + bottle.abort(404, "Error stopping VM: " + str(ex)) + return False + except IncorrectInfrastructureException, ex: + bottle.abort(404, "Error stopping VM: " + str(ex)) + return False + except DeletedVMException, ex: + bottle.abort(404, "Error stopping VM: " + str(ex)) + return False + except IncorrectVMException, ex: + bottle.abort(404, "Error stopping VM: " + str(ex)) + return False + except Exception, ex: + bottle.abort(400, "Error stopping VM: " + str(ex)) return False \ No newline at end of file diff --git a/IM/ServiceRequests.py b/IM/ServiceRequests.py index 5b931bda6..70895aa83 100644 --- a/IM/ServiceRequests.py +++ b/IM/ServiceRequests.py @@ -46,6 +46,8 @@ class IMBaseRequest(AsyncRequest): START_INFRASTRUCTURE = "StartInfrastructure" STOP_INFRASTRUCTURE = "StopInfrastructure" SAVE_DATA = "SaveData" + START_VM = "StartVM" + STOP_VM = "StopVM" @staticmethod def create_request(function, arguments = (), priority = Request.PRIORITY_NORMAL): @@ -85,6 +87,10 @@ def create_request(function, arguments = (), priority = Request.PRIORITY_NORMAL) return Request_StopInfrastructure(arguments) elif function == IMBaseRequest.SAVE_DATA: return Request_SaveData(arguments) + elif function == IMBaseRequest.START_VM: + return Request_StartVM(arguments) + elif function == IMBaseRequest.STOP_VM: + return Request_StopVM(arguments) else: raise NotImplementedError("Function not Implemented") @@ -264,9 +270,29 @@ def _call_function(self): class Request_SaveData(IMBaseRequest): """ - Request class for the GetInfrastructureContMsg function + Request class for the save_data function """ 
def _call_function(self): self._error_mesage = "Error saving IM data" InfrastructureManager.InfrastructureManager.save_data() + return "" + +class Request_StartVM(IMBaseRequest): + """ + Request class for the StartVM function + """ + def _call_function(self): + self._error_mesage = "Error starting VM" + (inf_id, vm_id, auth_data) = self.arguments + InfrastructureManager.InfrastructureManager.StartVM(inf_id, vm_id, Authentication(auth_data)) + return "" + +class Request_StopVM(IMBaseRequest): + """ + Request class for the StopVM function + """ + def _call_function(self): + self._error_mesage = "Error stopping VM" + (inf_id, vm_id, auth_data) = self.arguments + InfrastructureManager.InfrastructureManager.StopVM(inf_id, vm_id, Authentication(auth_data)) return "" \ No newline at end of file diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 3e7f975c4..bfcae1c30 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -40,6 +40,8 @@ class VirtualMachine: WAIT_TO_PID = "WAIT" + NOT_RUNNING_STATES = [OFF, FAILED, STOPPED] + logger = logging.getLogger('InfrastructureManager') def __init__(self, inf, cloud_id, cloud, info, requested_radl, cloud_connector = None): @@ -538,7 +540,7 @@ def check_ctxt_process(self): if self.ctxt_pid != self.WAIT_TO_PID: ssh = self.inf.vm_master.get_ssh() - if self.state in [VirtualMachine.OFF, VirtualMachine.FAILED, VirtualMachine.STOPPED]: + if self.state in VirtualMachine.NOT_RUNNING_STATES: try: ssh.execute("kill -9 " + str(self.ctxt_pid)) except: diff --git a/IM/__init__.py b/IM/__init__.py index 9355a4b64..6faaf348c 100644 --- a/IM/__init__.py +++ b/IM/__init__.py @@ -16,6 +16,6 @@ __all__ = ['auth','bottle','CloudManager','config','ConfManager','db','ganglia','HTTPHeaderTransport','ImageManager','InfrastructureInfo','InfrastructureManager','parsetab','radl','recipe','request','REST', 'ServiceRequests','SSH','timedcall','uriparse','VMRC','xmlobject'] -__version__ = '1.2.5' +__version__ = '1.3.0' __author__ = 'Miguel 
Caballer' diff --git a/changelog b/changelog index 0459bf62a..41a0f5580 100644 --- a/changelog +++ b/changelog @@ -113,9 +113,11 @@ IM 1.2.4 * Dynamically refresh the Ctxt output * Minor bugfix in EC2 connector when deleting a non existing instance -IM 1.2.5 +IM 1.3.0 * Bugfix in OCCI, OpenNebula and Docker connectors when using incorrect credentials. * Improve Docker connector code. * Add Kubernetes connector. + * Bugfix in FogBow with 1.0 version * Bugfix in RADL with unicode strings - + * Add StartVM and StopVM functions to the API + * Modify contextualization process to ignore not running VMs enabling to configure the rest of VMs of an Inf. diff --git a/connectors/FogBow.py b/connectors/FogBow.py index 38e19116e..c2f4933d0 100644 --- a/connectors/FogBow.py +++ b/connectors/FogBow.py @@ -101,7 +101,7 @@ def get_occi_attribute_value(self, occi_res, attr_name): lines = occi_res.split("\n") for l in lines: if l.find('X-OCCI-Attribute: ' + attr_name + '=') != -1: - return l.split('=')[1].strip('"') + return str(l.split('=')[1].strip().strip('"')) return None """ diff --git a/connectors/GCE.py b/connectors/GCE.py index 59612c422..24a2c3f06 100644 --- a/connectors/GCE.py +++ b/connectors/GCE.py @@ -54,7 +54,9 @@ def get_driver(self, auth_data): auth = auth_data.getAuthInfo(self.type) if auth and 'username' in auth[0] and 'password' in auth[0] and 'project' in auth[0]: - cls = get_driver(Provider.GCE) + cls = get_driver(Provider.GCE) + # Patch to solve some client problems with \\n + auth[0]['password'] = auth[0]['password'].replace('\\n','\n') lines = len(auth[0]['password'].replace(" ","").split()) if lines < 2: raise Exception("The certificate provided to the GCE plugin has an incorrect format.
Check that it has more than one line.") diff --git a/connectors/OpenNebula.py b/connectors/OpenNebula.py index 9164f7c6a..f88ed0a50 100644 --- a/connectors/OpenNebula.py +++ b/connectors/OpenNebula.py @@ -215,6 +215,8 @@ def updateVMInfo(self, vm, auth_data): if res_vm.STATE == 3: if res_vm.LCM_STATE == 3: res_state = VirtualMachine.RUNNING + elif res_vm.LCM_STATE == 5 or res_vm.LCM_STATE == 6: + res_state = VirtualMachine.STOPPED else: res_state = VirtualMachine.PENDING elif res_vm.STATE < 3 : diff --git a/doc/source/REST.rst b/doc/source/REST.rst index 08f49a367..ed4e85fa2 100644 --- a/doc/source/REST.rst +++ b/doc/source/REST.rst @@ -12,39 +12,45 @@ password are not valid, it is returned the HTTP error code 401. Next tables summaries the resources and the HTTP methods available. -+-------------+------------------------+-------------------------------+-----------------------------------------+ -| HTTP method | /infrastructures | /infrastructures/ | /infrastructures//vms/ | -+=============+========================+===============================+=========================================+ -| **GET** | **List** the | **List** the virtual machines | **Get** information associated to the | -| | infrastructure | in the infrastructure | virtual machine ``vmId`` in ``infId``. | -| | IDs. | ``infId`` | | -+-------------+------------------------+-------------------------------+-----------------------------------------+ -| **POST** | **Create** a new | **Create** a new virtual | | -| | infrastructure | machine based on the RADL | | -| | based on the RADL | posted. | | -| | posted. | | | -+-------------+------------------------+-------------------------------+-----------------------------------------+ -| **PUT** | | | **Modify** the virtual machine based on | -| | | | the RADL posted. 
| -+-------------+------------------------+-------------------------------+-----------------------------------------+ -| **DELETE** | | **Undeploy** all the virtual | **Undeploy** the virtual machine. | -| | | machines in the | | -| | | infrastructure. | | -+-------------+------------------------+-------------------------------+-----------------------------------------+ ++-------------+-------------------+-------------------------------+-----------------------------------------+ +| HTTP method | /infrastructures | /infrastructures/ | /infrastructures//vms/ | ++=============+===================+===============================+=========================================+ +| **GET** | **List** the | **List** the virtual machines | **Get** information associated to the | +| | infrastructure | in the infrastructure | virtual machine ``vmId`` in ``infId``. | +| | IDs. | ``infId`` | | ++-------------+-------------------+-------------------------------+-----------------------------------------+ +| **POST** | **Create** a new | **Create** a new virtual | | +| | infrastructure | machine based on the RADL | | +| | based on the RADL | posted. | | +| | posted. | | | ++-------------+-------------------+-------------------------------+-----------------------------------------+ +| **PUT** | | | **Modify** the virtual machine based on | +| | | | the RADL posted. | ++-------------+-------------------+-------------------------------+-----------------------------------------+ +| **DELETE** | | **Undeploy** all the virtual | **Undeploy** the virtual machine. | +| | | machines in the | | +| | | infrastructure. 
| | ++-------------+-------------------+-------------------------------+-----------------------------------------+ -+-------------+--------------------------------+---------------------------------+----------------------------------------+ -| HTTP method | /infrastructures//stop | /infrastructures//start | /infrastructures//reconfigure | -+=============+================================+=================================+========================================+ -| **PUT** | **Stop** the infrastructure. | **Start** the infrastructure. | **Reconfigure** the infrastructure. | -+-------------+--------------------------------+---------------------------------+----------------------------------------+ - -+-------------+--------------------------------------------------------+--------------------------------------------------+ -| HTTP method | /infrastructures//vms// | /infrastructures// | -+=============+========================================================+==================================================+ -| **GET** | **Get** the specified property ``property_name`` | **Get** the specified property ``property_name`` | -| | associated to the machine ``vmId`` in ``infId`` | associated to the infrastructure ``infId``. | -| | | It has two properties: ``contmsg`` and ``radl`` | -+-------------+--------------------------------------------------------+--------------------------------------------------+ ++-------------+-------------------------------+--------------------------------+--------------------------------------+ +| HTTP method | /infrastructures//stop | /infrastructures//start | /infrastructures//reconfigure | ++=============+===============================+================================+======================================+ +| **PUT** | **Stop** the infrastructure. | **Start** the infrastructure. | **Reconfigure** the infrastructure. 
| ++-------------+-------------------------------+--------------------------------+--------------------------------------+ + ++-------------+-----------------------------------------------------+--------------------------------------------------+ +| HTTP method | /infrastructures//vms// | /infrastructures// | ++=============+=====================================================+==================================================+ +| **GET** | **Get** the specified property ``property_name`` | **Get** the specified property ``property_name`` | +| | associated to the machine ``vmId`` in ``infId`` | associated to the infrastructure ``infId``. | +| | | It has two properties: ``contmsg`` and ``radl`` | ++-------------+-----------------------------------------------------+--------------------------------------------------+ + ++-------------+--------------------------------------------+---------------------------------------------+ +| HTTP method | /infrastructures//vms//stop | /infrastructures//start | ++=============+============================================+=============================================+ +| **PUT** | **Stop** the machine ``vmId`` in ``infId`` | **Start** the machine ``vmId`` in ``infId`` | ++-------------+--------------------------------------------+---------------------------------------------+ GET ``http://imserver.com/infrastructures`` :Content-type: text/uri-list @@ -160,3 +166,19 @@ DELETE ``http://imserver.com/infrastructures//vms/`` Undeploy the virtual machine with ID ``vmId`` associated to the infrastructure with ID ``infId``. + +PUT ``http://imserver.com/infrastructures//vms//start`` + :Content-type: text/plain + :ok response: 200 OK + :fail response: 401, 404, 400 + + Perform the ``start`` action in the virtual machine with ID + ``vmId`` associated to the infrastructure with ID ``infId``. 
+ +PUT ``http://imserver.com/infrastructures//vms//stop`` + :Content-type: text/plain + :ok response: 200 OK + :fail response: 401, 404, 400 + + Perform the ``stop`` action in the virtual machine with ID + ``vmId`` associated to the infrastructure with ID ``infId``. \ No newline at end of file diff --git a/doc/source/xmlrpc.rst b/doc/source/xmlrpc.rst index 4b09f4285..06a8c4cb0 100644 --- a/doc/source/xmlrpc.rst +++ b/doc/source/xmlrpc.rst @@ -195,6 +195,19 @@ This is the list of method names: infrastructure with ID ``infId``. They can resume by :ref:`StartInfrastructure `. +.. _StopVM-xmlrpc: + +``StopVM`` + :parameter 0: ``infId``: integer + :parameter 1: ``vmId``: integer + :parameter 2: ``auth``: array of structs + :ok response: [true, string of length zero] + :fail response: [false, ``error``: string] + + Stop (but do not undeploy) the specified virtual machine with ID ``vmId`` + associated to the infrastructure with ID ``infId``. They can resume by + :ref:`StartVM `. + .. _StartInfrastructure-xmlrpc: ``StartInfrastructure`` @@ -207,6 +220,20 @@ This is the list of method names: infrastructure with ID ``infId``, previously stopped by :ref:`StopInfrastructure `. +.. _StartVM-xmlrpc: + +``StartVM`` + :parameter 0: ``infId``: integer + :parameter 1: ``vmId``: integer + :parameter 2: ``auth``: array of structs + :ok response: [true, string of length zero] + :fail response: [false, ``error``: string] + + Resume the specified virtual machine with ID ``vmId`` associated to the + infrastructure with ID ``infId``, previously stopped by + :ref:`StopInfrastructure ` or + :ref:`StopVM ` + .. 
_Reconfigure-xmlrpc: ``Reconfigure`` diff --git a/im_service.py b/im_service.py index da43fa788..3190788a7 100755 --- a/im_service.py +++ b/im_service.py @@ -79,7 +79,8 @@ def StartInfrastructure(inf_id, auth_data): return WaitRequest(request) def DestroyInfrastructure(inf_id, auth_data): - request = IMBaseRequest.create_request(IMBaseRequest.DESTROY_INFRASTRUCTURE,(inf_id, auth_data)) + request = IMBaseRequest.create_request(IMBaseRequest.DESTROY_INFRASTRUCTURE,(inf_id, auth_data)) + # This function take a lot of time in some connectors. We can make it async: return (True, "") return WaitRequest(request) def CreateInfrastructure(radl_data, auth_data): @@ -114,6 +115,14 @@ def GetInfrastructureContMsg(inf_id, auth_data): request = IMBaseRequest.create_request(IMBaseRequest.GET_INFRASTRUCTURE_CONT_MSG,(inf_id, auth_data)) return WaitRequest(request) +def StopVM(inf_id, vm_id, auth_data): + request = IMBaseRequest.create_request(IMBaseRequest.STOP_VM,(inf_id, vm_id, auth_data)) + return WaitRequest(request) + +def StartVM(inf_id, vm_id, auth_data): + request = IMBaseRequest.create_request(IMBaseRequest.START_VM,(inf_id, vm_id, auth_data)) + return WaitRequest(request) + def launch_daemon(): """ Launch the IM daemon @@ -150,6 +159,8 @@ def launch_daemon(): server.register_function(GetInfrastructureRADL) server.register_function(GetInfrastructureContMsg) server.register_function(GetVMContMsg) + server.register_function(StartVM) + server.register_function(StopVM) InfrastructureManager.logger.info('************ Start Infrastructure Manager daemon (v.%s) ************' % version) diff --git a/test/TestIM.py b/test/TestIM.py index a42ca333a..ba049e0e7 100755 --- a/test/TestIM.py +++ b/test/TestIM.py @@ -53,12 +53,13 @@ def tearDownClass(cls): except Exception: pass - def wait_inf_state(self, state, timeout, incorrect_states = []): + def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None): """ Wait for an infrastructure to have a specific state """ - 
(success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data) - self.assertTrue(success, msg="ERROR calling the GetInfrastructureInfo function:" + str(vm_ids)) + if not vm_ids: + (success, vm_ids) = self.server.GetInfrastructureInfo(self.inf_id, self.auth_data) + self.assertTrue(success, msg="ERROR calling the GetInfrastructureInfo function:" + str(vm_ids)) err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED] err_states.extend(incorrect_states) @@ -232,9 +233,29 @@ def test_22_start(self): (success, res) = self.server.StartInfrastructure(self.inf_id, self.auth_data) self.assertTrue(success, msg="ERROR calling StartInfrastructure: " + str(res)) - all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING]) + all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING]) self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).") + def test_23_stop_vm(self): + """ + Test StopVM function + """ + (success, res) = self.server.StopVM(self.inf_id, 0, self.auth_data) + self.assertTrue(success, msg="ERROR calling StopVM: " + str(res)) + + all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0]) + self.assertTrue(all_stopped, msg="ERROR waiting the vm to be stopped (timeout).") + + def test_24_start_vm(self): + """ + Test StartVM function + """ + (success, res) = self.server.StartVM(self.inf_id, 0, self.auth_data) + self.assertTrue(success, msg="ERROR calling StartVM: " + str(res)) + + all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 150, [VirtualMachine.RUNNING], [0]) + self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).") + def test_50_destroy(self): """ Test DestroyInfrastructure function diff --git a/test/TestREST.py b/test/TestREST.py index b42f973e0..5b3cbed17 100755 --- a/test/TestREST.py +++ b/test/TestREST.py @@ -60,16 +60,17 @@ 
def tearDownClass(cls): except Exception: pass - def wait_inf_state(self, state, timeout, incorrect_states = []): + def wait_inf_state(self, state, timeout, incorrect_states = [], vm_ids = None): """ Wait for an infrastructure to have a specific state """ - self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data}) - resp = self.server.getresponse() - output = str(resp.read()) - self.assertEqual(resp.status, 200, msg="ERROR getting infrastructure info:" + output) - - vm_ids = output.split("\n") + if not vm_ids: + self.server.request('GET', "/infrastructures/" + self.inf_id, headers = {'AUTHORIZATION' : self.auth_data}) + resp = self.server.getresponse() + output = str(resp.read()) + self.assertEqual(resp.status, 200, msg="ERROR getting infrastructure info:" + output) + + vm_ids = output.split("\n") err_states = [VirtualMachine.FAILED, VirtualMachine.OFF, VirtualMachine.UNCONFIGURED] err_states.extend(incorrect_states) @@ -243,10 +244,28 @@ def test_70_start(self): output = str(resp.read()) self.assertEqual(resp.status, 200, msg="ERROR starting the infrastructure:" + output) - all_stopped = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING]) - self.assertTrue(all_stopped, msg="ERROR waiting the infrastructure to be started (timeout).") + all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING]) + self.assertTrue(all_configured, msg="ERROR waiting the infrastructure to be started (timeout).") + + def test_80_stop_vm(self): + self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/stop", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data}) + resp = self.server.getresponse() + output = str(resp.read()) + self.assertEqual(resp.status, 200, msg="ERROR stopping the vm:" + output) + + all_stopped = self.wait_inf_state(VirtualMachine.STOPPED, 120, [VirtualMachine.RUNNING], [0]) + self.assertTrue(all_stopped, 
msg="ERROR waiting the infrastructure to be stopped (timeout).") + + def test_90_start_vm(self): + self.server.request('PUT', "/infrastructures/" + self.inf_id + "/0/start", headers = {"Content-type": "application/x-www-form-urlencoded", 'AUTHORIZATION' : self.auth_data}) + resp = self.server.getresponse() + output = str(resp.read()) + self.assertEqual(resp.status, 200, msg="ERROR starting the vm:" + output) + + all_configured = self.wait_inf_state(VirtualMachine.CONFIGURED, 120, [VirtualMachine.RUNNING], [0]) + self.assertTrue(all_configured, msg="ERROR waiting the vm to be started (timeout).") - def test_80_destroy(self): + def test_100_destroy(self): self.server.request('DELETE', "/infrastructures/" + self.inf_id, headers = {'Authorization' : self.auth_data}) resp = self.server.getresponse() output = str(resp.read()) From ce75d1eebe35e70f00bf2b153f35e591aa161fc2 Mon Sep 17 00:00:00 2001 From: micafer Date: Thu, 11 Jun 2015 13:51:26 +0200 Subject: [PATCH 22/23] Bugfix --- IM/InfrastructureInfo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IM/InfrastructureInfo.py b/IM/InfrastructureInfo.py index fb70a9afb..f92f57a93 100644 --- a/IM/InfrastructureInfo.py +++ b/IM/InfrastructureInfo.py @@ -389,7 +389,7 @@ def Contextualize(self, auth): for vm in self.get_vm_list(): # Assure to update the VM status before running the ctxt process - vm.update_status(self.auth) + vm.update_status(auth) vm.cont_out = "" vm.configured = None tasks = {} From 56c7ac9f4af275461357cd5a07ebaa45813bf3a6 Mon Sep 17 00:00:00 2001 From: micafer Date: Fri, 12 Jun 2015 09:32:42 +0200 Subject: [PATCH 23/23] Create the SSHRetry class --- IM/ConfManager.py | 89 +- IM/SSHRetry.py | 73 + IM/VirtualMachine.py | 12 +- IM/bottle.py | 3616 ----------------------------------------- IM/retry.py | 48 + changelog | 1 + doc/source/manual.rst | 2 + setup.py | 2 +- test/TestIM.py | 4 + 9 files changed, 180 insertions(+), 3667 deletions(-) create mode 100755 IM/SSHRetry.py delete mode 100755 
IM/bottle.py create mode 100644 IM/retry.py diff --git a/IM/ConfManager.py b/IM/ConfManager.py index 080f406f2..0d21a2cd7 100644 --- a/IM/ConfManager.py +++ b/IM/ConfManager.py @@ -224,55 +224,52 @@ def run(self): last_step = step - def launch_ctxt_agent(self, vm, tasks, max_retries = 3): + def launch_ctxt_agent(self, vm, tasks): """ Launch the ctxt agent to configure the specified tasks in the specified VM """ pid = None - retries = 0 - while not pid and retries < max_retries: - retries += 1 - try: - ip = vm.getPublicIP() - if not ip: - ip = vm.getPrivateIP() - remote_dir = Config.REMOTE_CONF_DIR + "/" + ip + "_" + str(vm.getSSHPort()) - tmp_dir = tempfile.mkdtemp() - - ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Create the configuration file for the contextualization agent") - conf_file = tmp_dir + "/config.cfg" - self.create_vm_conf_file(conf_file, vm.im_id, tasks, remote_dir) - - ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Copy the contextualization agent config file") - - # Copy the contextualization agent config file - ssh = self.inf.vm_master.get_ssh() - ssh.sftp_mkdir(remote_dir) - ssh.sftp_put(conf_file, remote_dir + "/" + os.path.basename(conf_file)) - - shutil.rmtree(tmp_dir, ignore_errors=True) - - (pid, _, _) = ssh.execute("nohup python_ansible " + Config.REMOTE_CONF_DIR + "/ctxt_agent.py " - + Config.REMOTE_CONF_DIR + "/general_info.cfg " - + remote_dir + "/" + os.path.basename(conf_file) - + " > " + remote_dir + "/stdout" + " 2> " + remote_dir + "/stderr < /dev/null & echo -n $!") - - ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) - - vm.ctxt_pid = pid - vm.launch_check_ctxt_process() - except: - pid = None - ConfManager.logger.exception("Inf ID: " + str(self.inf.id) + ": Error (%d/%d) launching the ansible process to configure %s" % (retries, max_retries, str(vm.im_id))) - time.sleep(retries*2) - # If the process is not 
correctly launched the configuration of this VM fails - if pid is None: - vm.ctxt_pid = None - vm.configured = False - vm.cont_out = "Error launching the contextualization agent to configure the VM. Check the SSH connection." + try: + ip = vm.getPublicIP() + if not ip: + ip = vm.getPrivateIP() + remote_dir = Config.REMOTE_CONF_DIR + "/" + ip + "_" + str(vm.getSSHPort()) + tmp_dir = tempfile.mkdtemp() + + ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Create the configuration file for the contextualization agent") + conf_file = tmp_dir + "/config.cfg" + self.create_vm_conf_file(conf_file, vm.im_id, tasks, remote_dir) + + ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Copy the contextualization agent config file") + + # Copy the contextualization agent config file + ssh = self.inf.vm_master.get_ssh(retry = True) + ssh.sftp_mkdir(remote_dir) + ssh.sftp_put(conf_file, remote_dir + "/" + os.path.basename(conf_file)) + + shutil.rmtree(tmp_dir, ignore_errors=True) + + (pid, _, _) = ssh.execute("nohup python_ansible " + Config.REMOTE_CONF_DIR + "/ctxt_agent.py " + + Config.REMOTE_CONF_DIR + "/general_info.cfg " + + remote_dir + "/" + os.path.basename(conf_file) + + " > " + remote_dir + "/stdout" + " 2> " + remote_dir + "/stderr < /dev/null & echo -n $!") + + ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Ansible process to configure " + str(vm.im_id) + " launched with pid: " + pid) + + vm.ctxt_pid = pid + vm.launch_check_ctxt_process() + except: + pid = None + ConfManager.logger.exception("Inf ID: " + str(self.inf.id) + ": Error launching the ansible process to configure %s" % str(vm.im_id)) + + # If the process is not correctly launched the configuration of this VM fails + if pid is None: + vm.ctxt_pid = None + vm.configured = False + vm.cont_out = "Error launching the contextualization agent to configure the VM. Check the SSH connection." 
- return pid + return pid def generate_inventory(self, tmp_dir): """ @@ -518,7 +515,7 @@ def configure_master(self): try: ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": Start the contextualization process.") - ssh = self.inf.vm_master.get_ssh() + ssh = self.inf.vm_master.get_ssh(retry=True) # Activate tty mode to avoid some problems with sudo in REL ssh.tty = True @@ -615,7 +612,7 @@ def wait_master(self): ConfManager.logger.info("Inf ID: " + str(self.inf.id) + ": VMs available.") # Check and change if necessary the credentials of the master vm - ssh = self.inf.vm_master.get_ssh() + ssh = self.inf.vm_master.get_ssh(retry=True) # Activate tty mode to avoid some problems with sudo in REL ssh.tty = True self.change_master_credentials(ssh) @@ -688,7 +685,7 @@ def generate_playbooks_and_hosts(self): # TODO: Study why it is needed time.sleep(2) - ssh = self.inf.vm_master.get_ssh() + ssh = self.inf.vm_master.get_ssh(retry=True) self.inf.add_cont_msg("Copying YAML, hosts and inventory files.") ConfManager.logger.debug("Inf ID: " + str(self.inf.id) + ": Copying YAML files.") ssh.sftp_mkdir(remote_dir) diff --git a/IM/SSHRetry.py b/IM/SSHRetry.py new file mode 100755 index 000000000..158865a0a --- /dev/null +++ b/IM/SSHRetry.py @@ -0,0 +1,73 @@ +#! /usr/bin/env python +# IM - Infrastructure Manager +# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see . + +from IM.retry import retry +from IM.SSH import SSH + +class SSHRetry(SSH): + """ SSH class decorated to perform a number of retries """ + TRIES = 3 + DELAY = 3 + BACKOFF = 2 + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def execute(self, command, timeout = None): + return SSH.execute(self, command, timeout) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_get(self, src, dest): + return SSH.sftp_get(self, src, dest) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_get_files(self, src, dest): + return SSH.sftp_get_files(self, src, dest) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_put_files(self, files): + return SSH.sftp_put_files(self, files) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_put(self, src, dest): + return SSH.sftp_put(self, src, dest) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_put_dir(self, src, dest): + return SSH.sftp_put_dir(self, src, dest) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_put_content(self, content, dest): + return SSH.sftp_put_content(self, content, dest) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_mkdir(self, directory): + return SSH.sftp_mkdir(self, directory) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_list(self, directory): + return SSH.sftp_list(self, directory) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_list_attr(self, directory): + return SSH.sftp_list_attr(self, directory) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def getcwd(self): + return SSH.getcwd(self) + + @retry(Exception, tries=TRIES, delay=DELAY, backoff=BACKOFF) + def sftp_remove(self, path): + return SSH.sftp_remove(self, path) \ No newline at end of file diff --git a/IM/VirtualMachine.py b/IM/VirtualMachine.py index 
bfcae1c30..b70cbb45f 100644 --- a/IM/VirtualMachine.py +++ b/IM/VirtualMachine.py @@ -18,6 +18,7 @@ import threading from IM.radl.radl import network, RADL from IM.SSH import SSH +from IM.SSHRetry import SSHRetry from config import Config import shutil import string @@ -497,7 +498,7 @@ def setIps(self,public_ips,private_ips): vm_system.setValue('net_interface.' + str(num_net) + '.ip', str(private_ip)) vm_system.setValue('net_interface.' + str(num_net) + '.connection',private_net.id) - def get_ssh(self): + def get_ssh(self, retry = False): """ Get SSH object to connect with this VM """ @@ -507,7 +508,10 @@ def get_ssh(self): ip = self.getPrivateIP() if ip == None: return None - return SSH(ip, user, passwd, private_key, self.getSSHPort()) + if retry: + return SSHRetry(ip, user, passwd, private_key, self.getSSHPort()) + else: + return SSH(ip, user, passwd, private_key, self.getSSHPort()) def is_ctxt_process_running(self): """ Return the PID of the running process or None if it is not running """ @@ -599,7 +603,7 @@ def is_configured(self): return self.configured def get_ctxt_log(self, remote_dir, delete = False): - ssh = self.inf.vm_master.get_ssh() + ssh = self.inf.vm_master.get_ssh(retry=True) tmp_dir = tempfile.mkdtemp() conf_out = "" @@ -622,7 +626,7 @@ def get_ctxt_log(self, remote_dir, delete = False): return conf_out def get_ctxt_output(self, remote_dir, delete = False): - ssh = self.inf.vm_master.get_ssh() + ssh = self.inf.vm_master.get_ssh(retry=True) tmp_dir = tempfile.mkdtemp() # Download the contextualization agent log diff --git a/IM/bottle.py b/IM/bottle.py deleted file mode 100755 index d6ea90b32..000000000 --- a/IM/bottle.py +++ /dev/null @@ -1,3616 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -Bottle is a fast and simple micro-framework for small web applications. 
It -offers request dispatching (Routes) with url parameter support, templates, -a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and -template engines - all in a single file and with no dependencies other than the -Python Standard Library. - -Homepage and documentation: http://bottlepy.org/ - -Copyright (c) 2014, Marcel Hellkamp. -License: MIT (see LICENSE for details) -""" - -from __future__ import with_statement - -__author__ = 'Marcel Hellkamp' -__version__ = '0.13-dev' -__license__ = 'MIT' - -# The gevent and eventlet server adapters need to patch some modules before -# they are imported. This is why we parse the commandline parameters here but -# handle them later -if __name__ == '__main__': - from optparse import OptionParser - _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") - _opt = _cmd_parser.add_option - _opt("--version", action="store_true", help="show version number.") - _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") - _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") - _opt("-p", "--plugin", action="append", help="install additional plugin/s.") - _opt("--debug", action="store_true", help="start server in debug mode.") - _opt("--reload", action="store_true", help="auto-reload on file changes.") - _cmd_options, _cmd_args = _cmd_parser.parse_args() - if _cmd_options.server: - if _cmd_options.server.startswith('gevent'): - import gevent.monkey; gevent.monkey.patch_all() - elif _cmd_options.server.startswith('eventlet'): - import eventlet; eventlet.monkey_patch() - -import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ - os, re, subprocess, sys, tempfile, threading, time, warnings - -from datetime import date as datedate, datetime, timedelta -from tempfile import TemporaryFile -from traceback import format_exc, print_exc -from inspect import getargspec -from unicodedata import normalize - - -try: from simplejson import dumps as 
json_dumps, loads as json_lds -except ImportError: # pragma: no cover - try: from json import dumps as json_dumps, loads as json_lds - except ImportError: - try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds - except ImportError: - def json_dumps(data): - raise ImportError("JSON support requires Python 2.6 or simplejson.") - json_lds = json_dumps - - - -# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. -# It ain't pretty but it works... Sorry for the mess. - -py = sys.version_info -py3k = py >= (3, 0, 0) -py25 = py < (2, 6, 0) -py31 = (3, 1, 0) <= py < (3, 2, 0) - -# Workaround for the missing "as" keyword in py3k. -def _e(): return sys.exc_info()[1] - -# Workaround for the "print is a keyword/function" Python 2/3 dilemma -# and a fallback for mod_wsgi (resticts stdout/err attribute access) -try: - _stdout, _stderr = sys.stdout.write, sys.stderr.write -except IOError: - _stdout = lambda x: sys.stdout.write(x) - _stderr = lambda x: sys.stderr.write(x) - -# Lots of stdlib and builtin differences. 
-if py3k: - import http.client as httplib - import _thread as thread - from urllib.parse import urljoin, SplitResult as UrlSplitResult - from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote - urlunquote = functools.partial(urlunquote, encoding='latin1') - from http.cookies import SimpleCookie - from collections import MutableMapping as DictMixin - import pickle - from io import BytesIO - from configparser import ConfigParser - basestring = str - unicode = str - json_loads = lambda s: json_lds(touni(s)) - callable = lambda x: hasattr(x, '__call__') - imap = map - def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) -else: # 2.x - import httplib - import thread - from urlparse import urljoin, SplitResult as UrlSplitResult - from urllib import urlencode, quote as urlquote, unquote as urlunquote - from Cookie import SimpleCookie - from itertools import imap - import cPickle as pickle - from StringIO import StringIO as BytesIO - from ConfigParser import SafeConfigParser as ConfigParser - if py25: - msg = "Python 2.5 support may be dropped in future versions of Bottle." - warnings.warn(msg, DeprecationWarning) - from UserDict import DictMixin - def next(it): return it.next() - bytes = str - else: # 2.6, 2.7 - from collections import MutableMapping as DictMixin - unicode = unicode - json_loads = json_lds - eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '', 'exec')) - - -# Some helpers for string/byte handling -def tob(s, enc='utf8'): - return s.encode(enc) if isinstance(s, unicode) else bytes(s) - - -def touni(s, enc='utf8', err='strict'): - if isinstance(s, bytes): - return s.decode(enc, err) - else: - return unicode(s or ("" if s is None else s)) - -tonat = touni if py3k else tob - -# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). -# 3.1 needs a workaround. -if py31: - from io import TextIOWrapper - - class NCTextIOWrapper(TextIOWrapper): - def close(self): pass # Keep wrapped buffer open. 
- - -# A bug in functools causes it to break if the wrapper is an instance method -def update_wrapper(wrapper, wrapped, *a, **ka): - try: - functools.update_wrapper(wrapper, wrapped, *a, **ka) - except AttributeError: - pass - - -# These helpers are used at module level and need to be defined first. -# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. - -def depr(message, strict=False): - warnings.warn(message, DeprecationWarning, stacklevel=3) - -def makelist(data): # This is just to handy - if isinstance(data, (tuple, list, set, dict)): - return list(data) - elif data: - return [data] - else: - return [] - - -class DictProperty(object): - """ Property that maps to a key in a local dict-like attribute. """ - def __init__(self, attr, key=None, read_only=False): - self.attr, self.key, self.read_only = attr, key, read_only - - def __call__(self, func): - functools.update_wrapper(self, func, updated=[]) - self.getter, self.key = func, self.key or func.__name__ - return self - - def __get__(self, obj, cls): - if obj is None: return self - key, storage = self.key, getattr(obj, self.attr) - if key not in storage: storage[key] = self.getter(obj) - return storage[key] - - def __set__(self, obj, value): - if self.read_only: raise AttributeError("Read-Only property.") - getattr(obj, self.attr)[self.key] = value - - def __delete__(self, obj): - if self.read_only: raise AttributeError("Read-Only property.") - del getattr(obj, self.attr)[self.key] - - -class cached_property(object): - """ A property that is only computed once per instance and then replaces - itself with an ordinary attribute. Deleting the attribute resets the - property. 
""" - - def __init__(self, func): - self.__doc__ = getattr(func, '__doc__') - self.func = func - - def __get__(self, obj, cls): - if obj is None: return self - value = obj.__dict__[self.func.__name__] = self.func(obj) - return value - - -class lazy_attribute(object): - """ A property that caches itself to the class object. """ - def __init__(self, func): - functools.update_wrapper(self, func, updated=[]) - self.getter = func - - def __get__(self, obj, cls): - value = self.getter(cls) - setattr(cls, self.__name__, value) - return value - - - - - - -############################################################################### -# Exceptions and Events ######################################################## -############################################################################### - - -class BottleException(Exception): - """ A base class for exceptions used by bottle. """ - pass - - - - - - -############################################################################### -# Routing ###################################################################### -############################################################################### - - -class RouteError(BottleException): - """ This is a base class for all routing related exceptions """ - - -class RouteReset(BottleException): - """ If raised by a plugin or request handler, the route is reset and all - plugins are re-applied. """ - -class RouterUnknownModeError(RouteError): pass - - -class RouteSyntaxError(RouteError): - """ The route parser found something not supported by this router. """ - - -class RouteBuildError(RouteError): - """ The route could not be built. """ - - -def _re_flatten(p): - """ Turn all capturing groups in a regular expression pattern into - non-capturing groups. 
""" - if '(' not in p: - return p - return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', - lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) - - -class Router(object): - """ A Router is an ordered collection of route->target pairs. It is used to - efficiently match WSGI requests against a number of routes and return - the first target that satisfies the request. The target may be anything, - usually a string, ID or callable object. A route consists of a path-rule - and a HTTP method. - - The path-rule is either a static path (e.g. `/contact`) or a dynamic - path that contains wildcards (e.g. `/wiki/`). The wildcard syntax - and details on the matching order are described in docs:`routing`. - """ - - default_pattern = '[^/]+' - default_filter = 're' - - #: The current CPython regexp implementation does not allow more - #: than 99 matching groups per regular expression. - _MAX_GROUPS_PER_PATTERN = 99 - - def __init__(self, strict=False): - self.rules = [] # All rules in order - self._groups = {} # index of regexes to find them in dyna_routes - self.builder = {} # Data structure for the url builder - self.static = {} # Search structure for static routes - self.dyna_routes = {} - self.dyna_regexes = {} # Search structure for dynamic routes - #: If true, static routes are no longer checked first. - self.strict_order = strict - self.filters = { - 're': lambda conf: - (_re_flatten(conf or self.default_pattern), None, None), - 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), - 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), - 'path': lambda conf: (r'.+?', None, None)} - - def add_filter(self, name, func): - """ Add a filter. The provided function is called with the configuration - string as parameter and must return a (regexp, to_python, to_url) tuple. - The first element is a string, the last two are callables or None. 
""" - self.filters[name] = func - - rule_syntax = re.compile('(\\\\*)' - '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)' - '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)' - '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') - - def _itertokens(self, rule): - offset, prefix = 0, '' - for match in self.rule_syntax.finditer(rule): - prefix += rule[offset:match.start()] - g = match.groups() - if len(g[0])%2: # Escaped wildcard - prefix += match.group(0)[len(g[0]):] - offset = match.end() - continue - if prefix: - yield prefix, None, None - name, filtr, conf = g[4:7] if g[2] is None else g[1:4] - yield name, filtr or 'default', conf or None - offset, prefix = match.end(), '' - if offset <= len(rule) or prefix: - yield prefix+rule[offset:], None, None - - def add(self, rule, method, target, name=None): - """ Add a new rule or replace the target for an existing rule. """ - anons = 0 # Number of anonymous wildcards found - keys = [] # Names of keys - pattern = '' # Regular expression pattern with named groups - filters = [] # Lists of wildcard input filters - builder = [] # Data structure for the URL builder - is_static = True - - for key, mode, conf in self._itertokens(rule): - if mode: - is_static = False - if mode == 'default': mode = self.default_filter - mask, in_filter, out_filter = self.filters[mode](conf) - if not key: - pattern += '(?:%s)' % mask - key = 'anon%d' % anons - anons += 1 - else: - pattern += '(?P<%s>%s)' % (key, mask) - keys.append(key) - if in_filter: filters.append((key, in_filter)) - builder.append((key, out_filter or str)) - elif key: - pattern += re.escape(key) - builder.append((None, key)) - - self.builder[rule] = builder - if name: self.builder[name] = builder - - if is_static and not self.strict_order: - self.static.setdefault(method, {}) - self.static[method][self.build(rule)] = (target, None) - return - - try: - re_pattern = re.compile('^(%s)$' % pattern) - re_match = re_pattern.match - except re.error: - raise RouteSyntaxError("Could not add Route: 
%s (%s)" % (rule, _e())) - - if filters: - def getargs(path): - url_args = re_match(path).groupdict() - for name, wildcard_filter in filters: - try: - url_args[name] = wildcard_filter(url_args[name]) - except ValueError: - raise HTTPError(400, 'Path has wrong format.') - return url_args - elif re_pattern.groupindex: - def getargs(path): - return re_match(path).groupdict() - else: - getargs = None - - flatpat = _re_flatten(pattern) - whole_rule = (rule, flatpat, target, getargs) - - if (flatpat, method) in self._groups: - if DEBUG: - msg = 'Route <%s %s> overwrites a previously defined route' - warnings.warn(msg % (method, rule), RuntimeWarning) - self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule - else: - self.dyna_routes.setdefault(method, []).append(whole_rule) - self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 - - self._compile(method) - - def _compile(self, method): - all_rules = self.dyna_routes[method] - comborules = self.dyna_regexes[method] = [] - maxgroups = self._MAX_GROUPS_PER_PATTERN - for x in range(0, len(all_rules), maxgroups): - some = all_rules[x:x+maxgroups] - combined = (flatpat for (_, flatpat, _, _) in some) - combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) - combined = re.compile(combined).match - rules = [(target, getargs) for (_, _, target, getargs) in some] - comborules.append((combined, rules)) - - def build(self, _name, *anons, **query): - """ Build an URL by filling the wildcards in a rule. """ - builder = self.builder.get(_name) - if not builder: raise RouteBuildError("No route with that name.", _name) - try: - for i, value in enumerate(anons): query['anon%d'%i] = value - url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) - return url if not query else url+'?'+urlencode(query) - except KeyError: - raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) - - def match(self, environ): - """ Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). 
""" - verb = environ['REQUEST_METHOD'].upper() - path = environ['PATH_INFO'] or '/' - - if verb == 'HEAD': - methods = ['PROXY', verb, 'GET', 'ANY'] - else: - methods = ['PROXY', verb, 'ANY'] - - for method in methods: - if method in self.static and path in self.static[method]: - target, getargs = self.static[method][path] - return target, getargs(path) if getargs else {} - elif method in self.dyna_regexes: - for combined, rules in self.dyna_regexes[method]: - match = combined(path) - if match: - target, getargs = rules[match.lastindex - 1] - return target, getargs(path) if getargs else {} - - # No matching route found. Collect alternative methods for 405 response - allowed = set([]) - nocheck = set(methods) - for method in set(self.static) - nocheck: - if path in self.static[method]: - allowed.add(verb) - for method in set(self.dyna_regexes) - allowed - nocheck: - for combined, rules in self.dyna_regexes[method]: - match = combined(path) - if match: - allowed.add(method) - if allowed: - allow_header = ",".join(sorted(allowed)) - raise HTTPError(405, "Method not allowed.", Allow=allow_header) - - # No matching route and no alternative method found. We give up - raise HTTPError(404, "Not found: " + repr(path)) - - - - - - -class Route(object): - """ This class wraps a route callback along with route specific metadata and - configuration and applies Plugins on demand. It is also responsible for - turing an URL path rule into a regular expression usable by the Router. - """ - - def __init__(self, app, rule, method, callback, name=None, - plugins=None, skiplist=None, **config): - #: The application this route is installed to. - self.app = app - #: The path-rule string (e.g. ``/wiki/:page``). - self.rule = rule - #: The HTTP method as a string (e.g. ``GET``). - self.method = method - #: The original callback with no plugins applied. Useful for introspection. - self.callback = callback - #: The name of the route (if specified) or ``None``. 
- self.name = name or None - #: A list of route-specific plugins (see :meth:`Bottle.route`). - self.plugins = plugins or [] - #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). - self.skiplist = skiplist or [] - #: Additional keyword arguments passed to the :meth:`Bottle.route` - #: decorator are stored in this dictionary. Used for route-specific - #: plugin configuration and meta-data. - self.config = ConfigDict().load_dict(config) - - @cached_property - def call(self): - """ The route callback with all plugins applied. This property is - created on demand and then cached to speed up subsequent requests.""" - return self._make_callback() - - def reset(self): - """ Forget any cached values. The next time :attr:`call` is accessed, - all plugins are re-applied. """ - self.__dict__.pop('call', None) - - def prepare(self): - """ Do all on-demand work immediately (useful for debugging).""" - self.call - - def all_plugins(self): - """ Yield all Plugins affecting this route. """ - unique = set() - for p in reversed(self.app.plugins + self.plugins): - if True in self.skiplist: break - name = getattr(p, 'name', False) - if name and (name in self.skiplist or name in unique): continue - if p in self.skiplist or type(p) in self.skiplist: continue - if name: unique.add(name) - yield p - - def _make_callback(self): - callback = self.callback - for plugin in self.all_plugins(): - try: - if hasattr(plugin, 'apply'): - callback = plugin.apply(callback, self) - else: - callback = plugin(callback) - except RouteReset: # Try again with changed configuration. - return self._make_callback() - if not callback is self.callback: - update_wrapper(callback, self.callback) - return callback - - def get_undecorated_callback(self): - """ Return the callback. If the callback is a decorated function, try to - recover the original function. 
""" - func = self.callback - func = getattr(func, '__func__' if py3k else 'im_func', func) - closure_attr = '__closure__' if py3k else 'func_closure' - while hasattr(func, closure_attr) and getattr(func, closure_attr): - func = getattr(func, closure_attr)[0].cell_contents - return func - - def get_callback_args(self): - """ Return a list of argument names the callback (most likely) accepts - as keyword arguments. If the callback is a decorated function, try - to recover the original function before inspection. """ - return getargspec(self.get_undecorated_callback())[0] - - def get_config(self, key, default=None): - """ Lookup a config field and return its value, first checking the - route.config, then route.app.config.""" - for conf in (self.config, self.app.conifg): - if key in conf: return conf[key] - return default - - def __repr__(self): - cb = self.get_undecorated_callback() - return '<%s %r %r>' % (self.method, self.rule, cb) - - - - - - -############################################################################### -# Application Object ########################################################### -############################################################################### - - -class Bottle(object): - """ Each Bottle object represents a single, distinct web application and - consists of routes, callbacks, plugins, resources and configuration. - Instances are callable WSGI applications. - - :param catchall: If true (default), handle all exceptions. Turn off to - let debugging middleware handle exceptions. - """ - - def __init__(self, catchall=True, autojson=True): - - #: A :class:`ConfigDict` for app specific configuration. 
- self.config = ConfigDict() - self.config._on_change = functools.partial(self.trigger_hook, 'config') - self.config.meta_set('autojson', 'validate', bool) - self.config.meta_set('catchall', 'validate', bool) - self.config['catchall'] = catchall - self.config['autojson'] = autojson - - #: A :class:`ResourceManager` for application files - self.resources = ResourceManager() - - self.routes = [] # List of installed :class:`Route` instances. - self.router = Router() # Maps requests to :class:`Route` instances. - self.error_handler = {} - - # Core plugins - self.plugins = [] # List of installed plugins. - if self.config['autojson']: - self.install(JSONPlugin()) - self.install(TemplatePlugin()) - - #: If true, most exceptions are caught and returned as :exc:`HTTPError` - catchall = DictProperty('config', 'catchall') - - __hook_names = 'before_request', 'after_request', 'app_reset', 'config' - __hook_reversed = 'after_request' - - @cached_property - def _hooks(self): - return dict((name, []) for name in self.__hook_names) - - def add_hook(self, name, func): - """ Attach a callback to a hook. Three hooks are currently implemented: - - before_request - Executed once before each request. The request context is - available, but no routing has happened yet. - after_request - Executed once after each request regardless of its outcome. - app_reset - Called whenever :meth:`Bottle.reset` is called. - """ - if name in self.__hook_reversed: - self._hooks[name].insert(0, func) - else: - self._hooks[name].append(func) - - def remove_hook(self, name, func): - """ Remove a callback from a hook. """ - if name in self._hooks and func in self._hooks[name]: - self._hooks[name].remove(func) - return True - - def trigger_hook(self, __name, *args, **kwargs): - """ Trigger a hook and return a list of results. """ - return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] - - def hook(self, name): - """ Return a decorator that attaches a callback to a hook. 
See - :meth:`add_hook` for details.""" - def decorator(func): - self.add_hook(name, func) - return func - return decorator - - def mount(self, prefix, app, **options): - """ Mount an application (:class:`Bottle` or plain WSGI) to a specific - URL prefix. Example:: - - root_app.mount('/admin/', admin_app) - - :param prefix: path prefix or `mount-point`. If it ends in a slash, - that slash is mandatory. - :param app: an instance of :class:`Bottle` or a WSGI application. - - All other parameters are passed to the underlying :meth:`route` call. - """ - - segments = [p for p in prefix.split('/') if p] - if not segments: raise ValueError('Empty path prefix.') - path_depth = len(segments) - - def mountpoint_wrapper(): - try: - request.path_shift(path_depth) - rs = HTTPResponse([]) - def start_response(status, headerlist, exc_info=None): - if exc_info: - _raise(*exc_info) - rs.status = status - for name, value in headerlist: rs.add_header(name, value) - return rs.body.append - body = app(request.environ, start_response) - if body and rs.body: body = itertools.chain(rs.body, body) - rs.body = body or rs.body - return rs - finally: - request.path_shift(-path_depth) - - options.setdefault('skip', True) - options.setdefault('method', 'PROXY') - options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) - options['callback'] = mountpoint_wrapper - - self.route('/%s/<:re:.*>' % '/'.join(segments), **options) - if not prefix.endswith('/'): - self.route('/' + '/'.join(segments), **options) - - def merge(self, routes): - """ Merge the routes of another :class:`Bottle` application or a list of - :class:`Route` objects into this application. The routes keep their - 'owner', meaning that the :data:`Route.app` attribute is not - changed. 
""" - if isinstance(routes, Bottle): - routes = routes.routes - for route in routes: - self.add_route(route) - - def install(self, plugin): - """ Add a plugin to the list of plugins and prepare it for being - applied to all routes of this application. A plugin may be a simple - decorator or an object that implements the :class:`Plugin` API. - """ - if hasattr(plugin, 'setup'): plugin.setup(self) - if not callable(plugin) and not hasattr(plugin, 'apply'): - raise TypeError("Plugins must be callable or implement .apply()") - self.plugins.append(plugin) - self.reset() - return plugin - - def uninstall(self, plugin): - """ Uninstall plugins. Pass an instance to remove a specific plugin, a type - object to remove all plugins that match that type, a string to remove - all plugins with a matching ``name`` attribute or ``True`` to remove all - plugins. Return the list of removed plugins. """ - removed, remove = [], plugin - for i, plugin in list(enumerate(self.plugins))[::-1]: - if remove is True or remove is plugin or remove is type(plugin) \ - or getattr(plugin, 'name', True) == remove: - removed.append(plugin) - del self.plugins[i] - if hasattr(plugin, 'close'): plugin.close() - if removed: self.reset() - return removed - - def reset(self, route=None): - """ Reset all routes (force plugins to be re-applied) and clear all - caches. If an ID or route object is given, only that specific route - is affected. """ - if route is None: routes = self.routes - elif isinstance(route, Route): routes = [route] - else: routes = [self.routes[route]] - for route in routes: route.reset() - if DEBUG: - for route in routes: route.prepare() - self.trigger_hook('app_reset') - - def close(self): - """ Close the application and all installed plugins. """ - for plugin in self.plugins: - if hasattr(plugin, 'close'): plugin.close() - - def run(self, **kwargs): - """ Calls :func:`run` with the same parameters. 
""" - run(self, **kwargs) - - def match(self, environ): - """ Search for a matching route and return a (:class:`Route` , urlargs) - tuple. The second value is a dictionary with parameters extracted - from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" - return self.router.match(environ) - - def get_url(self, routename, **kargs): - """ Return a string that matches a named route """ - scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' - location = self.router.build(routename, **kargs).lstrip('/') - return urljoin(urljoin('/', scriptname), location) - - def add_route(self, route): - """ Add a route object, but do not change the :data:`Route.app` - attribute.""" - self.routes.append(route) - self.router.add(route.rule, route.method, route, name=route.name) - if DEBUG: route.prepare() - - def route(self, path=None, method='GET', callback=None, name=None, - apply=None, skip=None, **config): - """ A decorator to bind a function to a request URL. Example:: - - @app.route('/hello/:name') - def hello(name): - return 'Hello %s' % name - - The ``:name`` part is a wildcard. See :class:`Router` for syntax - details. - - :param path: Request path or a list of paths to listen to. If no - path is specified, it is automatically generated from the - signature of the function. - :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of - methods to listen to. (default: `GET`) - :param callback: An optional shortcut to avoid the decorator - syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` - :param name: The name for this route. (default: None) - :param apply: A decorator or plugin or a list of plugins. These are - applied to the route callback in addition to installed plugins. - :param skip: A list of plugins, plugin classes or names. Matching - plugins are not installed to this route. ``True`` skips all. 
- - Any additional keyword arguments are stored as route-specific - configuration and passed to plugins (see :meth:`Plugin.apply`). - """ - if callable(path): path, callback = None, path - plugins = makelist(apply) - skiplist = makelist(skip) - def decorator(callback): - if isinstance(callback, basestring): callback = load(callback) - for rule in makelist(path) or yieldroutes(callback): - for verb in makelist(method): - verb = verb.upper() - route = Route(self, rule, verb, callback, name=name, - plugins=plugins, skiplist=skiplist, **config) - self.add_route(route) - return callback - return decorator(callback) if callback else decorator - - def get(self, path=None, method='GET', **options): - """ Equals :meth:`route`. """ - return self.route(path, method, **options) - - def post(self, path=None, method='POST', **options): - """ Equals :meth:`route` with a ``POST`` method parameter. """ - return self.route(path, method, **options) - - def put(self, path=None, method='PUT', **options): - """ Equals :meth:`route` with a ``PUT`` method parameter. """ - return self.route(path, method, **options) - - def delete(self, path=None, method='DELETE', **options): - """ Equals :meth:`route` with a ``DELETE`` method parameter. """ - return self.route(path, method, **options) - - def patch(self, path=None, method='PATCH', **options): - """ Equals :meth:`route` with a ``PATCH`` method parameter. """ - return self.route(path, method, **options) - - def error(self, code=500): - """ Decorator: Register an output handler for a HTTP error code""" - def wrapper(handler): - self.error_handler[int(code)] = handler - return handler - return wrapper - - def default_error_handler(self, res): - return tob(template(ERROR_PAGE_TEMPLATE, e=res)) - - def _handle(self, environ): - path = environ['bottle.raw_path'] = environ['PATH_INFO'] - if py3k: - try: - environ['PATH_INFO'] = path.encode('latin1').decode('utf8') - except UnicodeError: - return HTTPError(400, 'Invalid path string. 
Expected UTF-8') - - try: - environ['bottle.app'] = self - request.bind(environ) - response.bind() - try: - self.trigger_hook('before_request') - route, args = self.router.match(environ) - environ['route.handle'] = route - environ['bottle.route'] = route - environ['route.url_args'] = args - return route.call(**args) - finally: - self.trigger_hook('after_request') - except HTTPResponse: - return _e() - except RouteReset: - route.reset() - return self._handle(environ) - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except Exception: - if not self.catchall: raise - stacktrace = format_exc() - environ['wsgi.errors'].write(stacktrace) - return HTTPError(500, "Internal Server Error", _e(), stacktrace) - - def _cast(self, out, peek=None): - """ Try to convert the parameter into something WSGI compatible and set - correct HTTP headers when possible. - Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, - iterable of strings and iterable of unicodes - """ - - # Empty output is done here - if not out: - if 'Content-Length' not in response: - response['Content-Length'] = 0 - return [] - # Join lists of byte or unicode strings. Mixed lists are NOT supported - if isinstance(out, (tuple, list))\ - and isinstance(out[0], (bytes, unicode)): - out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' - # Encode unicode strings - if isinstance(out, unicode): - out = out.encode(response.charset) - # Byte Strings are just returned - if isinstance(out, bytes): - if 'Content-Length' not in response: - response['Content-Length'] = len(out) - return [out] - # HTTPError or HTTPException (recursive, because they may wrap anything) - # TODO: Handle these explicitly in handle() or make them iterable. 
- if isinstance(out, HTTPError): - out.apply(response) - out = self.error_handler.get(out.status_code, self.default_error_handler)(out) - return self._cast(out) - if isinstance(out, HTTPResponse): - out.apply(response) - return self._cast(out.body) - - # File-like objects. - if hasattr(out, 'read'): - if 'wsgi.file_wrapper' in request.environ: - return request.environ['wsgi.file_wrapper'](out) - elif hasattr(out, 'close') or not hasattr(out, '__iter__'): - return WSGIFileWrapper(out) - - # Handle Iterables. We peek into them to detect their inner type. - try: - iout = iter(out) - first = next(iout) - while not first: - first = next(iout) - except StopIteration: - return self._cast('') - except HTTPResponse: - first = _e() - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except: - if not self.catchall: raise - first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) - - # These are the inner types allowed in iterator or generator objects. - if isinstance(first, HTTPResponse): - return self._cast(first) - elif isinstance(first, bytes): - new_iter = itertools.chain([first], iout) - elif isinstance(first, unicode): - encoder = lambda x: x.encode(response.charset) - new_iter = imap(encoder, itertools.chain([first], iout)) - else: - msg = 'Unsupported response type: %s' % type(first) - return self._cast(HTTPError(500, msg)) - if hasattr(out, 'close'): - new_iter = _closeiter(new_iter, out.close) - return new_iter - - def wsgi(self, environ, start_response): - """ The bottle WSGI-interface. """ - try: - out = self._cast(self._handle(environ)) - # rfc2616 section 4.3 - if response._status_code in (100, 101, 204, 304)\ - or environ['REQUEST_METHOD'] == 'HEAD': - if hasattr(out, 'close'): out.close() - out = [] - start_response(response._status_line, response.headerlist) - return out - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except: - if not self.catchall: raise - err = '

Critical error while processing request: %s

' \ - % html_escape(environ.get('PATH_INFO', '/')) - if DEBUG: - err += '

Error:

\n
\n%s\n
\n' \ - '

Traceback:

\n
\n%s\n
\n' \ - % (html_escape(repr(_e())), html_escape(format_exc())) - environ['wsgi.errors'].write(err) - headers = [('Content-Type', 'text/html; charset=UTF-8')] - start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) - return [tob(err)] - - def __call__(self, environ, start_response): - """ Each instance of :class:'Bottle' is a WSGI application. """ - return self.wsgi(environ, start_response) - - def __enter__(self): - """ Use this application as default for all module-level shortcuts. """ - default_app.push(self) - return self - - def __exit__(self, exc_type, exc_value, traceback): - default_app.pop() - - - - - -############################################################################### -# HTTP and WSGI Tools ########################################################## -############################################################################### - -class BaseRequest(object): - """ A wrapper for WSGI environment dictionaries that adds a lot of - convenient access methods and properties. Most of them are read-only. - - Adding new attributes to a request actually adds them to the environ - dictionary (as 'bottle.request.ext.'). This is the recommended - way to store and access request-specific data. - """ - - __slots__ = ('environ', ) - - #: Maximum size of memory buffer for :attr:`body` in bytes. - MEMFILE_MAX = 102400 - - def __init__(self, environ=None): - """ Wrap a WSGI environ dictionary. """ - #: The wrapped WSGI environ dictionary. This is the only real attribute. - #: All other attributes actually are read-only properties. - self.environ = {} if environ is None else environ - self.environ['bottle.request'] = self - - @DictProperty('environ', 'bottle.app', read_only=True) - def app(self): - """ Bottle application handling this request. 
""" - raise RuntimeError('This request is not connected to an application.') - - @DictProperty('environ', 'bottle.route', read_only=True) - def route(self): - """ The bottle :class:`Route` object that matches this request. """ - raise RuntimeError('This request is not connected to a route.') - - @DictProperty('environ', 'route.url_args', read_only=True) - def url_args(self): - """ The arguments extracted from the URL. """ - raise RuntimeError('This request is not connected to a route.') - - @property - def path(self): - """ The value of ``PATH_INFO`` with exactly one prefixed slash (to fix - broken clients and avoid the "empty path" edge case). """ - return '/' + self.environ.get('PATH_INFO','').lstrip('/') - - @property - def method(self): - """ The ``REQUEST_METHOD`` value as an uppercase string. """ - return self.environ.get('REQUEST_METHOD', 'GET').upper() - - @DictProperty('environ', 'bottle.request.headers', read_only=True) - def headers(self): - """ A :class:`WSGIHeaderDict` that provides case-insensitive access to - HTTP request headers. """ - return WSGIHeaderDict(self.environ) - - def get_header(self, name, default=None): - """ Return the value of a request header, or a given default value. """ - return self.headers.get(name, default) - - @DictProperty('environ', 'bottle.request.cookies', read_only=True) - def cookies(self): - """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT - decoded. Use :meth:`get_cookie` if you expect signed cookies. """ - cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() - return FormsDict((c.key, c.value) for c in cookies) - - def get_cookie(self, key, default=None, secret=None): - """ Return the content of a cookie. To read a `Signed Cookie`, the - `secret` must match the one used to create the cookie (see - :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing - cookie or wrong signature), return a default value. 
""" - value = self.cookies.get(key) - if secret and value: - dec = cookie_decode(value, secret) # (key, value) tuple or None - return dec[1] if dec and dec[0] == key else default - return value or default - - @DictProperty('environ', 'bottle.request.query', read_only=True) - def query(self): - """ The :attr:`query_string` parsed into a :class:`FormsDict`. These - values are sometimes called "URL arguments" or "GET parameters", but - not to be confused with "URL wildcards" as they are provided by the - :class:`Router`. """ - get = self.environ['bottle.get'] = FormsDict() - pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) - for key, value in pairs: - get[key] = value - return get - - @DictProperty('environ', 'bottle.request.forms', read_only=True) - def forms(self): - """ Form values parsed from an `url-encoded` or `multipart/form-data` - encoded POST or PUT request body. The result is returned as a - :class:`FormsDict`. All keys and values are strings. File uploads - are stored separately in :attr:`files`. """ - forms = FormsDict() - for name, item in self.POST.allitems(): - if not isinstance(item, FileUpload): - forms[name] = item - return forms - - @DictProperty('environ', 'bottle.request.params', read_only=True) - def params(self): - """ A :class:`FormsDict` with the combined values of :attr:`query` and - :attr:`forms`. File uploads are stored in :attr:`files`. """ - params = FormsDict() - for key, value in self.query.allitems(): - params[key] = value - for key, value in self.forms.allitems(): - params[key] = value - return params - - @DictProperty('environ', 'bottle.request.files', read_only=True) - def files(self): - """ File uploads parsed from `multipart/form-data` encoded POST or PUT - request body. The values are instances of :class:`FileUpload`. 
- - """ - files = FormsDict() - for name, item in self.POST.allitems(): - if isinstance(item, FileUpload): - files[name] = item - return files - - @DictProperty('environ', 'bottle.request.json', read_only=True) - def json(self): - """ If the ``Content-Type`` header is ``application/json``, this - property holds the parsed content of the request body. Only requests - smaller than :attr:`MEMFILE_MAX` are processed to avoid memory - exhaustion. """ - ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0] - if ctype == 'application/json': - b = self._get_body_string() - if not b: - return None - return json_loads(b) - return None - - def _iter_body(self, read, bufsize): - maxread = max(0, self.content_length) - while maxread: - part = read(min(maxread, bufsize)) - if not part: break - yield part - maxread -= len(part) - - @staticmethod - def _iter_chunked(read, bufsize): - err = HTTPError(400, 'Error while parsing chunked transfer body.') - rn, sem, bs = tob('\r\n'), tob(';'), tob('') - while True: - header = read(1) - while header[-2:] != rn: - c = read(1) - header += c - if not c: raise err - if len(header) > bufsize: raise err - size, _, _ = header.partition(sem) - try: - maxread = int(tonat(size.strip()), 16) - except ValueError: - raise err - if maxread == 0: break - buff = bs - while maxread > 0: - if not buff: - buff = read(min(maxread, bufsize)) - part, buff = buff[:maxread], buff[maxread:] - if not part: raise err - yield part - maxread -= len(part) - if read(2) != rn: - raise err - - @DictProperty('environ', 'bottle.request.body', read_only=True) - def _body(self): - body_iter = self._iter_chunked if self.chunked else self._iter_body - read_func = self.environ['wsgi.input'].read - body, body_size, is_temp_file = BytesIO(), 0, False - for part in body_iter(read_func, self.MEMFILE_MAX): - body.write(part) - body_size += len(part) - if not is_temp_file and body_size > self.MEMFILE_MAX: - body, tmp = TemporaryFile(mode='w+b'), body - 
body.write(tmp.getvalue()) - del tmp - is_temp_file = True - self.environ['wsgi.input'] = body - body.seek(0) - return body - - def _get_body_string(self): - """ read body until content-length or MEMFILE_MAX into a string. Raise - HTTPError(413) on requests that are to large. """ - clen = self.content_length - if clen > self.MEMFILE_MAX: - raise HTTPError(413, 'Request to large') - if clen < 0: clen = self.MEMFILE_MAX + 1 - data = self.body.read(clen) - if len(data) > self.MEMFILE_MAX: # Fail fast - raise HTTPError(413, 'Request to large') - return data - - @property - def body(self): - """ The HTTP request body as a seek-able file-like object. Depending on - :attr:`MEMFILE_MAX`, this is either a temporary file or a - :class:`io.BytesIO` instance. Accessing this property for the first - time reads and replaces the ``wsgi.input`` environ variable. - Subsequent accesses just do a `seek(0)` on the file object. """ - self._body.seek(0) - return self._body - - @property - def chunked(self): - """ True if Chunked transfer encoding was. """ - return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() - - #: An alias for :attr:`query`. - GET = query - - @DictProperty('environ', 'bottle.request.post', read_only=True) - def POST(self): - """ The values of :attr:`forms` and :attr:`files` combined into a single - :class:`FormsDict`. Values are either strings (form values) or - instances of :class:`cgi.FieldStorage` (file uploads). 
- """ - post = FormsDict() - # We default to application/x-www-form-urlencoded for everything that - # is not multipart and take the fast path (also: 3.1 workaround) - if not self.content_type.startswith('multipart/'): - pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) - for key, value in pairs: - post[key] = value - return post - - safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi - for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): - if key in self.environ: safe_env[key] = self.environ[key] - args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) - if py31: - args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', - newline='\n') - elif py3k: - args['encoding'] = 'utf8' - data = cgi.FieldStorage(**args) - self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958 - data = data.list or [] - for item in data: - if item.filename: - post[item.name] = FileUpload(item.file, item.name, - item.filename, item.headers) - else: - post[item.name] = item.value - return post - - @property - def url(self): - """ The full request URI including hostname and scheme. If your app - lives behind a reverse proxy or load balancer and you get confusing - results, make sure that the ``X-Forwarded-Host`` header is set - correctly. """ - return self.urlparts.geturl() - - @DictProperty('environ', 'bottle.request.urlparts', read_only=True) - def urlparts(self): - """ The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. - The tuple contains (scheme, host, path, query_string and fragment), - but the fragment is always empty because it is not visible to the - server. """ - env = self.environ - http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') - host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') - if not host: - # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. 
- host = env.get('SERVER_NAME', '127.0.0.1') - port = env.get('SERVER_PORT') - if port and port != ('80' if http == 'http' else '443'): - host += ':' + port - path = urlquote(self.fullpath) - return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') - - @property - def fullpath(self): - """ Request path including :attr:`script_name` (if present). """ - return urljoin(self.script_name, self.path.lstrip('/')) - - @property - def query_string(self): - """ The raw :attr:`query` part of the URL (everything in between ``?`` - and ``#``) as a string. """ - return self.environ.get('QUERY_STRING', '') - - @property - def script_name(self): - """ The initial portion of the URL's `path` that was removed by a higher - level (server or routing middleware) before the application was - called. This script path is returned with leading and tailing - slashes. """ - script_name = self.environ.get('SCRIPT_NAME', '').strip('/') - return '/' + script_name + '/' if script_name else '/' - - def path_shift(self, shift=1): - """ Shift path segments from :attr:`path` to :attr:`script_name` and - vice versa. - - :param shift: The number of path segments to shift. May be negative - to change the shift direction. (default: 1) - """ - script = self.environ.get('SCRIPT_NAME','/') - self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) - - @property - def content_length(self): - """ The request body length as an integer. The client is responsible to - set this header. Otherwise, the real length of the body is unknown - and -1 is returned. In this case, :attr:`body` will be empty. """ - return int(self.environ.get('CONTENT_LENGTH') or -1) - - @property - def content_type(self): - """ The Content-Type header as a lowercase-string (default: empty). """ - return self.environ.get('CONTENT_TYPE', '').lower() - - @property - def is_xhr(self): - """ True if the request was triggered by a XMLHttpRequest. 
This only - works with JavaScript libraries that support the `X-Requested-With` - header (most of the popular libraries do). """ - requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','') - return requested_with.lower() == 'xmlhttprequest' - - @property - def is_ajax(self): - """ Alias for :attr:`is_xhr`. "Ajax" is not the right term. """ - return self.is_xhr - - @property - def auth(self): - """ HTTP authentication data as a (user, password) tuple. This - implementation currently supports basic (not digest) authentication - only. If the authentication happened at a higher level (e.g. in the - front web-server or a middleware), the password field is None, but - the user field is looked up from the ``REMOTE_USER`` environ - variable. On any errors, None is returned. """ - basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION','')) - if basic: return basic - ruser = self.environ.get('REMOTE_USER') - if ruser: return (ruser, None) - return None - - @property - def remote_route(self): - """ A list of all IPs that were involved in this request, starting with - the client IP and followed by zero or more proxies. This does only - work if all proxies support the ```X-Forwarded-For`` header. Note - that this information can be forged by malicious clients. """ - proxy = self.environ.get('HTTP_X_FORWARDED_FOR') - if proxy: return [ip.strip() for ip in proxy.split(',')] - remote = self.environ.get('REMOTE_ADDR') - return [remote] if remote else [] - - @property - def remote_addr(self): - """ The client IP as a string. Note that this information can be forged - by malicious clients. """ - route = self.remote_route - return route[0] if route else None - - def copy(self): - """ Return a new :class:`Request` with a shallow :attr:`environ` copy. 
""" - return Request(self.environ.copy()) - - def get(self, value, default=None): return self.environ.get(value, default) - def __getitem__(self, key): return self.environ[key] - def __delitem__(self, key): self[key] = ""; del(self.environ[key]) - def __iter__(self): return iter(self.environ) - def __len__(self): return len(self.environ) - def keys(self): return self.environ.keys() - def __setitem__(self, key, value): - """ Change an environ value and clear all caches that depend on it. """ - - if self.environ.get('bottle.request.readonly'): - raise KeyError('The environ dictionary is read-only.') - - self.environ[key] = value - todelete = () - - if key == 'wsgi.input': - todelete = ('body', 'forms', 'files', 'params', 'post', 'json') - elif key == 'QUERY_STRING': - todelete = ('query', 'params') - elif key.startswith('HTTP_'): - todelete = ('headers', 'cookies') - - for key in todelete: - self.environ.pop('bottle.request.'+key, None) - - def __repr__(self): - return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) - - def __getattr__(self, name): - """ Search in self.environ for additional user defined attributes. """ - try: - var = self.environ['bottle.request.ext.%s'%name] - return var.__get__(self) if hasattr(var, '__get__') else var - except KeyError: - raise AttributeError('Attribute %r not defined.' % name) - - def __setattr__(self, name, value): - if name == 'environ': return object.__setattr__(self, name, value) - self.environ['bottle.request.ext.%s'%name] = value - - - - -def _hkey(s): - return s.title().replace('_','-') - - -class HeaderProperty(object): - def __init__(self, name, reader=None, writer=str, default=''): - self.name, self.default = name, default - self.reader, self.writer = reader, writer - self.__doc__ = 'Current value of the %r header.' 
% name.title() - - def __get__(self, obj, _): - if obj is None: return self - value = obj.headers.get(self.name, self.default) - return self.reader(value) if self.reader else value - - def __set__(self, obj, value): - obj.headers[self.name] = self.writer(value) - - def __delete__(self, obj): - del obj.headers[self.name] - - -class BaseResponse(object): - """ Storage class for a response body as well as headers and cookies. - - This class does support dict-like case-insensitive item-access to - headers, but is NOT a dict. Most notably, iterating over a response - yields parts of the body and not the headers. - - :param body: The response body as one of the supported types. - :param status: Either an HTTP status code (e.g. 200) or a status line - including the reason phrase (e.g. '200 OK'). - :param headers: A dictionary or a list of name-value pairs. - - Additional keyword arguments are added to the list of headers. - Underscores in the header name are replaced with dashes. - """ - - default_status = 200 - default_content_type = 'text/html; charset=UTF-8' - - # Header blacklist for specific response codes - # (rfc2616 section 10.2.3 and 10.3.5) - bad_headers = { - 204: set(('Content-Type',)), - 304: set(('Allow', 'Content-Encoding', 'Content-Language', - 'Content-Length', 'Content-Range', 'Content-Type', - 'Content-Md5', 'Last-Modified'))} - - def __init__(self, body='', status=None, headers=None, **more_headers): - self._cookies = None - self._headers = {} - self.body = body - self.status = status or self.default_status - if headers: - if isinstance(headers, dict): - headers = headers.items() - for name, value in headers: - self.add_header(name, value) - if more_headers: - for name, value in more_headers.items(): - self.add_header(name, value) - - def copy(self, cls=None): - """ Returns a copy of self. 
""" - cls = cls or BaseResponse - assert issubclass(cls, BaseResponse) - copy = cls() - copy.status = self.status - copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) - if self._cookies: - copy._cookies = SimpleCookie() - copy._cookies.load(self._cookies.output()) - return copy - - def __iter__(self): - return iter(self.body) - - def close(self): - if hasattr(self.body, 'close'): - self.body.close() - - @property - def status_line(self): - """ The HTTP status line as a string (e.g. ``404 Not Found``).""" - return self._status_line - - @property - def status_code(self): - """ The HTTP status code as an integer (e.g. 404).""" - return self._status_code - - def _set_status(self, status): - if isinstance(status, int): - code, status = status, _HTTP_STATUS_LINES.get(status) - elif ' ' in status: - status = status.strip() - code = int(status.split()[0]) - else: - raise ValueError('String status line without a reason phrase.') - if not 100 <= code <= 999: raise ValueError('Status code out of range.') - self._status_code = code - self._status_line = str(status or ('%d Unknown' % code)) - - def _get_status(self): - return self._status_line - - status = property(_get_status, _set_status, None, - ''' A writeable property to change the HTTP response status. It accepts - either a numeric code (100-999) or a string with a custom reason - phrase (e.g. "404 Brain not found"). Both :data:`status_line` and - :data:`status_code` are updated accordingly. The return value is - always a status string. ''') - del _get_status, _set_status - - @property - def headers(self): - """ An instance of :class:`HeaderDict`, a case-insensitive dict-like - view on the response headers. 
""" - hdict = HeaderDict() - hdict.dict = self._headers - return hdict - - def __contains__(self, name): return _hkey(name) in self._headers - def __delitem__(self, name): del self._headers[_hkey(name)] - def __getitem__(self, name): return self._headers[_hkey(name)][-1] - def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] - - def get_header(self, name, default=None): - """ Return the value of a previously defined header. If there is no - header with that name, return a default value. """ - return self._headers.get(_hkey(name), [default])[-1] - - def set_header(self, name, value): - """ Create a new response header, replacing any previously defined - headers with the same name. """ - self._headers[_hkey(name)] = [str(value)] - - def add_header(self, name, value): - """ Add an additional response header, not removing duplicates. """ - self._headers.setdefault(_hkey(name), []).append(str(value)) - - def iter_headers(self): - """ Yield (header, value) tuples, skipping headers that are not - allowed with the current response status code. """ - return self.headerlist - - @property - def headerlist(self): - """ WSGI conform list of (header, value) tuples. 
""" - out = [] - headers = list(self._headers.items()) - if 'Content-Type' not in self._headers: - headers.append(('Content-Type', [self.default_content_type])) - if self._status_code in self.bad_headers: - bad_headers = self.bad_headers[self._status_code] - headers = [h for h in headers if h[0] not in bad_headers] - out += [(name, val) for name, vals in headers for val in vals] - if self._cookies: - for c in self._cookies.values(): - out.append(('Set-Cookie', c.OutputString())) - return out - - content_type = HeaderProperty('Content-Type') - content_length = HeaderProperty('Content-Length', reader=int) - expires = HeaderProperty('Expires', - reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), - writer=lambda x: http_date(x)) - - @property - def charset(self, default='UTF-8'): - """ Return the charset specified in the content-type header (default: utf8). """ - if 'charset=' in self.content_type: - return self.content_type.split('charset=')[-1].split(';')[0].strip() - return default - - def set_cookie(self, name, value, secret=None, **options): - """ Create a new cookie or replace an old one. If the `secret` parameter is - set, create a `Signed Cookie` (described below). - - :param name: the name of the cookie. - :param value: the value of the cookie. - :param secret: a signature key required for signed cookies. - - Additionally, this method accepts all RFC 2109 attributes that are - supported by :class:`cookie.Morsel`, including: - - :param max_age: maximum age in seconds. (default: None) - :param expires: a datetime object or UNIX timestamp. (default: None) - :param domain: the domain that is allowed to read the cookie. - (default: current domain) - :param path: limits the cookie to a given path (default: current path) - :param secure: limit the cookie to HTTPS connections (default: off). - :param httponly: prevents client-side javascript to read this cookie - (default: off, requires Python 2.6 or newer). 
- - If neither `expires` nor `max_age` is set (default), the cookie will - expire at the end of the browser session (as soon as the browser - window is closed). - - Signed cookies may store any pickle-able object and are - cryptographically signed to prevent manipulation. Keep in mind that - cookies are limited to 4kb in most browsers. - - Warning: Signed cookies are not encrypted (the client can still see - the content) and not copy-protected (the client can restore an old - cookie). The main intention is to make pickling and unpickling - save, not to store secret information at client side. - """ - if not self._cookies: - self._cookies = SimpleCookie() - - if secret: - value = touni(cookie_encode((name, value), secret)) - elif not isinstance(value, basestring): - raise TypeError('Secret key missing for non-string Cookie.') - - if len(value) > 4096: raise ValueError('Cookie value to long.') - self._cookies[name] = value - - for key, value in options.items(): - if key == 'max_age': - if isinstance(value, timedelta): - value = value.seconds + value.days * 24 * 3600 - if key == 'expires': - if isinstance(value, (datedate, datetime)): - value = value.timetuple() - elif isinstance(value, (int, float)): - value = time.gmtime(value) - value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) - self._cookies[name][key.replace('_', '-')] = value - - def delete_cookie(self, key, **kwargs): - """ Delete a cookie. Be sure to use the same `domain` and `path` - settings as used to create the cookie. 
""" - kwargs['max_age'] = -1 - kwargs['expires'] = 0 - self.set_cookie(key, '', **kwargs) - - def __repr__(self): - out = '' - for name, value in self.headerlist: - out += '%s: %s\n' % (name.title(), value.strip()) - return out - - -def _local_property(): - ls = threading.local() - def fget(_): - try: return ls.var - except AttributeError: - raise RuntimeError("Request context not initialized.") - def fset(_, value): ls.var = value - def fdel(_): del ls.var - return property(fget, fset, fdel, 'Thread-local property') - - -class LocalRequest(BaseRequest): - """ A thread-local subclass of :class:`BaseRequest` with a different - set of attributes for each thread. There is usually only one global - instance of this class (:data:`request`). If accessed during a - request/response cycle, this instance always refers to the *current* - request (even on a multithreaded server). """ - bind = BaseRequest.__init__ - environ = _local_property() - - -class LocalResponse(BaseResponse): - """ A thread-local subclass of :class:`BaseResponse` with a different - set of attributes for each thread. There is usually only one global - instance of this class (:data:`response`). Its attributes are used - to build the HTTP response at the end of the request/response cycle. 
- """ - bind = BaseResponse.__init__ - _status_line = _local_property() - _status_code = _local_property() - _cookies = _local_property() - _headers = _local_property() - body = _local_property() - - -Request = BaseRequest -Response = BaseResponse - - -class HTTPResponse(Response, BottleException): - def __init__(self, body='', status=None, headers=None, **more_headers): - super(HTTPResponse, self).__init__(body, status, headers, **more_headers) - - def apply(self, other): - other._status_code = self._status_code - other._status_line = self._status_line - other._headers = self._headers - other._cookies = self._cookies - other.body = self.body - - -class HTTPError(HTTPResponse): - default_status = 500 - def __init__(self, status=None, body=None, exception=None, traceback=None, - **options): - self.exception = exception - self.traceback = traceback - super(HTTPError, self).__init__(body, status, **options) - - - - - -############################################################################### -# Plugins ###################################################################### -############################################################################### - -class PluginError(BottleException): pass - - -class JSONPlugin(object): - name = 'json' - api = 2 - - def __init__(self, json_dumps=json_dumps): - self.json_dumps = json_dumps - - def apply(self, callback, _): - dumps = self.json_dumps - if not dumps: return callback - def wrapper(*a, **ka): - try: - rv = callback(*a, **ka) - except HTTPError: - rv = _e() - - if isinstance(rv, dict): - #Attempt to serialize, raises exception on failure - json_response = dumps(rv) - #Set content type only if serialization successful - response.content_type = 'application/json' - return json_response - elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): - rv.body = dumps(rv.body) - rv.content_type = 'application/json' - return rv - - return wrapper - - -class TemplatePlugin(object): - """ This plugin applies the 
:func:`view` decorator to all routes with a - `template` config parameter. If the parameter is a tuple, the second - element must be a dict with additional options (e.g. `template_engine`) - or default variables for the template. """ - name = 'template' - api = 2 - - def apply(self, callback, route): - conf = route.config.get('template') - if isinstance(conf, (tuple, list)) and len(conf) == 2: - return view(conf[0], **conf[1])(callback) - elif isinstance(conf, str): - return view(conf)(callback) - else: - return callback - - -#: Not a plugin, but part of the plugin API. TODO: Find a better place. -class _ImportRedirect(object): - def __init__(self, name, impmask): - """ Create a virtual package that redirects imports (see PEP 302). """ - self.name = name - self.impmask = impmask - self.module = sys.modules.setdefault(name, imp.new_module(name)) - self.module.__dict__.update({'__file__': __file__, '__path__': [], - '__all__': [], '__loader__': self}) - sys.meta_path.append(self) - - def find_module(self, fullname, path=None): - if '.' not in fullname: return - packname = fullname.rsplit('.', 1)[0] - if packname != self.name: return - return self - - def load_module(self, fullname): - if fullname in sys.modules: return sys.modules[fullname] - modname = fullname.rsplit('.', 1)[1] - realname = self.impmask % modname - __import__(realname) - module = sys.modules[fullname] = sys.modules[realname] - setattr(self.module, modname, module) - module.__loader__ = self - return module - - - - - - -############################################################################### -# Common Utilities ############################################################# -############################################################################### - - -class MultiDict(DictMixin): - """ This dict stores multiple values per key, but behaves exactly like a - normal dict in that it returns only the newest value for any given key. 
- There are special methods available to access the full list of values. - """ - - def __init__(self, *a, **k): - self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) - - def __len__(self): return len(self.dict) - def __iter__(self): return iter(self.dict) - def __contains__(self, key): return key in self.dict - def __delitem__(self, key): del self.dict[key] - def __getitem__(self, key): return self.dict[key][-1] - def __setitem__(self, key, value): self.append(key, value) - def keys(self): return self.dict.keys() - - if py3k: - def values(self): return (v[-1] for v in self.dict.values()) - def items(self): return ((k, v[-1]) for k, v in self.dict.items()) - def allitems(self): - return ((k, v) for k, vl in self.dict.items() for v in vl) - iterkeys = keys - itervalues = values - iteritems = items - iterallitems = allitems - - else: - def values(self): return [v[-1] for v in self.dict.values()] - def items(self): return [(k, v[-1]) for k, v in self.dict.items()] - def iterkeys(self): return self.dict.iterkeys() - def itervalues(self): return (v[-1] for v in self.dict.itervalues()) - def iteritems(self): - return ((k, v[-1]) for k, v in self.dict.iteritems()) - def iterallitems(self): - return ((k, v) for k, vl in self.dict.iteritems() for v in vl) - def allitems(self): - return [(k, v) for k, vl in self.dict.iteritems() for v in vl] - - def get(self, key, default=None, index=-1, type=None): - """ Return the most recent value for a key. - - :param default: The default value to be returned if the key is not - present or the type conversion fails. - :param index: An index for the list of available values. - :param type: If defined, this callable is used to cast the value - into a specific type. Exception are suppressed and result in - the default value to be returned. 
- """ - try: - val = self.dict[key][index] - return type(val) if type else val - except Exception: - pass - return default - - def append(self, key, value): - """ Add a new value to the list of values for this key. """ - self.dict.setdefault(key, []).append(value) - - def replace(self, key, value): - """ Replace the list of values with a single value. """ - self.dict[key] = [value] - - def getall(self, key): - """ Return a (possibly empty) list of values for a key. """ - return self.dict.get(key) or [] - - #: Aliases for WTForms to mimic other multi-dict APIs (Django) - getone = get - getlist = getall - - -class FormsDict(MultiDict): - """ This :class:`MultiDict` subclass is used to store request form data. - Additionally to the normal dict-like item access methods (which return - unmodified data as native strings), this container also supports - attribute-like access to its values. Attributes are automatically de- - or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing - attributes default to an empty string. """ - - #: Encoding used for attribute values. - input_encoding = 'utf8' - #: If true (default), unicode strings are first encoded with `latin1` - #: and then decoded to match :attr:`input_encoding`. - recode_unicode = True - - def _fix(self, s, encoding=None): - if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI - return s.encode('latin1').decode(encoding or self.input_encoding) - elif isinstance(s, bytes): # Python 2 WSGI - return s.decode(encoding or self.input_encoding) - else: - return s - - def decode(self, encoding=None): - """ Returns a copy with all keys and values de- or recoded to match - :attr:`input_encoding`. Some libraries (e.g. WTForms) want a - unicode dictionary. 
""" - copy = FormsDict() - enc = copy.input_encoding = encoding or self.input_encoding - copy.recode_unicode = False - for key, value in self.allitems(): - copy.append(self._fix(key, enc), self._fix(value, enc)) - return copy - - def getunicode(self, name, default=None, encoding=None): - """ Return the value as a unicode string, or the default. """ - try: - return self._fix(self[name], encoding) - except (UnicodeError, KeyError): - return default - - def __getattr__(self, name, default=unicode()): - # Without this guard, pickle generates a cryptic TypeError: - if name.startswith('__') and name.endswith('__'): - return super(FormsDict, self).__getattr__(name) - return self.getunicode(name, default=default) - - -class HeaderDict(MultiDict): - """ A case-insensitive version of :class:`MultiDict` that defaults to - replace the old value instead of appending it. """ - - def __init__(self, *a, **ka): - self.dict = {} - if a or ka: self.update(*a, **ka) - - def __contains__(self, key): return _hkey(key) in self.dict - def __delitem__(self, key): del self.dict[_hkey(key)] - def __getitem__(self, key): return self.dict[_hkey(key)][-1] - def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)] - def append(self, key, value): - self.dict.setdefault(_hkey(key), []).append(str(value)) - def replace(self, key, value): self.dict[_hkey(key)] = [str(value)] - def getall(self, key): return self.dict.get(_hkey(key)) or [] - def get(self, key, default=None, index=-1): - return MultiDict.get(self, _hkey(key), default, index) - def filter(self, names): - for name in [_hkey(n) for n in names]: - if name in self.dict: - del self.dict[name] - - -class WSGIHeaderDict(DictMixin): - """ This dict-like class wraps a WSGI environ dict and provides convenient - access to HTTP_* fields. Keys and values are native strings - (2.x bytes or 3.x unicode) and keys are case-insensitive. 
If the WSGI - environment contains non-native string values, these are de- or encoded - using a lossless 'latin1' character set. - - The API will remain stable even on changes to the relevant PEPs. - Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one - that uses non-native strings.) - """ - #: List of keys that do not have a ``HTTP_`` prefix. - cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') - - def __init__(self, environ): - self.environ = environ - - def _ekey(self, key): - """ Translate header field name to CGI/WSGI environ key. """ - key = key.replace('-','_').upper() - if key in self.cgikeys: - return key - return 'HTTP_' + key - - def raw(self, key, default=None): - """ Return the header value as is (may be bytes or unicode). """ - return self.environ.get(self._ekey(key), default) - - def __getitem__(self, key): - return tonat(self.environ[self._ekey(key)], 'latin1') - - def __setitem__(self, key, value): - raise TypeError("%s is read-only." % self.__class__) - - def __delitem__(self, key): - raise TypeError("%s is read-only." % self.__class__) - - def __iter__(self): - for key in self.environ: - if key[:5] == 'HTTP_': - yield key[5:].replace('_', '-').title() - elif key in self.cgikeys: - yield key.replace('_', '-').title() - - def keys(self): return [x for x in self] - def __len__(self): return len(self.keys()) - def __contains__(self, key): return self._ekey(key) in self.environ - - - -class ConfigDict(dict): - """ A dict-like configuration storage with additional support for - namespaces, validators, meta-data, on_change listeners and more. - """ - - __slots__ = ('_meta', '_on_change') - - def __init__(self): - self._meta = {} - self._on_change = lambda name, value: None - - def load_config(self, filename): - """ Load values from an ``*.ini`` style config file. - - If the config file contains sections, their names are used as - namespaces for the values within. 
The two special sections - ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix). - """ - conf = ConfigParser() - conf.read(filename) - for section in conf.sections(): - for key, value in conf.items(section): - if section not in ('DEFAULT', 'bottle'): - key = section + '.' + key - self[key] = value - return self - - def load_dict(self, source, namespace=''): - """ Load values from a dictionary structure. Nesting can be used to - represent namespaces. - - >>> c = ConfigDict() - >>> c.load_dict({'some': {'namespace': {'key': 'value'} } }) - {'some.namespace.key': 'value'} - """ - for key, value in source.items(): - if isinstance(key, str): - nskey = (namespace + '.' + key).strip('.') - if isinstance(value, dict): - self.load_dict(value, namespace=nskey) - else: - self[nskey] = value - else: - raise TypeError('Key has type %r (not a string)' % type(key)) - return self - - def update(self, *a, **ka): - """ If the first parameter is a string, all keys are prefixed with this - namespace. Apart from that it works just as the usual dict.update(). - Example: ``update('some.namespace', key='value')`` """ - prefix = '' - if a and isinstance(a[0], str): - prefix = a[0].strip('.') + '.' - a = a[1:] - for key, value in dict(*a, **ka).items(): - self[prefix+key] = value - - def setdefault(self, key, value): - if key not in self: - self[key] = value - - def __setitem__(self, key, value): - if not isinstance(key, str): - raise TypeError('Key has type %r (not a string)' % type(key)) - value = self.meta_get(key, 'filter', lambda x: x)(value) - if key in self and self[key] is value: - return - self._on_change(key, value) - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - self._on_change(key, None) - dict.__delitem__(self, key) - - def meta_get(self, key, metafield, default=None): - """ Return the value of a meta field for a key. 
""" - return self._meta.get(key, {}).get(metafield, default) - - def meta_set(self, key, metafield, value): - """ Set the meta field for a key to a new value. This triggers the - on-change handler for existing keys. """ - self._meta.setdefault(key, {})[metafield] = value - if key in self: - self[key] = self[key] - - def meta_list(self, key): - """ Return an iterable of meta field names defined for a key. """ - return self._meta.get(key, {}).keys() - - -class AppStack(list): - """ A stack-like list. Calling it returns the head of the stack. """ - - def __call__(self): - """ Return the current default application. """ - return self[-1] - - def push(self, value=None): - """ Add a new :class:`Bottle` instance to the stack """ - if not isinstance(value, Bottle): - value = Bottle() - self.append(value) - return value - - -class WSGIFileWrapper(object): - - def __init__(self, fp, buffer_size=1024*64): - self.fp, self.buffer_size = fp, buffer_size - for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): - if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) - - def __iter__(self): - buff, read = self.buffer_size, self.read - while True: - part = read(buff) - if not part: return - yield part - - -class _closeiter(object): - """ This only exists to be able to attach a .close method to iterators that - do not support attribute assignment (most of itertools). """ - - def __init__(self, iterator, close=None): - self.iterator = iterator - self.close_callbacks = makelist(close) - - def __iter__(self): - return iter(self.iterator) - - def close(self): - for func in self.close_callbacks: - func() - - -class ResourceManager(object): - """ This class manages a list of search paths and helps to find and open - application-bound resources (files). - - :param base: default value for :meth:`add_path` calls. - :param opener: callable used to open resources. - :param cachemode: controls which lookups are cached. One of 'all', - 'found' or 'none'. 
- """ - - def __init__(self, base='./', opener=open, cachemode='all'): - self.opener = opener - self.base = base - self.cachemode = cachemode - - #: A list of search paths. See :meth:`add_path` for details. - self.path = [] - #: A cache for resolved paths. ``res.cache.clear()`` clears the cache. - self.cache = {} - - def add_path(self, path, base=None, index=None, create=False): - """ Add a new path to the list of search paths. Return False if the - path does not exist. - - :param path: The new search path. Relative paths are turned into - an absolute and normalized form. If the path looks like a file - (not ending in `/`), the filename is stripped off. - :param base: Path used to absolutize relative search paths. - Defaults to :attr:`base` which defaults to ``os.getcwd()``. - :param index: Position within the list of search paths. Defaults - to last index (appends to the list). - - The `base` parameter makes it easy to reference files installed - along with a python module or package:: - - res.add_path('./resources/', __file__) - """ - base = os.path.abspath(os.path.dirname(base or self.base)) - path = os.path.abspath(os.path.join(base, os.path.dirname(path))) - path += os.sep - if path in self.path: - self.path.remove(path) - if create and not os.path.isdir(path): - os.makedirs(path) - if index is None: - self.path.append(path) - else: - self.path.insert(index, path) - self.cache.clear() - return os.path.exists(path) - - def __iter__(self): - """ Iterate over all existing files in all registered paths. """ - search = self.path[:] - while search: - path = search.pop() - if not os.path.isdir(path): continue - for name in os.listdir(path): - full = os.path.join(path, name) - if os.path.isdir(full): search.append(full) - else: yield full - - def lookup(self, name): - """ Search for a resource and return an absolute file path, or `None`. - - The :attr:`path` list is searched in order. The first match is - returend. Symlinks are followed. 
The result is cached to speed up - future lookups. """ - if name not in self.cache or DEBUG: - for path in self.path: - fpath = os.path.join(path, name) - if os.path.isfile(fpath): - if self.cachemode in ('all', 'found'): - self.cache[name] = fpath - return fpath - if self.cachemode == 'all': - self.cache[name] = None - return self.cache[name] - - def open(self, name, mode='r', *args, **kwargs): - """ Find a resource and return a file object, or raise IOError. """ - fname = self.lookup(name) - if not fname: raise IOError("Resource %r not found." % name) - return self.opener(fname, mode=mode, *args, **kwargs) - - -class FileUpload(object): - - def __init__(self, fileobj, name, filename, headers=None): - """ Wrapper for file uploads. """ - #: Open file(-like) object (BytesIO buffer or temporary file) - self.file = fileobj - #: Name of the upload form field - self.name = name - #: Raw filename as sent by the client (may contain unsafe characters) - self.raw_filename = filename - #: A :class:`HeaderDict` with additional headers (e.g. content-type) - self.headers = HeaderDict(headers) if headers else HeaderDict() - - content_type = HeaderProperty('Content-Type') - content_length = HeaderProperty('Content-Length', reader=int, default=-1) - - @cached_property - def filename(self): - """ Name of the file on the client file system, but normalized to ensure - file system compatibility. An empty filename is returned as 'empty'. - - Only ASCII letters, digits, dashes, underscores and dots are - allowed in the final filename. Accents are removed, if possible. - Whitespace is replaced by a single dash. Leading or tailing dots - or dashes are removed. The filename is limited to 255 characters. 
- """ - fname = self.raw_filename - if not isinstance(fname, unicode): - fname = fname.decode('utf8', 'ignore') - fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII') - fname = os.path.basename(fname.replace('\\', os.path.sep)) - fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() - fname = re.sub(r'[-\s]+', '-', fname).strip('.-') - return fname[:255] or 'empty' - - def _copy_file(self, fp, chunk_size=2**16): - read, write, offset = self.file.read, fp.write, self.file.tell() - while 1: - buf = read(chunk_size) - if not buf: break - write(buf) - self.file.seek(offset) - - def save(self, destination, overwrite=False, chunk_size=2**16): - """ Save file to disk or copy its content to an open file(-like) object. - If *destination* is a directory, :attr:`filename` is added to the - path. Existing files are not overwritten by default (IOError). - - :param destination: File path, directory or file(-like) object. - :param overwrite: If True, replace existing files. (default: False) - :param chunk_size: Bytes to read at a time. (default: 64kb) - """ - if isinstance(destination, basestring): # Except file-likes here - if os.path.isdir(destination): - destination = os.path.join(destination, self.filename) - if not overwrite and os.path.exists(destination): - raise IOError('File exists.') - with open(destination, 'wb') as fp: - self._copy_file(fp, chunk_size) - else: - self._copy_file(destination, chunk_size) - - - - - - -############################################################################### -# Application Helper ########################################################### -############################################################################### - - -def abort(code=500, text='Unknown Error.'): - """ Aborts execution and causes a HTTP error. """ - raise HTTPError(code, text) - - -def redirect(url, code=None): - """ Aborts execution and causes a 303 or 302 redirect, depending on - the HTTP protocol version. 
""" - if not code: - code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 - res = response.copy(cls=HTTPResponse) - res.status = code - res.body = "" - res.set_header('Location', urljoin(request.url, url)) - raise res - - -def _file_iter_range(fp, offset, bytes, maxread=1024*1024): - """ Yield chunks from a range in a file. No chunk is bigger than maxread.""" - fp.seek(offset) - while bytes > 0: - part = fp.read(min(bytes, maxread)) - if not part: break - bytes -= len(part) - yield part - - -def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'): - """ Open a file in a safe way and return :exc:`HTTPResponse` with status - code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, - ``Content-Length`` and ``Last-Modified`` headers are set if possible. - Special support for ``If-Modified-Since``, ``Range`` and ``HEAD`` - requests. - - :param filename: Name or path of the file to send. - :param root: Root path for file lookups. Should be an absolute directory - path. - :param mimetype: Defines the content-type header (default: guess from - file extension) - :param download: If True, ask the browser to open a `Save as...` dialog - instead of opening the file with the associated program. You can - specify a custom filename as a string. If not specified, the - original filename is used (default: False). - :param charset: The charset to use for files with a ``text/*`` - mime-type. 
(default: UTF-8) - """ - - root = os.path.abspath(root) + os.sep - filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) - headers = dict() - - if not filename.startswith(root): - return HTTPError(403, "Access denied.") - if not os.path.exists(filename) or not os.path.isfile(filename): - return HTTPError(404, "File does not exist.") - if not os.access(filename, os.R_OK): - return HTTPError(403, "You do not have permission to access this file.") - - if mimetype == 'auto': - mimetype, encoding = mimetypes.guess_type(filename) - if encoding: headers['Content-Encoding'] = encoding - - if mimetype: - if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: - mimetype += '; charset=%s' % charset - headers['Content-Type'] = mimetype - - if download: - download = os.path.basename(filename if download == True else download) - headers['Content-Disposition'] = 'attachment; filename="%s"' % download - - stats = os.stat(filename) - headers['Content-Length'] = clen = stats.st_size - lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) - headers['Last-Modified'] = lm - - ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') - if ims: - ims = parse_date(ims.split(";")[0].strip()) - if ims is not None and ims >= int(stats.st_mtime): - headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - return HTTPResponse(status=304, **headers) - - body = '' if request.method == 'HEAD' else open(filename, 'rb') - - headers["Accept-Ranges"] = "bytes" - ranges = request.environ.get('HTTP_RANGE') - if 'HTTP_RANGE' in request.environ: - ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) - if not ranges: - return HTTPError(416, "Requested Range Not Satisfiable") - offset, end = ranges[0] - headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen) - headers["Content-Length"] = str(end-offset) - if body: body = _file_iter_range(body, offset, end-offset) - return HTTPResponse(body, status=206, 
**headers) - return HTTPResponse(body, **headers) - - - - - - -############################################################################### -# HTTP Utilities and MISC (TODO) ############################################### -############################################################################### - - -def debug(mode=True): - """ Change the debug level. - There is only one debug level supported at the moment.""" - global DEBUG - if mode: warnings.simplefilter('default') - DEBUG = bool(mode) - -def http_date(value): - if isinstance(value, (datedate, datetime)): - value = value.utctimetuple() - elif isinstance(value, (int, float)): - value = time.gmtime(value) - if not isinstance(value, basestring): - value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) - return value - -def parse_date(ims): - """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """ - try: - ts = email.utils.parsedate_tz(ims) - return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone - except (TypeError, ValueError, IndexError, OverflowError): - return None - -def parse_auth(header): - """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None""" - try: - method, data = header.split(None, 1) - if method.lower() == 'basic': - user, pwd = touni(base64.b64decode(tob(data))).split(':',1) - return user, pwd - except (KeyError, ValueError): - return None - -def parse_range_header(header, maxlen=0): - """ Yield (start, end) ranges parsed from a HTTP Range header. Skip - unsatisfiable ranges. 
The end index is non-inclusive.""" - if not header or header[:6] != 'bytes=': return - ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] - for start, end in ranges: - try: - if not start: # bytes=-100 -> last 100 bytes - start, end = max(0, maxlen-int(end)), maxlen - elif not end: # bytes=100- -> all but the first 99 bytes - start, end = int(start), maxlen - else: # bytes=100-200 -> bytes 100-200 (inclusive) - start, end = int(start), min(int(end)+1, maxlen) - if 0 <= start < end <= maxlen: - yield start, end - except ValueError: - pass - -def _parse_qsl(qs): - r = [] - for pair in qs.replace(';','&').split('&'): - if not pair: continue - nv = pair.split('=', 1) - if len(nv) != 2: nv.append('') - key = urlunquote(nv[0].replace('+', ' ')) - value = urlunquote(nv[1].replace('+', ' ')) - r.append((key, value)) - return r - -def _lscmp(a, b): - """ Compares two strings in a cryptographically safe way: - Runtime is not affected by length of common prefix. """ - return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b) - - -def cookie_encode(data, key): - """ Encode and sign a pickle-able object. Return a (byte) string """ - msg = base64.b64encode(pickle.dumps(data, -1)) - sig = base64.b64encode(hmac.new(tob(key), msg).digest()) - return tob('!') + sig + tob('?') + msg - - -def cookie_decode(data, key): - """ Verify and decode an encoded string. Return an object or None.""" - data = tob(data) - if cookie_is_encoded(data): - sig, msg = data.split(tob('?'), 1) - if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): - return pickle.loads(base64.b64decode(msg)) - return None - - -def cookie_is_encoded(data): - """ Return True if the argument looks like a encoded cookie.""" - return bool(data.startswith(tob('!')) and tob('?') in data) - - -def html_escape(string): - """ Escape HTML special characters ``&<>`` and quotes ``'"``. 
""" - return string.replace('&','&').replace('<','<').replace('>','>')\ - .replace('"','"').replace("'",''') - - -def html_quote(string): - """ Escape and quote a string to be used as an HTTP attribute.""" - return '"%s"' % html_escape(string).replace('\n',' ')\ - .replace('\r',' ').replace('\t',' ') - - -def yieldroutes(func): - """ Return a generator for routes that match the signature (name, args) - of the func parameter. This may yield more than one route if the function - takes optional keyword arguments. The output is best described by example:: - - a() -> '/a' - b(x, y) -> '/b//' - c(x, y=5) -> '/c/' and '/c//' - d(x=5, y=6) -> '/d' and '/d/' and '/d//' - """ - path = '/' + func.__name__.replace('__','/').lstrip('/') - spec = getargspec(func) - argc = len(spec[0]) - len(spec[3] or []) - path += ('/<%s>' * argc) % tuple(spec[0][:argc]) - yield path - for arg in spec[0][argc:]: - path += '/<%s>' % arg - yield path - - -def path_shift(script_name, path_info, shift=1): - """ Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa. - - :return: The modified paths. - :param script_name: The SCRIPT_NAME path. - :param script_name: The PATH_INFO path. - :param shift: The number of path fragments to shift. May be negative to - change the shift direction. (default: 1) - """ - if shift == 0: return script_name, path_info - pathlist = path_info.strip('/').split('/') - scriptlist = script_name.strip('/').split('/') - if pathlist and pathlist[0] == '': pathlist = [] - if scriptlist and scriptlist[0] == '': scriptlist = [] - if 0 < shift <= len(pathlist): - moved = pathlist[:shift] - scriptlist = scriptlist + moved - pathlist = pathlist[shift:] - elif 0 > shift >= -len(scriptlist): - moved = scriptlist[shift:] - pathlist = moved + pathlist - scriptlist = scriptlist[:shift] - else: - empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO' - raise AssertionError("Cannot shift. 
Nothing left from %s" % empty) - new_script_name = '/' + '/'.join(scriptlist) - new_path_info = '/' + '/'.join(pathlist) - if path_info.endswith('/') and pathlist: new_path_info += '/' - return new_script_name, new_path_info - - -def auth_basic(check, realm="private", text="Access denied"): - """ Callback decorator to require HTTP auth (basic). - TODO: Add route(check_auth=...) parameter. """ - def decorator(func): - @functools.wraps(func) - def wrapper(*a, **ka): - user, password = request.auth or (None, None) - if user is None or not check(user, password): - err = HTTPError(401, text) - err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm) - return err - return func(*a, **ka) - return wrapper - return decorator - - -# Shortcuts for common Bottle methods. -# They all refer to the current default application. - -def make_default_app_wrapper(name): - """ Return a callable that relays calls to the current default app. """ - @functools.wraps(getattr(Bottle, name)) - def wrapper(*a, **ka): - return getattr(app(), name)(*a, **ka) - return wrapper - -route = make_default_app_wrapper('route') -get = make_default_app_wrapper('get') -post = make_default_app_wrapper('post') -put = make_default_app_wrapper('put') -delete = make_default_app_wrapper('delete') -patch = make_default_app_wrapper('patch') -error = make_default_app_wrapper('error') -mount = make_default_app_wrapper('mount') -hook = make_default_app_wrapper('hook') -install = make_default_app_wrapper('install') -uninstall = make_default_app_wrapper('uninstall') -url = make_default_app_wrapper('get_url') - - - - - - - -############################################################################### -# Server Adapter ############################################################### -############################################################################### - - -class ServerAdapter(object): - quiet = False - def __init__(self, host='127.0.0.1', port=8080, **options): - self.options = options - self.host = 
host - self.port = int(port) - - def run(self, handler): # pragma: no cover - pass - - def __repr__(self): - args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()]) - return "%s(%s)" % (self.__class__.__name__, args) - - -class CGIServer(ServerAdapter): - quiet = True - def run(self, handler): # pragma: no cover - from wsgiref.handlers import CGIHandler - def fixed_environ(environ, start_response): - environ.setdefault('PATH_INFO', '') - return handler(environ, start_response) - CGIHandler().run(fixed_environ) - - -class FlupFCGIServer(ServerAdapter): - def run(self, handler): # pragma: no cover - import flup.server.fcgi - self.options.setdefault('bindAddress', (self.host, self.port)) - flup.server.fcgi.WSGIServer(handler, **self.options).run() - - -class WSGIRefServer(ServerAdapter): - def run(self, app): # pragma: no cover - from wsgiref.simple_server import WSGIRequestHandler, WSGIServer - from wsgiref.simple_server import make_server - import socket - - class FixedHandler(WSGIRequestHandler): - def address_string(self): # Prevent reverse DNS lookups please. - return self.client_address[0] - def log_request(*args, **kw): - if not self.quiet: - return WSGIRequestHandler.log_request(*args, **kw) - - handler_cls = self.options.get('handler_class', FixedHandler) - server_cls = self.options.get('server_class', WSGIServer) - - if ':' in self.host: # Fix wsgiref for IPv6 addresses. 
- if getattr(server_cls, 'address_family') == socket.AF_INET: - class server_cls(server_cls): - address_family = socket.AF_INET6 - - srv = make_server(self.host, self.port, app, server_cls, handler_cls) - srv.serve_forever() - - -class CherryPyServer(ServerAdapter): - def run(self, handler): # pragma: no cover - from cherrypy import wsgiserver - self.options['bind_addr'] = (self.host, self.port) - self.options['wsgi_app'] = handler - - certfile = self.options.get('certfile') - if certfile: - del self.options['certfile'] - keyfile = self.options.get('keyfile') - if keyfile: - del self.options['keyfile'] - - server = wsgiserver.CherryPyWSGIServer(**self.options) - if certfile: - server.ssl_certificate = certfile - if keyfile: - server.ssl_private_key = keyfile - - try: - server.start() - finally: - server.stop() - - -class WaitressServer(ServerAdapter): - def run(self, handler): - from waitress import serve - serve(handler, host=self.host, port=self.port) - - -class PasteServer(ServerAdapter): - def run(self, handler): # pragma: no cover - from paste import httpserver - from paste.translogger import TransLogger - handler = TransLogger(handler, setup_console_handler=(not self.quiet)) - httpserver.serve(handler, host=self.host, port=str(self.port), - **self.options) - - -class MeinheldServer(ServerAdapter): - def run(self, handler): - from meinheld import server - server.listen((self.host, self.port)) - server.run(handler) - - -class FapwsServer(ServerAdapter): - """ Extremely fast webserver using libev. See http://www.fapws.org/ """ - def run(self, handler): # pragma: no cover - import fapws._evwsgi as evwsgi - from fapws import base, config - port = self.port - if float(config.SERVER_IDENT[-2:]) > 0.4: - # fapws3 silently changed its API in 0.5 - port = str(port) - evwsgi.start(self.host, port) - # fapws3 never releases the GIL. Complain upstream. I tried. No luck. 
- if 'BOTTLE_CHILD' in os.environ and not self.quiet: - _stderr("WARNING: Auto-reloading does not work with Fapws3.\n") - _stderr(" (Fapws3 breaks python thread support)\n") - evwsgi.set_base_module(base) - def app(environ, start_response): - environ['wsgi.multiprocess'] = False - return handler(environ, start_response) - evwsgi.wsgi_cb(('', app)) - evwsgi.run() - - -class TornadoServer(ServerAdapter): - """ The super hyped asynchronous server by facebook. Untested. """ - def run(self, handler): # pragma: no cover - import tornado.wsgi, tornado.httpserver, tornado.ioloop - container = tornado.wsgi.WSGIContainer(handler) - server = tornado.httpserver.HTTPServer(container) - server.listen(port=self.port,address=self.host) - tornado.ioloop.IOLoop.instance().start() - - -class AppEngineServer(ServerAdapter): - """ Adapter for Google App Engine. """ - quiet = True - def run(self, handler): - from google.appengine.ext.webapp import util - # A main() function in the handler script enables 'App Caching'. - # Lets makes sure it is there. This _really_ improves performance. - module = sys.modules.get('__main__') - if module and not hasattr(module, 'main'): - module.main = lambda: util.run_wsgi_app(handler) - util.run_wsgi_app(handler) - - -class TwistedServer(ServerAdapter): - """ Untested. """ - def run(self, handler): - from twisted.web import server, wsgi - from twisted.python.threadpool import ThreadPool - from twisted.internet import reactor - thread_pool = ThreadPool() - thread_pool.start() - reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) - factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) - reactor.listenTCP(self.port, factory, interface=self.host) - if not reactor.running: - reactor.run() - - -class DieselServer(ServerAdapter): - """ Untested. 
""" - def run(self, handler): - from diesel.protocols.wsgi import WSGIApplication - app = WSGIApplication(handler, port=self.port) - app.run() - - -class GeventServer(ServerAdapter): - """ Untested. Options: - - * `fast` (default: False) uses libevent's http server, but has some - issues: No streaming, no pipelining, no SSL. - * See gevent.wsgi.WSGIServer() documentation for more options. - """ - def run(self, handler): - from gevent import wsgi, pywsgi, local - if not isinstance(threading.local(), local.local): - msg = "Bottle requires gevent.monkey.patch_all() (before import)" - raise RuntimeError(msg) - if not self.options.pop('fast', None): wsgi = pywsgi - self.options['log'] = None if self.quiet else 'default' - address = (self.host, self.port) - server = wsgi.WSGIServer(address, handler, **self.options) - if 'BOTTLE_CHILD' in os.environ: - import signal - signal.signal(signal.SIGINT, lambda s, f: server.stop()) - server.serve_forever() - - -class GeventSocketIOServer(ServerAdapter): - def run(self,handler): - from socketio import server - address = (self.host, self.port) - server.SocketIOServer(address, handler, **self.options).serve_forever() - - -class GunicornServer(ServerAdapter): - """ Untested. See http://gunicorn.org/configure.html for options. """ - def run(self, handler): - from gunicorn.app.base import Application - - config = {'bind': "%s:%d" % (self.host, int(self.port))} - config.update(self.options) - - class GunicornApplication(Application): - def init(self, parser, opts, args): - return config - - def load(self): - return handler - - GunicornApplication().run() - - -class EventletServer(ServerAdapter): - """ Untested. Options: - - * `backlog` adjust the eventlet backlog parameter which is the maximum - number of queued connections. Should be at least 1; the maximum - value is system-dependent. - * `family`: (default is 2) socket family, optional. See socket - documentation for available families. 
- """ - def run(self, handler): - from eventlet import wsgi, listen, patcher - if not patcher.is_monkey_patched(os): - msg = "Bottle requires eventlet.monkey_patch() (before import)" - raise RuntimeError(msg) - socket_args = {} - for arg in ('backlog', 'family'): - try: - socket_args[arg] = self.options.pop(arg) - except KeyError: - pass - address = (self.host, self.port) - try: - wsgi.server(listen(address, **socket_args), handler, - log_output=(not self.quiet)) - except TypeError: - # Fallback, if we have old version of eventlet - wsgi.server(listen(address), handler) - - -class RocketServer(ServerAdapter): - """ Untested. """ - def run(self, handler): - from rocket import Rocket - server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler }) - server.start() - - -class BjoernServer(ServerAdapter): - """ Fast server written in C: https://github.com/jonashaag/bjoern """ - def run(self, handler): - from bjoern import run - run(handler, self.host, self.port) - - -class AutoServer(ServerAdapter): - """ Untested. 
""" - adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] - def run(self, handler): - for sa in self.adapters: - try: - return sa(self.host, self.port, **self.options).run(handler) - except ImportError: - pass - -server_names = { - 'cgi': CGIServer, - 'flup': FlupFCGIServer, - 'wsgiref': WSGIRefServer, - 'waitress': WaitressServer, - 'cherrypy': CherryPyServer, - 'paste': PasteServer, - 'fapws3': FapwsServer, - 'tornado': TornadoServer, - 'gae': AppEngineServer, - 'twisted': TwistedServer, - 'diesel': DieselServer, - 'meinheld': MeinheldServer, - 'gunicorn': GunicornServer, - 'eventlet': EventletServer, - 'gevent': GeventServer, - 'geventSocketIO':GeventSocketIOServer, - 'rocket': RocketServer, - 'bjoern' : BjoernServer, - 'auto': AutoServer, -} - - - - - - -############################################################################### -# Application Control ########################################################## -############################################################################### - - -def load(target, **namespace): - """ Import a module or fetch an object from a module. - - * ``package.module`` returns `module` as a module object. - * ``pack.mod:name`` returns the module variable `name` from `pack.mod`. - * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result. - - The last form accepts not only function calls, but any type of - expression. Keyword arguments passed to this function are available as - local variables. 
Example: ``import_string('re:compile(x)', x='[a-z]')`` - """ - module, target = target.split(":", 1) if ':' in target else (target, None) - if module not in sys.modules: __import__(module) - if not target: return sys.modules[module] - if target.isalnum(): return getattr(sys.modules[module], target) - package_name = module.split('.')[0] - namespace[package_name] = sys.modules[package_name] - return eval('%s.%s' % (module, target), namespace) - - -def load_app(target): - """ Load a bottle application from a module and make sure that the import - does not affect the current default application, but returns a separate - application object. See :func:`load` for the target parameter. """ - global NORUN; NORUN, nr_old = True, NORUN - tmp = default_app.push() # Create a new "default application" - try: - rv = load(target) # Import the target module - return rv if callable(rv) else tmp - finally: - default_app.remove(tmp) # Remove the temporary added default application - NORUN = nr_old - -_debug = debug -def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, - interval=1, reloader=False, quiet=False, plugins=None, - debug=None, **kargs): - """ Start a server instance. This method blocks until the server terminates. - - :param app: WSGI application or target string supported by - :func:`load_app`. (default: :func:`default_app`) - :param server: Server adapter to use. See :data:`server_names` keys - for valid names or pass a :class:`ServerAdapter` subclass. - (default: `wsgiref`) - :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on - all interfaces including the external one. (default: 127.0.0.1) - :param port: Server port to bind to. Values below 1024 require root - privileges. (default: 8080) - :param reloader: Start auto-reloading server? (default: False) - :param interval: Auto-reloader interval in seconds (default: 1) - :param quiet: Suppress output to stdout and stderr? 
(default: False) - :param options: Options passed to the server adapter. - """ - if NORUN: return - if reloader and not os.environ.get('BOTTLE_CHILD'): - lockfile = None - try: - fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') - os.close(fd) # We only need this file to exist. We never write to it - while os.path.exists(lockfile): - args = [sys.executable] + sys.argv - environ = os.environ.copy() - environ['BOTTLE_CHILD'] = 'true' - environ['BOTTLE_LOCKFILE'] = lockfile - p = subprocess.Popen(args, env=environ) - while p.poll() is None: # Busy wait... - os.utime(lockfile, None) # I am alive! - time.sleep(interval) - if p.poll() != 3: - if os.path.exists(lockfile): os.unlink(lockfile) - sys.exit(p.poll()) - except KeyboardInterrupt: - pass - finally: - if os.path.exists(lockfile): - os.unlink(lockfile) - return - - try: - if debug is not None: _debug(debug) - app = app or default_app() - if isinstance(app, basestring): - app = load_app(app) - if not callable(app): - raise ValueError("Application is not callable: %r" % app) - - for plugin in plugins or []: - if isinstance(plugin, basestring): - plugin = load(plugin) - app.install(plugin) - - if server in server_names: - server = server_names.get(server) - if isinstance(server, basestring): - server = load(server) - if isinstance(server, type): - server = server(host=host, port=port, **kargs) - if not isinstance(server, ServerAdapter): - raise ValueError("Unknown or unsupported server: %r" % server) - - server.quiet = server.quiet or quiet - if not server.quiet: - _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) - _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) - _stderr("Hit Ctrl-C to quit.\n\n") - - if reloader: - lockfile = os.environ.get('BOTTLE_LOCKFILE') - bgcheck = FileCheckerThread(lockfile, interval) - with bgcheck: - server.run(app) - if bgcheck.status == 'reload': - sys.exit(3) - else: - server.run(app) - except KeyboardInterrupt: - 
pass - except (SystemExit, MemoryError): - raise - except: - if not reloader: raise - if not getattr(server, 'quiet', quiet): - print_exc() - time.sleep(interval) - sys.exit(3) - - - -class FileCheckerThread(threading.Thread): - """ Interrupt main-thread as soon as a changed module file is detected, - the lockfile gets deleted or gets to old. """ - - def __init__(self, lockfile, interval): - threading.Thread.__init__(self) - self.daemon = True - self.lockfile, self.interval = lockfile, interval - #: Is one of 'reload', 'error' or 'exit' - self.status = None - - def run(self): - exists = os.path.exists - mtime = lambda p: os.stat(p).st_mtime - files = dict() - - for module in list(sys.modules.values()): - path = getattr(module, '__file__', '') - if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] - if path and exists(path): files[path] = mtime(path) - - while not self.status: - if not exists(self.lockfile)\ - or mtime(self.lockfile) < time.time() - self.interval - 5: - self.status = 'error' - thread.interrupt_main() - for path, lmtime in list(files.items()): - if not exists(path) or mtime(path) > lmtime: - self.status = 'reload' - thread.interrupt_main() - break - time.sleep(self.interval) - - def __enter__(self): - self.start() - - def __exit__(self, exc_type, *_): - if not self.status: self.status = 'exit' # silent exit - self.join() - return exc_type is not None and issubclass(exc_type, KeyboardInterrupt) - - - - - -############################################################################### -# Template Adapters ############################################################ -############################################################################### - - -class TemplateError(HTTPError): - def __init__(self, message): - HTTPError.__init__(self, 500, message) - - -class BaseTemplate(object): - """ Base class and minimal API for template adapters """ - extensions = ['tpl','html','thtml','stpl'] - settings = {} #used in prepare() - defaults = {} #used in render() 
- - def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings): - """ Create a new template. - If the source parameter (str or buffer) is missing, the name argument - is used to guess a template filename. Subclasses can assume that - self.source and/or self.filename are set. Both are strings. - The lookup, encoding and settings parameters are stored as instance - variables. - The lookup parameter stores a list containing directory paths. - The encoding parameter should be used to decode byte strings or files. - The settings parameter contains a dict for engine-specific settings. - """ - self.name = name - self.source = source.read() if hasattr(source, 'read') else source - self.filename = source.filename if hasattr(source, 'filename') else None - self.lookup = [os.path.abspath(x) for x in lookup] if lookup else [] - self.encoding = encoding - self.settings = self.settings.copy() # Copy from class variable - self.settings.update(settings) # Apply - if not self.source and self.name: - self.filename = self.search(self.name, self.lookup) - if not self.filename: - raise TemplateError('Template %s not found.' % repr(name)) - if not self.source and not self.filename: - raise TemplateError('No template specified.') - self.prepare(**self.settings) - - @classmethod - def search(cls, name, lookup=None): - """ Search name in all directories specified in lookup. - First without, then with common extensions. Return first hit. 
""" - if not lookup: - depr('The template lookup path list should not be empty.', True) #0.12 - lookup = ['.'] - - if os.path.isabs(name) and os.path.isfile(name): - depr('Absolute template path names are deprecated.', True) #0.12 - return os.path.abspath(name) - - for spath in lookup: - spath = os.path.abspath(spath) + os.sep - fname = os.path.abspath(os.path.join(spath, name)) - if not fname.startswith(spath): continue - if os.path.isfile(fname): return fname - for ext in cls.extensions: - if os.path.isfile('%s.%s' % (fname, ext)): - return '%s.%s' % (fname, ext) - - @classmethod - def global_config(cls, key, *args): - """ This reads or sets the global settings stored in class.settings. """ - if args: - cls.settings = cls.settings.copy() # Make settings local to class - cls.settings[key] = args[0] - else: - return cls.settings[key] - - def prepare(self, **options): - """ Run preparations (parsing, caching, ...). - It should be possible to call this again to refresh a template or to - update settings. - """ - raise NotImplementedError - - def render(self, *args, **kwargs): - """ Render the template with the specified local variables and return - a single byte or unicode string. If it is a byte string, the encoding - must match self.encoding. This method must be thread-safe! - Local variables may be provided in dictionaries (args) - or directly, as keywords (kwargs). 
- """ - raise NotImplementedError - - -class MakoTemplate(BaseTemplate): - def prepare(self, **options): - from mako.template import Template - from mako.lookup import TemplateLookup - options.update({'input_encoding':self.encoding}) - options.setdefault('format_exceptions', bool(DEBUG)) - lookup = TemplateLookup(directories=self.lookup, **options) - if self.source: - self.tpl = Template(self.source, lookup=lookup, **options) - else: - self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options) - - def render(self, *args, **kwargs): - for dictarg in args: kwargs.update(dictarg) - _defaults = self.defaults.copy() - _defaults.update(kwargs) - return self.tpl.render(**_defaults) - - -class CheetahTemplate(BaseTemplate): - def prepare(self, **options): - from Cheetah.Template import Template - self.context = threading.local() - self.context.vars = {} - options['searchList'] = [self.context.vars] - if self.source: - self.tpl = Template(source=self.source, **options) - else: - self.tpl = Template(file=self.filename, **options) - - def render(self, *args, **kwargs): - for dictarg in args: kwargs.update(dictarg) - self.context.vars.update(self.defaults) - self.context.vars.update(kwargs) - out = str(self.tpl) - self.context.vars.clear() - return out - - -class Jinja2Template(BaseTemplate): - def prepare(self, filters=None, tests=None, globals={}, **kwargs): - from jinja2 import Environment, FunctionLoader - self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) - if filters: self.env.filters.update(filters) - if tests: self.env.tests.update(tests) - if globals: self.env.globals.update(globals) - if self.source: - self.tpl = self.env.from_string(self.source) - else: - self.tpl = self.env.get_template(self.filename) - - def render(self, *args, **kwargs): - for dictarg in args: kwargs.update(dictarg) - _defaults = self.defaults.copy() - _defaults.update(kwargs) - return self.tpl.render(**_defaults) - - def loader(self, name): - fname = 
self.search(name, self.lookup) - if not fname: return - with open(fname, "rb") as f: - return f.read().decode(self.encoding) - - -class SimpleTemplate(BaseTemplate): - - def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka): - self.cache = {} - enc = self.encoding - self._str = lambda x: touni(x, enc) - self._escape = lambda x: escape_func(touni(x, enc)) - self.syntax = syntax - if noescape: - self._str, self._escape = self._escape, self._str - - @cached_property - def co(self): - return compile(self.code, self.filename or '<string>', 'exec') - - @cached_property - def code(self): - source = self.source - if not source: - with open(self.filename, 'rb') as f: - source = f.read() - try: - source, encoding = touni(source), 'utf8' - except UnicodeError: - depr('Template encodings other than utf8 are no longer supported.') #0.11 - source, encoding = touni(source, 'latin1'), 'latin1' - parser = StplParser(source, encoding=encoding, syntax=self.syntax) - code = parser.translate() - self.encoding = parser.encoding - return code - - def _rebase(self, _env, _name=None, **kwargs): - _env['_rebase'] = (_name, kwargs) - - def _include(self, _env, _name=None, **kwargs): - env = _env.copy() - env.update(kwargs) - if _name not in self.cache: - self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) - return self.cache[_name].execute(env['_stdout'], env) - - def execute(self, _stdout, kwargs): - env = self.defaults.copy() - env.update(kwargs) - env.update({'_stdout': _stdout, '_printlist': _stdout.extend, - 'include': functools.partial(self._include, env), - 'rebase': functools.partial(self._rebase, env), '_rebase': None, - '_str': self._str, '_escape': self._escape, 'get': env.get, - 'setdefault': env.setdefault, 'defined': env.__contains__ }) - eval(self.co, env) - if env.get('_rebase'): - subtpl, rargs = env.pop('_rebase') - rargs['base'] = ''.join(_stdout) #copy stdout - del _stdout[:] # clear stdout - return self._include(env, subtpl, **rargs) - 
return env - - def render(self, *args, **kwargs): - """ Render the template using keyword arguments as local variables. """ - env = {}; stdout = [] - for dictarg in args: env.update(dictarg) - env.update(kwargs) - self.execute(stdout, env) - return ''.join(stdout) - - -class StplSyntaxError(TemplateError): pass - - -class StplParser(object): - """ Parser for stpl templates. """ - _re_cache = {} #: Cache for compiled re patterns - # This huge pile of voodoo magic splits python code into 8 different tokens. - # 1: All kinds of python strings (trust me, it works) - _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \ - '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \ - '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \ - '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))' - _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later - # 2: Comments (until end of line, but not the newline itself) - _re_tok += '|(#.*)' - # 3,4: Keywords that start or continue a python block (only start of line) - _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \ - '|^([ \\t]*(?:elif|else|except|finally)\\b)' - # 5: Our special 'end' keyword (but only if it stands alone) - _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))' - # 6: A customizable end-of-code-block template token (only end of line) - _re_tok += '|(%(block_close)s[ \\t]*(?=$))' - # 7: And finally, a single newline. 
The 8th token is 'everything else' - _re_tok += '|(\\r?\\n)' - # Match the start tokens of code areas in a template - _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))' - # Match inline statements (may contain python strings) - _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl - - default_syntax = '<% %> % {{ }}' - - def __init__(self, source, syntax=None, encoding='utf8'): - self.source, self.encoding = touni(source, encoding), encoding - self.set_syntax(syntax or self.default_syntax) - self.code_buffer, self.text_buffer = [], [] - self.lineno, self.offset = 1, 0 - self.indent, self.indent_mod = 0, 0 - - def get_syntax(self): - """ Tokens as a space separated string (default: <% %> % {{ }}) """ - return self._syntax - - def set_syntax(self, syntax): - self._syntax = syntax - self._tokens = syntax.split() - if not syntax in self._re_cache: - names = 'block_start block_close line_start inline_start inline_end' - etokens = map(re.escape, self._tokens) - pattern_vars = dict(zip(names.split(), etokens)) - patterns = (self._re_split, self._re_tok, self._re_inl) - patterns = [re.compile(p%pattern_vars) for p in patterns] - self._re_cache[syntax] = patterns - self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax] - - syntax = property(get_syntax, set_syntax) - - def translate(self): - if self.offset: raise RuntimeError('Parser is a one time instance.') - while True: - m = self.re_split.search(self.source[self.offset:]) - if m: - text = self.source[self.offset:self.offset+m.start()] - self.text_buffer.append(text) - self.offset += m.end() - if m.group(1): # Escape syntax - line, sep, _ = self.source[self.offset:].partition('\n') - self.text_buffer.append(m.group(2)+line+sep) - self.offset += len(line+sep)+1 - continue - self.flush_text() - self.read_code(multiline=bool(m.group(4))) - else: break - self.text_buffer.append(self.source[self.offset:]) - self.flush_text() - return ''.join(self.code_buffer) - - def 
read_code(self, multiline): - code_line, comment = '', '' - while True: - m = self.re_tok.search(self.source[self.offset:]) - if not m: - code_line += self.source[self.offset:] - self.offset = len(self.source) - self.write_code(code_line.strip(), comment) - return - code_line += self.source[self.offset:self.offset+m.start()] - self.offset += m.end() - _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups() - if code_line and (_blk1 or _blk2): # a if b else c - code_line += _blk1 or _blk2 - continue - if _str: # Python string - code_line += _str - elif _com: # Python comment (up to EOL) - comment = _com - if multiline and _com.strip().endswith(self._tokens[1]): - multiline = False # Allow end-of-block in comments - elif _blk1: # Start-block keyword (if/for/while/def/try/...) - code_line, self.indent_mod = _blk1, -1 - self.indent += 1 - elif _blk2: # Continue-block keyword (else/elif/except/...) - code_line, self.indent_mod = _blk2, -1 - elif _end: # The non-standard 'end'-keyword (ends a block) - self.indent -= 1 - elif _cend: # The end-code-block template token (usually '%>') - if multiline: multiline = False - else: code_line += _cend - else: # \n - self.write_code(code_line.strip(), comment) - self.lineno += 1 - code_line, comment, self.indent_mod = '', '', 0 - if not multiline: - break - - def flush_text(self): - text = ''.join(self.text_buffer) - del self.text_buffer[:] - if not text: return - parts, pos, nl = [], 0, '\\\n'+' '*self.indent - for m in self.re_inl.finditer(text): - prefix, pos = text[pos:m.start()], m.end() - if prefix: - parts.append(nl.join(map(repr, prefix.splitlines(True)))) - if prefix.endswith('\n'): parts[-1] += nl - parts.append(self.process_inline(m.group(1).strip())) - if pos < len(text): - prefix = text[pos:] - lines = prefix.splitlines(True) - if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3] - elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4] - parts.append(nl.join(map(repr, lines))) - code = 
'_printlist((%s,))' % ', '.join(parts) - self.lineno += code.count('\n')+1 - self.write_code(code) - - @staticmethod - def process_inline(chunk): - if chunk[0] == '!': return '_str(%s)' % chunk[1:] - return '_escape(%s)' % chunk - - def write_code(self, line, comment=''): - code = ' ' * (self.indent+self.indent_mod) - code += line.lstrip() + comment + '\n' - self.code_buffer.append(code) - - -def template(*args, **kwargs): - """ - Get a rendered template as a string iterator. - You can use a name, a filename or a template string as first parameter. - Template rendering arguments can be passed as dictionaries - or directly (as keyword arguments). - """ - tpl = args[0] if args else None - adapter = kwargs.pop('template_adapter', SimpleTemplate) - lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) - tplid = (id(lookup), tpl) - if tplid not in TEMPLATES or DEBUG: - settings = kwargs.pop('template_settings', {}) - if isinstance(tpl, adapter): - TEMPLATES[tplid] = tpl - if settings: TEMPLATES[tplid].prepare(**settings) - elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: - TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) - else: - TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) - if not TEMPLATES[tplid]: - abort(500, 'Template (%s) not found' % tpl) - for dictarg in args[1:]: kwargs.update(dictarg) - return TEMPLATES[tplid].render(kwargs) - -mako_template = functools.partial(template, template_adapter=MakoTemplate) -cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) -jinja2_template = functools.partial(template, template_adapter=Jinja2Template) - - -def view(tpl_name, **defaults): - """ Decorator: renders a template for a handler. - The handler can control its behavior like that: - - - return a dict of template vars to fill out the template - - return something other than a dict and the view decorator will not - process the template, but return the handler result as is. 
- This includes returning a HTTPResponse(dict) to get, - for instance, JSON with autojson or other castfilters. - """ - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - if isinstance(result, (dict, DictMixin)): - tplvars = defaults.copy() - tplvars.update(result) - return template(tpl_name, **tplvars) - elif result is None: - return template(tpl_name, defaults) - return result - return wrapper - return decorator - -mako_view = functools.partial(view, template_adapter=MakoTemplate) -cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) -jinja2_view = functools.partial(view, template_adapter=Jinja2Template) - - - - - - -############################################################################### -# Constants and Globals ######################################################## -############################################################################### - - -TEMPLATE_PATH = ['./', './views/'] -TEMPLATES = {} -DEBUG = False -NORUN = False # If set, run() does nothing. Used by load_app() - -#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') -HTTP_CODES = httplib.responses -HTTP_CODES[418] = "I'm a teapot" # RFC 2324 -HTTP_CODES[428] = "Precondition Required" -HTTP_CODES[429] = "Too Many Requests" -HTTP_CODES[431] = "Request Header Fields Too Large" -HTTP_CODES[511] = "Network Authentication Required" -_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) - -#: The default template used for error pages. Override with @error() -ERROR_PAGE_TEMPLATE = """ -%%try: - %%from %s import DEBUG, request - - - - Error: {{e.status}} - - - -

Error: {{e.status}}

-

Sorry, the requested URL {{repr(request.url)}} - caused an error:

-
{{e.body}}
- %%if DEBUG and e.exception: -

Exception:

-
{{repr(e.exception)}}
- %%end - %%if DEBUG and e.traceback: -

Traceback:

-
{{e.traceback}}
- %%end - - -%%except ImportError: - ImportError: Could not generate the error page. Please add bottle to - the import path. -%%end -""" % __name__ - -#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a -#: request callback, this instance always refers to the *current* request -#: (even on a multithreaded server). -request = LocalRequest() - -#: A thread-safe instance of :class:`LocalResponse`. It is used to change the -#: HTTP response for the *current* request. -response = LocalResponse() - -#: A thread-safe namespace. Not used by Bottle. -local = threading.local() - -# Initialize app stack (create first empty Bottle app) -# BC: 0.6.4 and needed for run() -app = default_app = AppStack() -app.push() - -#: A virtual package that redirects import statements. -#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. -ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module - -if __name__ == '__main__': - opt, args, parser = _cmd_options, _cmd_args, _cmd_parser - if opt.version: - _stdout('Bottle %s\n'%__version__) - sys.exit(0) - if not args: - parser.print_help() - _stderr('\nError: No application entry point specified.\n') - sys.exit(1) - - sys.path.insert(0, '.') - sys.modules.setdefault('bottle', sys.modules['__main__']) - - host, port = (opt.bind or 'localhost'), 8080 - if ':' in host and host.rfind(']') < host.rfind(':'): - host, port = host.rsplit(':', 1) - host = host.strip('[]') - - run(args[0], host=host, port=int(port), server=opt.server, - reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) - - - - -# THE END diff --git a/IM/retry.py b/IM/retry.py new file mode 100644 index 000000000..83176a0ee --- /dev/null +++ b/IM/retry.py @@ -0,0 +1,48 @@ +import time +from functools import wraps + + +def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None, quiet = True): + """Retry calling the decorated function using an exponential backoff. 
+ + + http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ + original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry + + :param ExceptionToCheck: the exception to check. may be a tuple of + exceptions to check + :type ExceptionToCheck: Exception or tuple + :param tries: number of times to try (not retry) before giving up + :type tries: int + :param delay: initial delay between retries in seconds + :type delay: int + :param backoff: backoff multiplier e.g. value of 2 will double the delay + each retry + :type backoff: int + :param logger: logger to use. If None, print + :type logger: logging.Logger instance + :param quiet: flag to specify not to print any message. + :type quiet: bool + """ + def deco_retry(f): + + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + while mtries > 1: + try: + return f(*args, **kwargs) + except ExceptionToCheck, e: + if not quiet: + msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) + if logger: + logger.warning(msg) + else: + print msg + time.sleep(mdelay) + mtries -= 1 + mdelay *= backoff + return f(*args, **kwargs) + + return f_retry # true decorator + + return deco_retry diff --git a/changelog b/changelog index 41a0f5580..f5c04ec80 100644 --- a/changelog +++ b/changelog @@ -121,3 +121,4 @@ IM 1.3.0 * Bugfix in RADL with unicode strings * Add StarVM and StopVM functions to the API * Modify contextualziation process to ignore not running VMs enabling to configure the rest of VMs of an Inf. + * Enable SSH with retry in all the ctxt steps diff --git a/doc/source/manual.rst b/doc/source/manual.rst index ad62fdcdc..ea1f628f2 100644 --- a/doc/source/manual.rst +++ b/doc/source/manual.rst @@ -61,6 +61,8 @@ Optional Packages if the access to XML-RPC API is secured with SSL certificates (see :confval:`XMLRCP_SSL`). The Debian package is named ``python-springpython``. +* `Bottle `_ is needed to use the REST API + (see :confval:`ACTIVATE_REST`). 
The Debian package is named ``python-bottle``. * `CherryPy `_ is needed if needed to secure the REST API with SSL certificates (see :confval:`REST_SSL`). The Debian package is named ``python-cherrypy3``. diff --git a/setup.py b/setup.py index bde1304c2..95ccecff3 100644 --- a/setup.py +++ b/setup.py @@ -42,5 +42,5 @@ long_description="IM is a tool that ease the access and the usability of IaaS clouds by automating the VMI selection, deployment, configuration, software installation, monitoring and update of Virtual Appliances. It supports APIs from a large number of virtual platforms, making user applications cloud-agnostic. In addition it integrates a contextualization system to enable the installation and configuration of all the user required applications providing the user with a fully functional infrastructure.", description="IM is a tool to manage virtual infrastructures on Cloud deployments", platforms=["any"], - install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply"] + install_requires=["ansible >= 1.4","paramiko >= 1.14","PyYAML","SOAPpy","boto >= 2.29","apache-libcloud >= 0.17","ply", "bottle"] ) diff --git a/test/TestIM.py b/test/TestIM.py index ba049e0e7..e2dc94fc6 100755 --- a/test/TestIM.py +++ b/test/TestIM.py @@ -230,6 +230,8 @@ def test_22_start(self): """ Test StartInfrastructure function """ + # Ensure the VM is stopped + time.sleep(10) (success, res) = self.server.StartInfrastructure(self.inf_id, self.auth_data) self.assertTrue(success, msg="ERROR calling StartInfrastructure: " + str(res)) @@ -250,6 +252,8 @@ def test_24_start_vm(self): """ Test StartVM function """ + # Ensure the VM is stopped + time.sleep(10) (success, res) = self.server.StartVM(self.inf_id, 0, self.auth_data) self.assertTrue(success, msg="ERROR calling StartVM: " + str(res))