diff --git a/IM/connectors/OCCI.py b/IM/connectors/OCCI.py
index 6fba6c2ca..dcb56f9a5 100644
--- a/IM/connectors/OCCI.py
+++ b/IM/connectors/OCCI.py
@@ -1233,7 +1233,8 @@ def attach_volume(self, vm, volume_id, device, mount_path, auth_data):
             body += 'X-OCCI-Attribute: occi.core.id="%s"\n' % disk_id
             body += 'X-OCCI-Attribute: occi.core.target="%s/storage/%s"\n' % (self.cloud.path, volume_id)
             body += 'X-OCCI-Attribute: occi.core.source="%s/compute/%s"' % (self.cloud.path, vm.id)
-            body += 'X-OCCI-Attribute: occi.storagelink.deviceid="/dev/%s"' % device
+            if device:
+                body += '\nX-OCCI-Attribute: occi.storagelink.deviceid="/dev/%s"' % device
             # body += 'X-OCCI-Attribute: occi.storagelink.mountpoint="%s"' % mount_path
 
             resp = self.create_request('POST', url, auth_data, headers, body)
diff --git a/IM/connectors/OpenStack.py b/IM/connectors/OpenStack.py
index ce2eb2693..c27bdf8f0 100644
--- a/IM/connectors/OpenStack.py
+++ b/IM/connectors/OpenStack.py
@@ -98,6 +98,8 @@ def get_driver(self, auth_data):
                       "service_name": None,
                       "service_region": 'RegionOne',
                       "base_url": None,
+                      "network_url": None,
+                      "image_url": None,
                       "api_version": "2.0",
                       "domain": None}
 
diff --git a/doc/source/client.rst b/doc/source/client.rst
index 24719b8e5..372e348bc 100644
--- a/doc/source/client.rst
+++ b/doc/source/client.rst
@@ -52,6 +52,8 @@ user can specify the following parameters::
    auth_file=auth.dat
    xmlrpc_ssl_ca_certs=/tmp/pki/ca-chain.pem
 
+.. _inv-client:
+
 Invocation
 ----------
 
@@ -251,10 +253,22 @@ OpenStack has a set of additional fields to access a cloud site:
   The possible values are: ``2.0_password``, ``2.0_voms``, ``3.x_password`` or ``3.x_oidc_access_token``.
   The default value is ``2.0_password``.
 
-* ``base_url`` base URL to the OpenStack API endpoint. By default, the connector obtains API endpoint URL from the
+* ``api_version`` the api version used to connect with the nova endpoint.
+  The possible values are: ``1.0``, ``1.1``, ``2.0``, ``2.1`` or ``2.2``.
+  The default value is ``2.0``.
+
+* ``base_url`` base URL to the OpenStack API nova endpoint. By default, the connector obtains API endpoint URL from the
   server catalog, but if this argument is provided, this step is skipped and the provided value is used directly.
   The value is: http://cloud_server.com:8774/v2/.
 
+* ``network_url`` base URL to the OpenStack API neutron endpoint. By default, the connector obtains API endpoint URL from the
+  server catalog, but if this argument is provided, this step is skipped and the provided value is used directly.
+  The value is: http://cloud_server.com:9696.
+
+* ``image_url`` base URL to the OpenStack API glance endpoint. By default, the connector obtains API endpoint URL from the
+  server catalog, but if this argument is provided, this step is skipped and the provided value is used directly.
+  The value is: http://cloud_server.com:9292.
+
 * ``service_region`` the region of the cloud site (case sensitive). It is used to obtain the API endpoint URL.
   The default value is: ``RegionOne``.
 
diff --git a/doc/source/endpoints.rst b/doc/source/endpoints.rst
index 520eba02e..5fab5ff06 100644
--- a/doc/source/endpoints.rst
+++ b/doc/source/endpoints.rst
@@ -1,3 +1,5 @@
+.. _endpoints:
+
 Public IM Endpoints at UPV
 ==========================
 
diff --git a/doc/source/gstarted.rst b/doc/source/gstarted.rst
new file mode 100644
index 000000000..d5848070d
--- /dev/null
+++ b/doc/source/gstarted.rst
@@ -0,0 +1,184 @@
+Quick Start
+===========
+
+Launch IM Service
+-----------------
+
+To launch an instance of the Infrastructure Manager the easiest solution is to use the Docker image named
+`grycap/im` that has been created using the default configuration.
+
+To launch the IM service using docker::
+
+  $ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im grycap/im
+
+More information about this image can be found here: `https://registry.hub.docker.com/u/grycap/im/ <https://registry.hub.docker.com/u/grycap/im/>`_.
+
+IM Client tools
+---------------
+
+To access the IM service two client tools can be used (apart from the two APIs):
+
+* The IM client: You only have to call the install command of the pip tool with the IM-client package::
+
+    $ pip install IM-client
+
+  See full reference in IM Client :ref:`inv-client`.
+
+* The IM web: To launch the IM Web portal in the same machine where we have previously launched the IM service use
+  the following docker command::
+
+    $ sudo docker run -d -p 80:80 --name im-web --link im:im grycap/im-web
+
+  See full manual in IM Web :ref:`use-web`.
+
+
+
+In this first example we will use the IM-client tool to create, manage and finally destroy a single VM.
+
+Authentication file
+^^^^^^^^^^^^^^^^^^^
+To access the IM service an authentication file must be created. It must have one line per authentication element.
+It must have at least one line with the authentication data for the IM service and another one for the Cloud/s
+provider/s the user wants to access.
+
+An example to access an OpenNebula and/or an OpenStack site::
+
+   id = im; type = InfrastructureManager; username = user; password = pass
+   id = one; type = OpenNebula; host = oneserver:2633; username = user; password = pass
+   id = ost; type = OpenStack; host = https://ostserver:5000; username = user; password = pass; tenant = tenant
+
+All the options of the auth file are described in section :ref:`auth-file`.
+
+
+RADL basic example
+^^^^^^^^^^^^^^^^^^^
+Then the user must describe in an input file the cloud topology. It can be done in the IM native language (RADL) or
+the TOSCA standard. In this first example we will show how to launch a single VM using RADL::
+
+    network net (outbound = 'yes')
+    system node (
+    cpu.count >= 2 and
+    memory.size >= 2G and
+    net_interface.0.connection = 'net' and
+    disk.0.image.url = 'one://someserver.com/123'
+    )
+    deploy node 1
+
+In this RADL user is requesting 1 VM with at least 2 CPUs and 2 GB of RAM connected with a public IP. Finally
+the user must specify the image used to boot the VM with the field `disk.0.image.url`. In this URL the user must
+specify an existing image on the cloud provider where VM will be launched. O.S. image URLs for different
+Cloud providers:
+
+   * **one://<server>:<port>/<image-id>**, for OpenNebula;
+   * **ost://<server>/<image-id>**, for OpenStack;
+   * **aws://<region>/<ami-id>**, for Amazon Web Service;
+   * **gce://<region>/<image-name>**, for Google Cloud;
+   * **azr://<publisher>/<offer>/<sku>/<version>**, for Microsoft Azure; and
+   * **<fedcloud_occi_url>/<image-id>**, for FedCloud OCCI connector.
+   * **appdb://<site_name>/<apc_name>?<vo_name>**, for FedCloud OCCI connector using AppDB info (from ver. 1.6.0).
+   * **docker://<docker_image>**, for Docker images.
+   * **fbw://<fogbow_image_id>**, for FogBow images.
+
+See full information about RADL language at :ref:`radl`. More RADL examples are available at the IM GitHub repo
+`examples folder <https://github.com/grycap/im/tree/master/examples>`_.
+
+TOSCA basic example
+^^^^^^^^^^^^^^^^^^^
+
+In case you want to use a TOSCA file to define a similar example to the previous RADL one the file
+should be like that::
+
+  tosca_definitions_version: tosca_simple_yaml_1_0
+
+  imports:
+    - indigo_custom_types: https://raw.githubusercontent.com/indigo-dc/tosca-types/master/custom_types.yaml
+
+  topology_template:
+
+    node_templates:
+
+      simple_node:
+        type: tosca.nodes.indigo.Compute
+        capabilities:
+          endpoint:
+            properties:
+              network_name: PUBLIC
+          host:
+            properties:
+              num_cpus: 2
+              mem_size: 2 GB
+          os:
+            properties:
+              image: one://someserver.com/123
+
+    outputs:
+      node_ip:
+        value: { get_attribute: [ simple_node, public_address, 0 ] }
+      node_creds:
+        value: { get_attribute: [ simple_node, endpoint, credential, 0 ] }
+
+For more information about TOSCA see the
+`OASIS TOSCA Simple Profile in YAML Version 1.0 <http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.0/TOSCA-Simple-Profile-YAML-v1.0.html>`_.
+The TOSCA support has been developed under the framework of the `INDIGO DataCloud EU project <http://www.indigo-datacloud.eu/>`_.
+You can see some input examples at
+`https://github.com/indigo-dc/tosca-types/tree/master/examples <https://github.com/indigo-dc/tosca-types/tree/master/examples>`_.
+
+Basic IM Client usage
+^^^^^^^^^^^^^^^^^^^^^
+
+Now that we have the authentication file and the RADL input file we can create our first infrastructure using
+the IM client::
+
+  $ im_client.py -a auth.dat create input_file
+
+By default this command expects the IM to be hosted on the `localhost` machine. If the server is located at other
+host you must specify the `-u` or `-r` parameters to set the URL of the XML-RPC API or REST API respectively::
+
+  $ im_client.py -a auth.dat create input_file -r http://imhost.com:8800
+  $ im_client.py -a auth.dat create input_file -u http://imhost.com:8899
+
+To avoid putting these parameters on all the IM Client calls you can create an `im_client.cfg` file with the
+default options to use. See all the options at the client manual page: :ref:`inv-client`.
+
+At this moment the IM client will contact the IM service to start the creation of the infrastructure. It will require
+some time depending on the number of VMs or the cloud provider. Finally when all the VMs are created it will return a
+message like this::
+
+  Connected with: http://localhost:8899
+  Infrastructure successfully created with ID: 573c4b0a-67d9-11e8-b75f-0a580af401da
+
+In case of error in the creation of all the VMs it will return an error message describing the errors raised.
+If only some of them fail it will return the ID and the user must check the status of the VMs and take the
+corresponding decisions. To get the state of the infrastructure call the `getstate` option of the client::
+
+  $ im_client.py -a auth.dat getstate 573c4b0a-67d9-11e8-b75f-0a580af401da
+
+  The infrastructure is in state: running
+  VM ID: 0 is in state: running.
+
+You have to wait until your infrastructure is in the state `configured`. In the meanwhile you can get the output
+of the contextualization process to follow the status::
+
+  $ im_client.py -a auth.dat getcontmsg 573c4b0a-67d9-11e8-b75f-0a580af401da
+
+  Msg Contextualizator:
+
+  2018-05-02 14:20:31.816193: Select master VM
+  2018-05-02 14:20:31.819775: Wait master VM to boot
+  .
+  .
+  .
+
+This message will show all the steps made by the IM to fully configure the VM including the outputs of all
+Ansible processes. Then you can access via SSH the created VM with the command::
+
+  $ im_client.py -a auth.dat ssh 573c4b0a-67d9-11e8-b75f-0a580af401da
+
+And enjoy your customized VM!!
+
+Finally to destroy the infrastructure and all the related resources call the `destroy` operation::
+
+  $ im_client.py -a auth.dat destroy 573c4b0a-67d9-11e8-b75f-0a580af401da
+
+  Connected with: http://localhost:8899
+  Infrastructure successfully destroyed
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d1ee63334..06d36d012 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -12,6 +12,7 @@ Contents:
    :maxdepth: 2
 
    intro
+   gstarted
    videos
    manual
    radl
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
index 226236f34..6028c25db 100644
--- a/doc/source/manual.rst
+++ b/doc/source/manual.rst
@@ -521,11 +521,11 @@ Docker Image
 ============
 
 A Docker image named `grycap/im` has been created to make easier the deployment of an IM service using the
-default configuration. Information about this image can be found here: https://registry.hub.docker.com/u/grycap/im/.
+default configuration. Information about this image can be found here: `https://registry.hub.docker.com/u/grycap/im/ <https://registry.hub.docker.com/u/grycap/im/>`_.
 
 How to launch the IM service using docker::
 
-	$ sudo docker run -d -p 8899:8899 --name im grycap/im
+	$ sudo docker run -d -p 8899:8899 -p 8800:8800 --name im grycap/im
 
 To make the IM data persistent you also have to specify a persistent location for the IM database using the
 IM_DATA_DB environment variable and adding a volume::
diff --git a/doc/source/radl.rst b/doc/source/radl.rst
index 7d0d82667..0f32b690f 100644
--- a/doc/source/radl.rst
+++ b/doc/source/radl.rst
@@ -1,3 +1,5 @@
+.. _radl:
+
 Resource and Application Description Language (RADL)
 ====================================================
 
diff --git a/doc/source/videos.rst b/doc/source/videos.rst
index 579f5db83..c39f76fd7 100644
--- a/doc/source/videos.rst
+++ b/doc/source/videos.rst
@@ -1,3 +1,5 @@
+.. _videos:
+
 IM Videos
 =========
 
diff --git a/doc/source/web.rst b/doc/source/web.rst
index cec5f2247..b877ea8ce 100644
--- a/doc/source/web.rst
+++ b/doc/source/web.rst
@@ -106,6 +106,11 @@ This container is prepaired to work linked with the IM service container `grycap
  * `im_host`: Hostname of the IM service. Default value "im".
  * `im_port`: Port of the IM service. Default value "8899".
  * `im_db`: Location of the D.B. file used in the web application to store data. Default value "/home/www-data/im.db".
+ * `openid_issuer`: OpenID Issuer supported use "" to disable OpenID support.
+ * `openid_name`: OpenID Issuer name.
+ * `client_id`: OpenID Client data.
+ * `client_secret`: OpenID Client secret.
+ * `redirect_uri`: OpenID Redirect URI.
 
 ``docker run -p 80:80 -e "im_use_rest=true" -e "im_host=server.domain" -e "im_port=8800" -d grycap/im-web``
 
@@ -115,6 +120,8 @@ Add your own in the docker command:
 
 ``docker run -p 80:80 -p 443:443 -v server.crt:/etc/ssl/certs/server.crt -v server.key:/etc/ssl/certs/server.key -d grycap/im-web:1.5.5-ssl``
 
+.. _use-web:
+
 Usage
 -----
 The web interface of the IM enables the user to manage all the aspects related with the
@@ -124,13 +131,15 @@ Register
 ^^^^^^^^
 
 To access the we interface the user must register first to the application. Each user
-must include a username and a password to access the platform.
+must include a username and a password to access the platform. From 1.5.6 version OpenID
+authentication has been added.
 
 .. _figure_register:
 .. figure:: images/register.png
 
    Fig 1. Register page.
 
+
 Credentials
 ^^^^^^^^^^^
 
@@ -141,9 +150,9 @@ of user credentials. In this list there are two related with the IM components:
 * InfrastructureManager: user and password to access the IM service.
 * VMRC: user, password and URL to access the `VMRC <http://www.grycap.upv.es/vmrc>`_ service
 
-When a new user is registered the web UI automatically creates credentials to both of them to make easier
-the creation of credentials process. The rest of elements of this list are the user credentials to access
-diferent Cloud providers.
+When a new user is registered (or accesses with OpenID credentials) the web UI automatically creates
+credentials to both of them to make easier the creation of credentials process. The rest of elements
+of this list are the user credentials to access different Cloud providers.
 
 .. _figure_cred_list:
 .. figure:: images/cred_list.png
diff --git a/docker/Dockerfile b/docker/Dockerfile
index bae19819f..4b8661914 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -15,6 +15,8 @@ RUN pip install pyOpenSSL --upgrade -I
 RUN apt-get update && apt-get install --no-install-recommends -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev libmysqlclient20 && \
     pip install pycrypto && \
     pip install MySQL-python && \
+    # Install my version until PR #1215 is accepted
+    cd /tmp && git clone https://github.com/micafer/libcloud && cd libcloud && pip install /tmp/libcloud && \
     pip install IM==1.7.3 && \
     apt-get purge -y gcc libmysqld-dev libssl-dev libffi-dev libsqlite3-dev python-dev python-pip && \
     apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/
diff --git a/examples/README.md b/examples/README.md
index e0df55edc..7d9fb76a8 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -21,3 +21,9 @@ This directory has some RADL examples to deploy different types of virtual infra
 * swarm.radl: Installs a Docker Swarm cluster with one front-end node and two working nodes.
 * kubernetes.radl: Installs a Kubernetes cluster with one front-end node and two working nodes.
 * galaxy.radl: Installs a Galaxy Portal on top of a a SLURM cluster with one front-end node and two working nodes.
+
+## TOSCA examples:
+
+* tosca.yml: Launches an Apache web server VM and MySQL server VM adding a storage disk of 1 GB.
+* galaxy_tosca.yml: Installs a standalone Galaxy Portal on a single VM.
+* clues_tosca.yml: Launches a Torque/PBS Elastic cluster.
\ No newline at end of file