diff --git a/ansible/roles/elk-elasticsearch/README.md b/ansible/roles/elk-elasticsearch/README.md new file mode 100644 index 000000000..c59d688af --- /dev/null +++ b/ansible/roles/elk-elasticsearch/README.md @@ -0,0 +1,56 @@ +elk-elasticsearch +================= + +This role deploys the Elasticsearch instance for the DE ELK stack. +The ELK stack refers to the use of Elasticsearch, Kibana, and Logstash together. + + +Requirements +------------ + +systemd + +Role Variables +-------------- +dockerHostStats_url: the location of the latest `dockerHostStats` file from Jenkins. + +Role Tags +--------- + +config: deploys all config files. +stop_services: stops all services +update_service_files: updates all service files. CentOS 7, systemd files. +restart_services: restarts all services. + +stop_elasticsearch: stops the elasticsearch service. CentOS 7 only. +stop_kibana: stops the kibana service. CentOS 7 only. +stop_logstash: stops the logstash service. CentOS 7 only. +restart_data: restarts the data container service. CentOS 7 only. +restart_elasticsearch: restarts the elasticsearch service. CentOS 7 only. +restart_kibana: restarts the kibana service. CentOS 7 only. +restart_logstash: restarts the logstash service. CentOS 7 only. 
+ +Dependencies +------------ +N/A + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: elk + roles: + - { role: elk-stack } + +License +------- + +BSD + +Author Information +------------------ + +Jonathan Strootman - jstroot@iplantcollaborative.org + + diff --git a/ansible/roles/elk-elasticsearch/defaults/main.yaml b/ansible/roles/elk-elasticsearch/defaults/main.yaml new file mode 100644 index 000000000..8790bdda1 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +# defaults file for elk-elasticsearch +enable_services: yes diff --git a/ansible/roles/elk-elasticsearch/files/de-error-template.json b/ansible/roles/elk-elasticsearch/files/de-error-template.json new file mode 100644 index 000000000..200bab8b6 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/files/de-error-template.json @@ -0,0 +1,12 @@ +{ + "template": "de-error-logs-*", + "order": 2, + "settings": { + "index.refresh_interval": "5s", + "index.number_of_replicas": "0" + }, + "aliases": { + "de-error-logs": {} + } +} + diff --git a/ansible/roles/elk-elasticsearch/files/de-logs-template.json b/ansible/roles/elk-elasticsearch/files/de-logs-template.json new file mode 100644 index 000000000..e2c3142f4 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/files/de-logs-template.json @@ -0,0 +1,12 @@ +{ + "template": "de-logs-*", + "order": 2, + "settings": { + "index.refresh_interval": "5s", + "index.number_of_replicas": "0" + }, + "aliases": { + "de-logs": {} + } +} + diff --git a/ansible/roles/elk-elasticsearch/files/de-metrics-template.json b/ansible/roles/elk-elasticsearch/files/de-metrics-template.json new file mode 100644 index 000000000..e75fc27fb --- /dev/null +++ b/ansible/roles/elk-elasticsearch/files/de-metrics-template.json @@ -0,0 +1,121 @@ +{ + "template": "de-metrics-*", + "order": 1, + "settings": { + 
"index.refresh_interval": "5s", + "index.number_of_replicas": "0" + }, + "mappings": { + "app_event": { + "properties": { + "app": { + "type": "nested", + "include_in_parent": true, + "properties": { + "id": { + "type": "string", + "index": "not_analyzed" + }, + "name": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "integrator_name": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "description": { + "type": "string", + "index": "analyzed" + }, + "step_count": { + "type": "integer" + } + } + }, + "analysis": { + "type": "nested", + "include_in_parent": true, + "properties": { + "id": { + "type": "string", + "index": "not_analyzed" + }, + "name": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "output_dir": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + } + } + }, + "request": { + "type": "nested", + "include_in_parent": true, + "properties": { + "path-info": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "uri": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "request-method": { + "type": "string", + "index": "analyzed" + } + } + }, + "response": { + "type": "nested", + "include_in_parent": true, + "properties": { + "path-info": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "uri": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "request-method": { + "type": "string", + "index": "analyzed" + } + } + } + } + } + }, + "aliases": { + "de-metrics": {} + } +} + diff --git 
a/ansible/roles/elk-elasticsearch/files/de-template.json b/ansible/roles/elk-elasticsearch/files/de-template.json new file mode 100644 index 000000000..63bb6bd1d --- /dev/null +++ b/ansible/roles/elk-elasticsearch/files/de-template.json @@ -0,0 +1,180 @@ +{ + "template": "de-*", + "order": 0, + "settings": { + "index.refresh_interval": "5s", + "index.number_of_replicas": "0" + }, + "mappings": { + "_default_": { + "dynamic_templates": [ + { + "message_field": { + "mapping": { + "index": "analyzed", + "omit_norms": true, + "type": "string" + }, + "match_mapping_type": "string", + "match": "message" + } + }, + { + "string_fields": { + "mapping": { + "index": "analyzed", + "omit_norms": true, + "type": "string" + }, + "match_mapping_type": "string", + "match": "*" + } + } + ], + "properties": { + "geoip": { + "dynamic": true, + "properties": { + "location": { + "type": "geo_point" + } + }, + "type": "object" + }, + "service": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "ext_service": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "exception": { + "type": "nested", + "include_in_parent": true, + "properties": { + "error-code": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + } + } + }, + "user-info": { + "type": "nested", + "include_in_parent": true, + "properties": { + "user": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "email": { "type": "string", "index": "analyzed"}, + "first-name": { "type": "string", "index": "analyzed"}, + "las-name": { "type": "string", "index": "analyzed"} + } + }, + "app": { + "type": "nested", + "include_in_parent": true, + "properties": { + "id": { "type": "string", "index": "not_analyzed"}, + "name": { + "type": "string", + "index": 
"analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "integrator_name": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "description": { "type": "string", "index": "analyzed"}, + "step_count": { "type": "integer"} + } + }, + "analysis": { + "type": "nested", + "include_in_parent": true, + "properties": { + "id": { + "type": "string", + "index": "not_analyzed" + }, + "name": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "output_dir": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + } + } + }, + "request": { + "type": "nested", + "include_in_parent": true, + "properties": { + "path-info": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "uri": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + }, + "request-method": { + "type": "string", + "index": "analyzed" + }, + "id": { + "type": "string", + "index": "analyzed", + "fields": { + "raw": {"type": "string", "index": "not_analyzed"} + } + } + + } + }, + "response": { + "type": "nested", + "include_in_parent": true + } + + }, + "_all": { + "enabled": true, + "omit_norms": true + } + } + }, + "aliases": { } +} + diff --git a/ansible/roles/elk-elasticsearch/tasks/apps-lookup-index.yaml b/ansible/roles/elk-elasticsearch/tasks/apps-lookup-index.yaml new file mode 100644 index 000000000..bab67c296 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/tasks/apps-lookup-index.yaml @@ -0,0 +1,15 @@ +--- + +- name: Pull apps index creation tool + shell: docker pull {{app_index_tool_image}} + register: docker_pull_v + changed_when: docker_pull_v.stdout.find('Image is up to date') == -1 + tags: + - docker_pull + - docker_pull_app_index_tool 
+ +- name: Run app index creation tool + shell: "docker run --rm {{app_index_tool_image}} '{{elk_url}}'" + register: app_index_creation_result + +#- debug: var=app_index_creation_result diff --git a/ansible/roles/elk-elasticsearch/tasks/index-templates.yaml b/ansible/roles/elk-elasticsearch/tasks/index-templates.yaml new file mode 100644 index 000000000..3b0f1b274 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/tasks/index-templates.yaml @@ -0,0 +1,87 @@ +--- +- name: install httplib2 + pip: name=httplib2 + tags: + - config + - config_elasticsearch + +#- name: upload de-* index template +# uri: url="{{es_de_index_template_url}}" method=PUT +# body={{ lookup('file', 'de-template.json')|replace('\n', '')|to_nice_json }} +# register: upload_de_index_template +# tags: +# - config +# - config_elasticsearch + +- name: copy de-template.json to elk box + copy: src=de-template.json dest=/tmp owner=root group=root mode=0644 + tags: + - config + - config_elasticsearch + +- name: curl de-template.json because uri module barfs + shell: "curl -v -XPUT {{es_de_index_template_url}} --data-binary @/tmp/de-template.json" + tags: + - config + - config_elasticsearch + +#- name: upload de-metrics-* index template +# uri: url="{{es_de_metrics_index_template_url}}" method=PUT +# body={{ lookup('file', 'de-metrics-template.json')|replace('\n', '')|to_nice_json }} +# register: upload_de_metrics_index_template +# tags: +# - config +# - config_elasticsearch + +- name: copy de-metrics-template.json to elk box + copy: src=de-metrics-template.json dest=/tmp owner=root group=root mode=0644 + tags: + - config + - config_elasticsearch + +- name: curl de-metrics-template.json because uri module barfs + shell: "curl -v -XPUT {{es_de_metrics_index_template_url}} --data-binary @/tmp/de-metrics-template.json" + tags: + - config + - config_elasticsearch + +#- name: upload de-logs-* index template +# uri: url="{{es_de_logs_index_template_url}}" method=PUT +# body={{ lookup('file', 
'de-logs-template.json')|replace('\n', '')|to_nice_json }} +# register: upload_de_logs_index_template +# tags: +# - config +# - config_elasticsearch + +- name: copy de-logs-template.json to elk box + copy: src=de-logs-template.json dest=/tmp owner=root group=root mode=0644 + tags: + - config + - config_elasticsearch + +- name: curl de-logs-template.json because uri module barfs + shell: "curl -v -XPUT {{es_de_logs_index_template_url}} --data-binary @/tmp/de-logs-template.json" + tags: + - config + - config_elasticsearch + +#- name: upload de-error-logs-* index template +# uri: url="{{es_de_error_logs_index_template_url}}" method=PUT +# body={{ lookup('file', 'de-error-template.json')|replace('\n', '')|to_nice_json }} +# register: upload_de_logs_index_template +# tags: +# - config +# - config_elasticsearch + +- name: copy de-error-template.json to elk box + copy: src=de-error-template.json dest=/tmp owner=root group=root mode=0644 + tags: + - config + - config_elasticsearch + +- name: curl de-error-template.json because uri module barfs + shell: "curl -v -XPUT {{es_de_error_logs_index_template_url}} --data-binary @/tmp/de-error-template.json" + tags: + - config + - config_elasticsearch + diff --git a/ansible/roles/elk-elasticsearch/tasks/main.yaml b/ansible/roles/elk-elasticsearch/tasks/main.yaml new file mode 100644 index 000000000..1682809e1 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/tasks/main.yaml @@ -0,0 +1,92 @@ +--- +# tasks file for elk-elasticsearch + +- name: create iplant group + group: name=iplant gid=1337 state=present + +- name: create iplant user + user: name=iplant uid=1337 state=present + +- name: add iplant user to the docker group + sudo: yes + user: name=iplant groups=docker append=yes + +- name: install epel for python-pip + yum: name=epel-release state=latest + +- name: install python-pip dependency + yum: name=python-pip state=latest + +- name: pull elk images + shell: docker pull {{ elk.elasticsearch.image_name }} + register: 
docker_pull_v + changed_when: docker_pull_v.stdout.find('Image is up to date') == -1 + tags: + - docker_pull + - docker_pull_elasticsearch + +- name: place Systemd service files + template: src={{ elk.elasticsearch.service_name }} dest=/usr/lib/systemd/system group=root owner=root mode=0664 + register: service_file_v + tags: + - update_service_files + - update_service_files_elasticsearch + +- name: create {{elk.conf_dir}} + file: path={{elk.conf_dir}} owner=iplant group=iplant mode=0644 state=directory + tags: + - config + - config_elasticsearch + +- name: place elasticsearch config files + template: src=elasticsearch.yml.j2 dest="{{elk.conf_dir}}/elasticsearch.yml" group=root owner=root mode=0664 + register: config_file_v + tags: + - config + - config_elasticsearch + +- name: Reload systemd + shell: systemctl daemon-reload + when: service_file_v.changed + tags: + - update_service_files + - update_service_files_elasticsearch + +- name: enable elasticsearch service + service: name={{elk.elasticsearch.service_name}} enabled={{enable_services}} + when: service_file_v.changed + tags: + - enable_services + - enable_elasticsearch + +- name: services are running + shell: systemctl is-active {{elk.elasticsearch.service_name}} + register: verify_service_running + ignore_errors: true + tags: + - start_services + - start_elasticsearch + +- name: start elasticsearch service + service: name={{elk.elasticsearch.service_name}} state=started + when: verify_service_running.rc != 0 + tags: + - start_services + - start_elasticsearch + + # Only restart if service file or config file changed, or if the image has been updated. 
+- name: restart elasticsearch service + service: name={{elk.elasticsearch.service_name}} state=restarted + when: service_file_v.changed or + config_file_v.changed or + docker_pull_v.changed + tags: + - restart_services + - restart_elasticsearch + +- name: Wait for ES to come back + wait_for: host="{{elk_host}}" port="{{elk.elasticsearch.port}}" state=started + +- include: index-templates.yaml +#- include: apps-lookup-index.yaml + diff --git a/ansible/roles/elk-elasticsearch/templates/elasticsearch.yml.j2 b/ansible/roles/elk-elasticsearch/templates/elasticsearch.yml.j2 new file mode 100644 index 000000000..8cefbbfb2 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/templates/elasticsearch.yml.j2 @@ -0,0 +1,386 @@ +#################### Elasticsearch Configuration Example ##################### + +# This file contains an overview of various configuration settings, +# targeted at operations staff. Application developers should +# consult the guide at . +# +# The installation procedure is covered at +# . +# +# Elasticsearch comes with reasonable defaults for most settings, +# so you can try it out without bothering with configuration. +# +# Most of the time, these defaults are just fine for running a production +# cluster. If you're fine-tuning your cluster, or wondering about the +# effect of certain configuration option, please _do ask_ on the +# mailing list or IRC channel [http://elasticsearch.org/community]. + +# Any element in the configuration can be replaced with environment variables +# by placing them in ${...} notation. For example: +# +#node.rack: ${RACK_ENV_VAR} + +# For information on supported formats and syntax for the config file, see +# + + +################################### Cluster ################################### + +# Cluster name identifies your cluster for auto-discovery. If you're running +# multiple clusters on the same network, make sure you're using unique names. 
+# +cluster.name: {{elk.elasticsearch.cluster_name}} + + +#################################### Node ##################################### + +# Node names are generated dynamically on startup, so you're relieved +# from configuring them manually. You can tie this node to a specific name: +# +#node.name: "Franz Kafka" + +# Every node can be configured to allow or deny being eligible as the master, +# and to allow or deny to store the data. +# +# Allow this node to be eligible as a master node (enabled by default): +# +node.master: true +# +# Allow this node to store data (enabled by default): +# +#node.data: true + +# You can exploit these settings to design advanced cluster topologies. +# +# 1. You want this node to never become a master node, only to hold data. +# This will be the "workhorse" of your cluster. +# +#node.master: false +#node.data: true +# +# 2. You want this node to only serve as a master: to not store any data and +# to have free resources. This will be the "coordinator" of your cluster. +# +#node.master: true +#node.data: false +# +# 3. You want this node to be neither master nor data node, but +# to act as a "search load balancer" (fetching data from nodes, +# aggregating results, etc.) +# +#node.master: false +#node.data: false + +# Use the Cluster Health API [http://localhost:9200/_cluster/health], the +# Node Info API [http://localhost:9200/_nodes] or GUI tools +# such as , +# , +# and +# to inspect the cluster state. + +# A node can have generic attributes associated with it, which can later be used +# for customized shard allocation filtering, or allocation awareness. 
An attribute +# is a simple key value pair, similar to node.key: value, here is an example: +# +#node.rack: rack314 + +# By default, multiple nodes are allowed to start from the same installation location +# to disable it, set the following: +#node.max_local_storage_nodes: 1 + + +#################################### Index #################################### + +# You can set a number of options (such as shard/replica options, mapping +# or analyzer definitions, translog settings, ...) for indices globally, +# in this file. +# +# Note, that it makes more sense to configure index settings specifically for +# a certain index, either when creating it or by using the index templates API. +# +# See and +# +# for more information. + +# Set the number of shards (splits) of an index (5 by default): +# +#index.number_of_shards: 5 + +# Set the number of replicas (additional copies) of an index (1 by default): +# +index.number_of_replicas: 0 + +# Note, that for development on a local machine, with small indices, it usually +# makes sense to "disable" the distributed features: +# +#index.number_of_shards: 1 +#index.number_of_replicas: 0 + +# These settings directly affect the performance of index and search operations +# in your cluster. Assuming you have enough machines to hold shards and +# replicas, the rule of thumb is: +# +# 1. Having more *shards* enhances the _indexing_ performance and allows to +# _distribute_ a big index across machines. +# 2. Having more *replicas* enhances the _search_ performance and improves the +# cluster _availability_. +# +# The "number_of_shards" is a one-time setting for an index. +# +# The "number_of_replicas" can be increased or decreased anytime, +# by using the Index Update Settings API. +# +# Elasticsearch takes care about load balancing, relocating, gathering the +# results from nodes, etc. Experiment with different settings to fine-tune +# your setup. + +# Use the Index Status API () to inspect +# the index status. 
+ + +#################################### Paths #################################### + +# Path to directory containing configuration (this file and logging.yml): +# +#path.conf: /path/to/conf + +# Path to directory where to store index data allocated for this node. +# +#path.data: /path/to/data +# +# Can optionally include more than one location, causing data to be striped across +# the locations (a la RAID 0) on a file level, favouring locations with most free +# space on creation. For example: +# +#path.data: /path/to/data1,/path/to/data2 + +# Path to temporary files: +# +#path.work: /path/to/work + +# Path to log files: +# +path.logs: /var/log/elasticsearch + +# Path to where plugins are installed: +# +#path.plugins: /path/to/plugins + + +#################################### Plugin ################################### + +# If a plugin listed here is not installed for current node, the node will not start. +# +#plugin.mandatory: mapper-attachments,lang-groovy + + +################################### Memory #################################### + +# Elasticsearch performs poorly when JVM starts swapping: you should ensure that +# it _never_ swaps. +# +# Set this property to true to lock the memory: +# +#bootstrap.mlockall: true + +# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set +# to the same value, and that the machine has enough memory to allocate +# for Elasticsearch, leaving enough memory for the operating system itself. +# +# You should also make sure that the Elasticsearch process is allowed to lock +# the memory, eg. by using `ulimit -l unlimited`. + + +############################## Network And HTTP ############################### + +# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens +# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node +# communication. (the range means that if the port is busy, it will automatically +# try the next port). 
+ +# Set the bind address specifically (IPv4 or IPv6): +# +#network.bind_host: 192.168.0.1 + +# Set the address other nodes will use to communicate with this node. If not +# set, it is automatically derived. It must point to an actual IP address. +# +#network.publish_host: 192.168.0.1 + +# Set both 'bind_host' and 'publish_host': +# +#network.host: 192.168.0.1 + +# Set a custom port for the node to node communication (9300 by default): +# +#transport.tcp.port: 9300 + +# Enable compression for all communication between nodes (disabled by default): +# +#transport.tcp.compress: true + +# Set a custom port to listen for HTTP traffic: +# +#http.port: 9200 + +# Set a custom allowed content length: +# +#http.max_content_length: 100mb + +# Disable HTTP completely: +# +#http.enabled: false + + +################################### Gateway ################################### + +# The gateway allows for persisting the cluster state between full cluster +# restarts. Every change to the state (such as adding an index) will be stored +# in the gateway, and when the cluster starts up for the first time, +# it will read its state from the gateway. + +# There are several types of gateway implementations. For more information, see +# . + +# The default gateway type is the "local" gateway (recommended): +# +#gateway.type: local + +# Settings below control how and when to start the initial recovery process on +# a full cluster restart (to reuse as much local data as possible when using shared +# gateway). + +# Allow recovery process after N nodes in a cluster are up: +# +#gateway.recover_after_nodes: 1 + +# Set the timeout to initiate the recovery process, once the N nodes +# from previous setting are up (accepts time value): +# +#gateway.recover_after_time: 5m + +# Set how many nodes are expected in this cluster. 
Once these N nodes +# are up (and recover_after_nodes is met), begin recovery process immediately +# (without waiting for recover_after_time to expire): +# +#gateway.expected_nodes: 2 + + +############################# Recovery Throttling ############################# + +# These settings allow to control the process of shards allocation between +# nodes during initial recovery, replica allocation, rebalancing, +# or when adding and removing nodes. + +# Set the number of concurrent recoveries happening on a node: +# +# 1. During the initial recovery +# +#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# +# 2. During adding/removing nodes, rebalancing, etc +# +#cluster.routing.allocation.node_concurrent_recoveries: 2 + +# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +# +#indices.recovery.max_bytes_per_sec: 20mb + +# Set to limit the number of open concurrent streams when +# recovering a shard from a peer: +# +#indices.recovery.concurrent_streams: 5 + + +################################## Discovery ################################## + +# Discovery infrastructure ensures nodes can be found within a cluster +# and master node is elected. Multicast discovery is the default. + +# Set to ensure a node sees N other master eligible nodes to be considered +# operational within the cluster. This should be set to a quorum/majority of +# the master-eligible nodes in the cluster. +# +#discovery.zen.minimum_master_nodes: 1 + +# Set the time to wait for ping responses from other nodes when discovering. +# Set this option to a higher value on a slow or congested network +# to minimize discovery failures: +# +#discovery.zen.ping.timeout: 3s + +# For more information, see +# + +# Unicast discovery allows to explicitly control which nodes will be used +# to discover the cluster. It can be used when multicast is not present, +# or to restrict the cluster communication-wise. +# +# 1. 
Disable multicast discovery (enabled by default): +# +#discovery.zen.ping.multicast.enabled: false +# +# 2. Configure an initial list of master nodes in the cluster +# to perform discovery when new nodes (master or data) are started: +# +#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] + +# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# +# You have to install the cloud-aws plugin for enabling the EC2 discovery. +# +# For more information, see +# +# +# See +# for a step-by-step tutorial. + +# GCE discovery allows to use Google Compute Engine API in order to perform discovery. +# +# You have to install the cloud-gce plugin for enabling the GCE discovery. +# +# For more information, see . + +# Azure discovery allows to use Azure API in order to perform discovery. +# +# You have to install the cloud-azure plugin for enabling the Azure discovery. +# +# For more information, see . + +################################## Slow Log ################################## + +# Shard level query and fetch threshold logging. 
+ +#index.search.slowlog.threshold.query.warn: 10s +#index.search.slowlog.threshold.query.info: 5s +#index.search.slowlog.threshold.query.debug: 2s +#index.search.slowlog.threshold.query.trace: 500ms + +#index.search.slowlog.threshold.fetch.warn: 1s +#index.search.slowlog.threshold.fetch.info: 800ms +#index.search.slowlog.threshold.fetch.debug: 500ms +#index.search.slowlog.threshold.fetch.trace: 200ms + +#index.indexing.slowlog.threshold.index.warn: 10s +#index.indexing.slowlog.threshold.index.info: 5s +#index.indexing.slowlog.threshold.index.debug: 2s +#index.indexing.slowlog.threshold.index.trace: 500ms + +################################## GC Logging ################################ + +#monitor.jvm.gc.young.warn: 1000ms +#monitor.jvm.gc.young.info: 700ms +#monitor.jvm.gc.young.debug: 400ms + +#monitor.jvm.gc.old.warn: 10s +#monitor.jvm.gc.old.info: 5s +#monitor.jvm.gc.old.debug: 2s + +################################## Security ################################ + +# Uncomment if you want to enable JSONP as a valid return transport on the +# http server. With this enabled, it may pose a security risk, so disabling +# it unless you need it is recommended (it is disabled by default). 
+# +#http.jsonp.enable: true + diff --git a/ansible/roles/elk-elasticsearch/templates/elk-elasticsearch.service b/ansible/roles/elk-elasticsearch/templates/elk-elasticsearch.service new file mode 100644 index 000000000..ba8882a20 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/templates/elk-elasticsearch.service @@ -0,0 +1,24 @@ +[Unit] +Description={{elk.elasticsearch.service_description}} +BindsTo=docker.service +PartOf=docker.service {{elk.data.service_name}} +After=docker.service +Requisite=docker.service + +[Service] +ExecStartPre=-/usr/bin/docker rm -v {{elk.elasticsearch.container_name}} +ExecStart=/usr/bin/docker run --name {{elk.elasticsearch.container_name}} \ + --volumes-from {{elk.data.container_name}} \ + -v {{ de_config_dir }}:/etc/iplant/de \ + -v /etc/localtime:/etc/localtime -v /etc/timezone:/etc/timezone \ + -v {{elk.conf_dir}}/elasticsearch.yml:/etc/elasticsearch/elasticsearch.yml \ + -p 9200:9200 \ + -e ES_HEAP_SIZE={{elk.elasticsearch.heap_size}} \ + --log-driver={{docker.log_driver}} --log-opt syslog-tag={{elk.elasticsearch.service_name_short}} \ + {{elk.elasticsearch.image_name}} +ExecStop=/usr/bin/docker stop {{elk.elasticsearch.container_name}} +Restart=on-failure + +[Install] +WantedBy=multi-user.target + diff --git a/ansible/roles/elk-elasticsearch/vars/main.yaml b/ansible/roles/elk-elasticsearch/vars/main.yaml new file mode 100644 index 000000000..d0bdc7258 --- /dev/null +++ b/ansible/roles/elk-elasticsearch/vars/main.yaml @@ -0,0 +1,11 @@ +--- +# vars file for elk-elasticsearch +elk_host: "{{ groups['elk'][0]}}" +elk_url: "http://{{elk_host}}:{{elk.elasticsearch.port}}" +registry_host: "{{ groups['docker-registry'][0]}}" +remote_registry: "{{ registry_host }}:{{ docker.registry.port }}" +es_de_index_template_url: "{{elk_url}}/_template/de" +es_de_metrics_index_template_url: "{{elk_url}}/_template/de-metrics" +es_de_logs_index_template_url: "{{elk_url}}/_template/de-logs" +es_de_error_logs_index_template_url: 
"{{elk_url}}/_template/de-error-logs" +app_index_tool_image: "{{remote_registry}}/de-elk-apps-index-tool:latest"