diff --git a/cdrhook/connector.py b/cdrhook/connector.py index 461427e..921dfd0 100644 --- a/cdrhook/connector.py +++ b/cdrhook/connector.py @@ -58,7 +58,7 @@ def register(self): "events": self.events } logging.info(f"Registering with CDR: [system_name : {registration['name']}, system_version : {registration['version']}, callback_url : {registration['callback_url']}") - r = requests.post(f"{self.cdr_url}/user/me/register", json=registration, headers=headers) + r = requests.post(f"{self.cdr_url}user/me/register", json=registration, headers=headers) logging.debug(r.text) r.raise_for_status() self.registration = r.json()["id"] @@ -139,4 +139,4 @@ def retrieve_endpoint(self, endpoint_url:str, schema:BaseModel=None, headers:dic if isinstance(response, list): return [schema.model_validate(item) for item in response] return schema.model_validate(response) - return response \ No newline at end of file + return response diff --git a/docker-compose.yml b/docker-compose.yml index be00d24..a7149b4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,8 +51,9 @@ services: # CDR HOOK # ---------------------------------------------------------------------- cdrhook: - image: ncsa/criticalmaas-cdr:${CDRHOOK_VERSION:-latest} + image: ncsa/criticalmaas-cdr:latest hostname: cdrhook + build: cdrhook restart: unless-stopped profiles: - cdrhook @@ -60,10 +61,9 @@ depends_on: - rabbitmq environment: - CDR_URL: "${CDR_URL}" CDR_TOKEN: "${CDR_TOKEN}" CDR_KEEP_EVENT: "no" - CALLBACK_URL: "${CALLBACK_URL}" + CALLBACK_URL: "https://${SERVER_NAME}/cdr" CALLBACK_SECRET: "${CALLBACK_SECRET}" CALLBACK_USERNAME: "${CALLBACK_USERNAME}" CALLBACK_PASSWORD: "${CALLBACK_PASSWORD}" @@ -79,7 +79,7 @@ services: # RABBITMQ MONITOR # ---------------------------------------------------------------------- monitor: - image: ncsa/criticalmaas-monitor:${CDRHOOK_VERSION:-latest} + image: ncsa/criticalmaas-monitor:latest hostname: monitor build: monitor restart: unless-stopped @@ -139,7 +139,8 @@ 
services: # DOWNLOADER and UPLOADER # ---------------------------------------------------------------------- downloader: - image: ncsa/criticalmaas-downloader:${CDRHOOK_VERSION:-latest} + image: ncsa/criticalmaas-downloader:latest + build: uploader restart: "unless-stopped" profiles: - pipeline @@ -150,19 +151,234 @@ services: - "data:/data" uploader: - image: ncsa/criticalmaas-uploader:${CDRHOOK_VERSION:-latest} + image: ncsa/criticalmaas-uploader:latest + build: uploader restart: "unless-stopped" profiles: - pipeline - allinone environment: - CDR_URL: "${CDR_URL}" CDR_TOKEN: "${CDR_TOKEN}" RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" PREFIX: "" volumes: - "output:/output" + + setupElk: + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + profiles: + - elk + - allinone + volumes: + - certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem --out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! 
-f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es01Elk\n"\ + " dns:\n"\ + " - es01Elk\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: kibana\n"\ + " dns:\n"\ + " - kibana\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . -type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es01Elk:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01Elk:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: ["CMD-SHELL", "[ -f config/certs/es01Elk/es01Elk.crt ]"] + interval: 1s + timeout: 5s + retries: 120 + + es01Elk: + profiles: + - elk + - allinone + depends_on: + setupElk: + condition: service_healthy + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + labels: + co.elastic.logs/module: elasticsearch + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + environment: + - node.name=es01Elk + - cluster.name=${CLUSTER_NAME} + - discovery.type=single-node + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - 
xpack.security.http.ssl.key=certs/es01Elk/es01Elk.key + - xpack.security.http.ssl.certificate=certs/es01Elk/es01Elk.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es01Elk/es01Elk.key + - xpack.security.transport.ssl.certificate=certs/es01Elk/es01Elk.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${ES_MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + kibanaElk: + profiles: + - elk + - allinone + depends_on: + es01Elk: + condition: service_healthy + image: docker.elastic.co/kibana/kibana:${STACK_VERSION} + labels: + co.elastic.logs/module: kibana + volumes: + - certs:/usr/share/kibana/config/certs + - kibanadata:/usr/share/kibana/data + ports: + - ${KIBANA_PORT}:5601 + environment: + - SERVERNAME=kibanaElk + - ELASTICSEARCH_HOSTS=https://es01Elk:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + - XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY} + mem_limit: ${KB_MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + ] + interval: 10s + timeout: 10s + retries: 120 + + metricbeat01: + profiles: + - elk + - allinone + depends_on: + es01Elk: + condition: service_healthy + kibanaElk: + condition: service_healthy + image: docker.elastic.co/beats/metricbeat:${STACK_VERSION} + user: root + volumes: + - 
certs:/usr/share/metricbeat/certs + - metricbeatdata01:/usr/share/metricbeat/data + - "./metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:ro" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro" + - "/proc:/hostfs/proc:ro" + - "/:/hostfs:ro" + environment: + - ELASTIC_USER=elastic + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - ELASTIC_HOSTS=https://es01Elk:9200 + - KIBANA_HOSTS=http://kibana:5601 + - LOGSTASH_HOSTS=http://logstash01:9600 + + + filebeat01: + profiles: + - elk + - allinone + depends_on: + es01Elk: + condition: service_healthy + image: docker.elastic.co/beats/filebeat:${STACK_VERSION} + user: root + volumes: + - certs:/usr/share/filebeat/certs + - filebeatdata01:/usr/share/filebeat/data + - "./filebeat_ingest_data/:/usr/share/filebeat/ingest_data/" + - "./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro" + - "/var/lib/docker/containers:/var/lib/docker/containers:ro" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + environment: + - ELASTIC_USER=elastic + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - ELASTIC_HOSTS=https://es01Elk:9200 + - KIBANA_HOSTS=http://kibana:5601 + - LOGSTASH_HOSTS=http://logstash01:9600 + + logstash01: + profiles: + - elk + - allinone + depends_on: + es01Elk: + condition: service_healthy + kibanaElk: + condition: service_healthy + image: docker.elastic.co/logstash/logstash:${STACK_VERSION} + labels: + co.elastic.logs/module: logstash + user: root + volumes: + - certs:/usr/share/logstash/certs + - logstashdata01:/usr/share/logstash/data + - "./logstash_ingest_data/:/usr/share/logstash/ingest_data/" + - "./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro" + environment: + - xpack.monitoring.enabled=false + - ELASTIC_USER=elastic + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - ELASTIC_HOSTS=https://es01Elk:9200 + volumes: traefik: rabbitmq: @@ -171,4 +387,15 @@ volumes: data: logs: output: - + certs: + driver: local + esdata01: + driver: local + kibanadata: + driver: local + 
metricbeatdata01: + driver: local + filebeatdata01: + driver: local + logstashdata01: + driver: local diff --git a/docker-compose.yml_craig b/docker-compose.yml_craig new file mode 100644 index 0000000..d5bb221 --- /dev/null +++ b/docker-compose.yml_craig @@ -0,0 +1,328 @@ +services: + + # ---------------------------------------------------------------------- + # REVERSE PROXY + # ---------------------------------------------------------------------- + + setup: + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! -f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es01\n"\ + " dns:\n"\ + " - es01\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: kibana\n"\ + " dns:\n"\ + " - kibana\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . 
-type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"] + interval: 1s + timeout: 5s + retries: 120 + + traefik: + image: "traefik:v2.11" + command: + - --log.level=INFO + - --api=true + - --api.dashboard=true + - --api.insecure=true + # Entrypoints + - --entrypoints.http.address=:80 + - --entrypoints.http.http.redirections.entryPoint.to=https + - --entrypoints.https.address=:443 + - --entrypoints.https.http.tls.certresolver=myresolver + # letsencrypt + - --certificatesresolvers.myresolver.acme.email=${TRAEFIK_ACME_EMAIL} + - --certificatesresolvers.myresolver.acme.storage=/config/acme.json + # uncomment to use testing certs + #- --certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory + - --certificatesresolvers.myresolver.acme.httpchallenge=true + - --certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=http + # Docker setup + - --providers.docker=true + - --providers.docker.endpoint=unix:///var/run/docker.sock + - --providers.docker.exposedbydefault=false + - --providers.docker.watch=true + restart: "unless-stopped" + security_opt: + - no-new-privileges:true + ports: + - "80:80" + - "443:443" + volumes: + - "traefik:/config" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + + # ---------------------------------------------------------------------- + # MESSAGE BROKER + # ---------------------------------------------------------------------- + rabbitmq: + 
image: rabbitmq:3.13-management + hostname: rabbitmq + restart: unless-stopped + environment: + RABBITMQ_DEFAULT_USER: "${RABBITMQ_USERNAME:-guest}" + RABBITMQ_DEFAULT_PASS: "${RABBITMQ_PASSWORD:-guest}" + volumes: + - rabbitmq:/var/lib/rabbitmq + - ./50-criticalmaas.conf:/etc/rabbitmq/conf.d/50-criticalmaas.conf:ro + + # ---------------------------------------------------------------------- + # CDR HOOK + # ---------------------------------------------------------------------- + cdrhook: + image: ncsa/criticalmaas-cdr:latest + hostname: cdrhook + build: cdrhook + restart: unless-stopped + depends_on: + - rabbitmq + environment: + CDR_TOKEN: "${CDR_TOKEN}" + CDR_KEEP_EVENT: "no" + CALLBACK_URL: "https://${SERVER_NAME}/cdr" + CALLBACK_SECRET: "${CALLBACK_SECRET}" + CALLBACK_USERNAME: "${CALLBACK_USERNAME}" + CALLBACK_PASSWORD: "${CALLBACK_PASSWORD}" + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + PREFIX: "" + labels: + - "traefik.enable=true" + - "traefik.http.routers.cdrhook.rule=Host(`${SERVER_NAME}`) && PathPrefix(`/cdr`)" + volumes: + - cdrhook:/data + + # ---------------------------------------------------------------------- + # RABBITMQ MONITOR + # ---------------------------------------------------------------------- + monitor: + image: ncsa/criticalmaas-monitor:latest + hostname: monitor + build: monitor + restart: unless-stopped + depends_on: + - rabbitmq + environment: + RABBITMQ_MGMT_URL: ${RABBITMQ_MGMT_URL} + RABBITMQ_USERNAME: ${RABBITMQ_USERNAME} + RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD} + labels: + - "traefik.enable=true" + - "traefik.http.routers.monitor.rule=Host(`${SERVER_NAME}`) && PathPrefix(`/monitor`)" + + # ---------------------------------------------------------------------- + # DATA PROCESSING PIPELINE + # use one, or more, per model to be executed + # ---------------------------------------------------------------------- + golden_muscat: + image: ncsa/criticalmaas-pipeline:latest + build: ../uiuc-pipeline 
+ runtime: nvidia + profiles: + - pipeline + depends_on: + - rabbitmq + environment: + NVIDIA_VISIBLE_DEVICES: all + PREFIX: "" + command: + - -v + - --data + - /data + - --log + - /logs/logs.latest + - --output + - /output + - --feedback + - /feedback + - --amqp + - "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + - --inactive_timeout + - "86000" + - --model + - golden_muscat + restart: "unless-stopped" + volumes: + - "data:/data" + - "logs:/logs" + - "output:/output" + - "feedback:/feedback" + + # ---------------------------------------------------------------------- + # DOWNLOADER and UPLOADER + # ---------------------------------------------------------------------- + downloader: + image: ncsa/criticalmaas-downloader:latest + build: uploader + restart: "unless-stopped" + profiles: + - pipeline + depends_on: + - rabbitmq + environment: + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + volumes: + - "data:/data" + + uploader: + image: ncsa/criticalmaas-uploader:latest + build: uploader + restart: "unless-stopped" + profiles: + - pipeline + depends_on: + - rabbitmq + environment: + CDR_TOKEN: "${CDR_TOKEN}" + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + PREFIX: "" + volumes: + - "output:/output" + + es01: + depends_on: + setup: + condition: service_healthy + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + labels: + co.elastic.logs/module: elasticsearch + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + environment: + - node.name=es01 + - cluster.name=${CLUSTER_NAME} + - discovery.type=single-node + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=true +# - xpack.security.enabled=false +# - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.enabled=false + - xpack.security.http.ssl.key=certs/es01/es01.key + - 
xpack.security.http.ssl.certificate=certs/es01/es01.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt +# - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.enabled=false + - xpack.security.transport.ssl.key=certs/es01/es01.key + - xpack.security.transport.ssl.certificate=certs/es01/es01.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${ES_MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + kibana: + depends_on: + es01: + condition: service_healthy + image: docker.elastic.co/kibana/kibana:${STACK_VERSION} + labels: + co.elastic.logs/module: kibana + volumes: + - certs:/usr/share/kibana/config/certs + - kibanadata:/usr/share/kibana/data + ports: + - ${KIBANA_PORT}:5601 + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=https://es01:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + - XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY} + mem_limit: ${KB_MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + ] + interval: 10s + timeout: 10s + retries: 120 + +#networks: +# elk: +# default: +# name: elastic +# driver: bridge +# external: false + +volumes: + traefik: + rabbitmq: + cdrhook: + feedback: + data: + logs: + output: + test_data: + certs: + driver: local + esdata01: + driver: local + kibanadata: + driver: local + metricbeatdata01: + driver: local + 
filebeatdata01: + driver: local + logstashdata01: + driver: local diff --git a/docker-compose.yml_oldgit b/docker-compose.yml_oldgit new file mode 100644 index 0000000..efcd2f5 --- /dev/null +++ b/docker-compose.yml_oldgit @@ -0,0 +1,328 @@ +services: + +# +# This sets up SSL stuff in a volume to make the ELK components happy (maybe?) +# + setup: + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + volumes: + - certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f config/certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! -f config/certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: elasticsearch\n"\ + " dns:\n"\ + " - elasticsearch\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: kibana\n"\ + " dns:\n"\ + " - kibana\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . 
-type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://elasticsearch:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: ["CMD-SHELL", "[ -f config/certs/elasticsearch/elasticsearch.crt ]"] + interval: 1s + timeout: 5s + retries: 120 + + + + + # ---------------------------------------------------------------------- + # REVERSE PROXY + # ---------------------------------------------------------------------- + traefik: + image: "traefik:munster" + command: + - --log.level=INFO + - --api=true + - --api.dashboard=true + - --api.insecure=true + # Entrypoints + - --entrypoints.http.address=:80 + - --entrypoints.http.http.redirections.entryPoint.to=https + # Docker setup + - --providers.docker=true + - --providers.docker.endpoint=unix:///var/run/docker.sock + - --providers.docker.exposedbydefault=false + - --providers.docker.watch=true + restart: "unless-stopped" +# profiles: +# - traefik +# - allinone + security_opt: + - no-new-privileges:true + ports: + - "80:80" + volumes: + - "traefik:/config" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + + # ---------------------------------------------------------------------- + # MESSAGE BROKER + # ---------------------------------------------------------------------- + rabbitmq: + image: rabbitmq:3.13-management + hostname: rabbitmq + restart: unless-stopped +# profiles: +# - cdrhook +# - allinone + environment: + RABBITMQ_DEFAULT_USER: "${RABBITMQ_USERNAME:-guest}" + RABBITMQ_DEFAULT_PASS: "${RABBITMQ_PASSWORD:-guest}" + volumes: + - rabbitmq:/var/lib/rabbitmq + - 
./50-criticalmaas.conf:/etc/rabbitmq/conf.d/50-criticalmaas.conf:ro + + # ---------------------------------------------------------------------- + # CDR HOOK + # ---------------------------------------------------------------------- + cdrhook: + image: ncsa/criticalmaas-cdr:${CDRHOOK_VERSION:-latest} + hostname: cdrhook + restart: unless-stopped +# profiles: +# - cdrhook +# - allinone + depends_on: + - rabbitmq + environment: +# CDR_URL: "${CDR_URL}" + CDR_TOKEN: "${CDR_TOKEN}" + CDR_KEEP_EVENT: "no" +# CALLBACK_URL: "${CALLBACK_URL}" + CALLBACK_URL: "https://${SERVER_NAME}/cdr" + CALLBACK_SECRET: "${CALLBACK_SECRET}" + CALLBACK_USERNAME: "${CALLBACK_USERNAME}" + CALLBACK_PASSWORD: "${CALLBACK_PASSWORD}" + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + PREFIX: "" + labels: + - "traefik.enable=true" + - "traefik.http.routers.cdrhook.rule=Host(`${SERVER_NAME}`) && PathPrefix(`/cdr`)" + volumes: + - cdrhook:/data + + # ---------------------------------------------------------------------- + # RABBITMQ MONITOR + # ---------------------------------------------------------------------- + monitor: + image: ncsa/criticalmaas-monitor:${CDRHOOK_VERSION:-latest} + hostname: monitor + build: monitor + restart: unless-stopped +# profiles: +# - cdrhook +# - allinone + depends_on: + - rabbitmq + environment: + RABBITMQ_MGMT_URL: ${RABBITMQ_MGMT_URL} + RABBITMQ_USERNAME: ${RABBITMQ_USERNAME} + RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD} + labels: + - "traefik.enable=true" + - "traefik.http.routers.monitor.rule=Host(`${SERVER_NAME}`) && PathPrefix(`/monitor`)" + + # ---------------------------------------------------------------------- + # DATA PROCESSING PIPELINE + # use one, or more, per model to be executed + # ---------------------------------------------------------------------- + icy-resin: + image: ncsa/criticalmaas-pipeline:${PIPELINE_VERSION:-latest} + runtime: nvidia + restart: "unless-stopped" + profiles: + - pipeline + - allinone + 
environment: + NVIDIA_VISIBLE_DEVICES: all + PREFIX: "" + ipc: host + command: + - -v + - --data + - /data + - --log + - /logs/logs.latest + - --output + - /output + - --feedback + - /feedback + - --amqp + - "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + - --inactive_timeout + - "86000" + - --output_types + - cdr_json + - --model + - icy_resin + volumes: + - "data:/data" + - "logs:/logs" + - "output:/output" + - "feedback:/feedback" + + # ---------------------------------------------------------------------- + # DOWNLOADER and UPLOADER + # ---------------------------------------------------------------------- + downloader: + image: ncsa/criticalmaas-downloader:${CDRHOOK_VERSION:-latest} + restart: "unless-stopped" + profiles: + - pipeline + - allinone + environment: + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + volumes: + - "data:/data" + + uploader: + image: ncsa/criticalmaas-uploader:${CDRHOOK_VERSION:-latest} + restart: "unless-stopped" + profiles: + - pipeline + - allinone + environment: +# CDR_URL: "${CDR_URL}" + CDR_TOKEN: "${CDR_TOKEN}" + RABBITMQ_URI: "amqp://${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD}@rabbitmq/%2F" + PREFIX: "" + volumes: + - "output:/output" + + elasticsearch: + depends_on: + setup: + condition: service_healthy + image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION} + container_name: elasticsearch + labels: + co.elastic.logs/module: elasticsearch + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + environment: + - node.name=elasticsearch + - cluster.name=${CLUSTER_NAME} + - discovery.type=single-node + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=true +# - xpack.security.enabled=false +# - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.enabled=false + - xpack.security.http.ssl.key=certs/elasticsearch/elasticsearch.key + - 
xpack.security.http.ssl.certificate=certs/elasticsearch/elasticsearch.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt +# - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.enabled=false + - xpack.security.transport.ssl.key=certs/elasticsearch/elasticsearch.key + - xpack.security.transport.ssl.certificate=certs/elasticsearch/elasticsearch.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${ES_MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + kibana: +# depends_on: +# elasticsearch: +# condition: service_healthy + image: docker.elastic.co/kibana/kibana:${STACK_VERSION} + labels: + co.elastic.logs/module: kibana + volumes: + - certs:/usr/share/kibana/config/certs + - kibanadata:/usr/share/kibana/data + ports: + - ${KIBANA_PORT}:5601 + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=https://elasticsearch:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + - XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY} + - XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY} + mem_limit: ${KB_MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + ] + interval: 10s + timeout: 10s + retries: 120 + +volumes: + traefik: + rabbitmq: + cdrhook: + feedback: + data: + logs: + output: + test_data: + certs: + driver: local + esdata01: + driver: local + kibanadata: + driver: local + metricbeatdata01: + driver: local + filebeatdata01: + 
driver: local + logstashdata01: + driver: local diff --git a/elk-config/elasticsearch/elasticsearch.yml b/elk-config/elasticsearch/elasticsearch.yml new file mode 100644 index 0000000..74534c3 --- /dev/null +++ b/elk-config/elasticsearch/elasticsearch.yml @@ -0,0 +1,7 @@ +cluster.name: "${CLUSTER_NAME}" +server.name: es01 +#network.host: localhost +network.host: 0.0.0.0 +# getting ssl to work +xpack.security.enabled: true +#xpack.security.autoconfiguration.enabled: true diff --git a/elk-config/kibana/kibana.yml b/elk-config/kibana/kibana.yml new file mode 100644 index 0000000..0b1e14c --- /dev/null +++ b/elk-config/kibana/kibana.yml @@ -0,0 +1,6 @@ +# Default Kibana configuration for docker target +server.name: kibana +#server.host: "0" +#elasticsearch.hosts: [ "http://elasticsearch:9200" ] +elasticsearch.hosts: [ "http://es01:9200" ] +monitoring.ui.container.elasticsearch.enabled: true diff --git a/elk-config/logstash/logstash.conf b/elk-config/logstash/logstash.conf new file mode 100644 index 0000000..c32244b --- /dev/null +++ b/elk-config/logstash/logstash.conf @@ -0,0 +1,17 @@ +input { + beats{ + port => 5044 + } +} + +filter { +} + +output { + elasticsearch { + hosts => "es01:9200" + index => "filebeat-test%{+YYYY.MM.DD}" + user => "elastic" + password => "${ELASTIC_PASSWORD}" + } +} diff --git a/elk-config/logstash/logstash.yml b/elk-config/logstash/logstash.yml new file mode 100644 index 0000000..a7f818c --- /dev/null +++ b/elk-config/logstash/logstash.yml @@ -0,0 +1,3 @@ +http.host: 0.0.0.0 +xpack.monitoring.elasticsearch.hosts: ["es01:9200"] +xpack.monitoring.enabled: true diff --git a/filebeat.yml b/filebeat.yml new file mode 100644 index 0000000..fd404d4 --- /dev/null +++ b/filebeat.yml @@ -0,0 +1,29 @@ +filebeat.inputs: + - type: filestream + id: default-filestream + paths: + - ingest_data/*.log + + +filebeat.autodiscover: + providers: + - type: docker + hints.enabled: true + + +processors: + - add_docker_metadata: ~ + + +setup.kibana: + host: 
${KIBANA_HOSTS} + username: ${ELASTIC_USER} + password: ${ELASTIC_PASSWORD} + + +output.elasticsearch: + hosts: ${ELASTIC_HOSTS} + username: ${ELASTIC_USER} + password: ${ELASTIC_PASSWORD} + ssl.enabled: true + ssl.certificate_authorities: "certs/ca/ca.crt" diff --git a/keys/DOIRootCA2.crt b/keys/DOIRootCA2.crt new file mode 100644 index 0000000..3a49f72 --- /dev/null +++ b/keys/DOIRootCA2.crt @@ -0,0 +1,56 @@ +-----BEGIN CERTIFICATE----- +MIIJ+jCCB+KgAwIBAgIQSeV7u0WVHrFBqkjcKiEnEzANBgkqhkiG9w0BAQsFADAV +MRMwEQYDVQQDEwpET0lSb290Q0EyMB4XDTE2MDQyNjE4MjE1MFoXDTM2MDQyNjE4 +MjE1MFowFTETMBEGA1UEAxMKRE9JUm9vdENBMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBANTHy0AvCFT0CZOsktj3kpFfFJgrhiS5haBK7DvYYpbZoaWa +jOpldnvoqnd1bOJUJ9jUwxZERo27FJHZ8HSdU8ac63hdK1N6t1OrlQOjgs/Kn0LY +b7xOf6iDBfhdhXcjpq2KZBzVi0tWuyBPhDmZyTLqZ5UREy1mV3/p1pJ3Hx5lPziH +Fid+wILxe2fk+N1ExY/GA+cgdsSCP4kp4aBxXLNTq+oMu/NahyF+NmcbqUk3xh+v +U1UA7h35b8kjd/3Kx2Bv1EDveWbzaS7sn8T3OVnU6n9UObUcqaoJOXF3PDdQqPIh +YVLT8/s15YaUznTe7jc46YjnqZPaJJGbDir/m03QR8qWi0qMhLjTTfPYe4DFYa27 +4e8sTeK0DTSpUMBhQqagRQEFcYRd6QaZ4wfj+8zwX7EUha9jOKrMALvxRurkEqsa +m9NZntaHlSkRjQAZ562TUYowBpb841O0v0c9+i8SM6D9kRVV+NIj3StPNkQG7qlc ++PhF5YA4jYAifZ7AtWraLeopTPonfX0avWbIt5ryy4Y+sISwsg4HZ+rdNrJq9MUu +YWDbdO/lRclnFJ64VmD0rH7Fuef7CDiQvwn0NPJHQoU6h/zHnfOEIlh44h+0uy+R +lEp41vrb9mA/a7ZZEohcJroQ6JL1Z8b+KLY47ryuqneLklCVTGbMNGZxusOtAgMB +AAGjggVEMIIFQDALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUv4YryvNsbT5fHDtOTtiN52rHak8wEAYJKwYBBAGCNxUBBAMCAQAwggTtBgNV +HSAEggTkMIIE4DCCAg8GCWCGSAFlAwIBEzCCAgAwMAYIKwYBBQUHAgEWJGh0dHA6 +Ly9wa2kyLmRvaS5uZXQvbGVnYWxwb2xpY3kuYXNwADCCAcoGCCsGAQUFBwICMIIB +vB6CAbgAQwBlAHIAdABpAGYAaQBjAGEAdABlACAAaQBzAHMAdQBlAGQAIABiAHkA +IAB0AGgAZQAgAEQAZQBwAGEAcgB0AG0AZQBuAHQAIABvAGYAIAB0AGgAZQAgAEkA +bgB0AGUAcgBpAG8AcgAgAGEAcgBlACAAbwBuAGwAeQAgAGYAbwByACAAaQBuAHQA +ZQByAG4AYQBsACAAdQBuAGMAbABhAHMAcwBpAGYAaQBlAGQAIABVAFMAIABHAG8A +dgBlAHIAbgBtAGUAbgB0ACAAdQBzAGUAIABhAGwAbAAgAG8AdABoAGUAcgAgAHUA 
+cwBlACAAaQBzACAAcAByAG8AaABpAGIAaQB0AGUAZAAuACAAVQBuAGEAdQB0AGgA +bwByAGkAegBlAGQAIAB1AHMAZQAgAG0AYQB5ACAAcwB1AGIAagBlAGMAdAAgAHYA +aQBvAGwAYQB0AG8AcgBzACAAdABvACAAYwByAGkAbQBpAG4AYQBsACwAIABjAGkA +dgBpAGwAIABhAG4AZAAvAG8AcgAgAGQAaQBzAGMAaQBwAGwAaQBuAGEAcgB5ACAA +YQBjAHQAaQBvAG4ALjCCAskGCmCGSAFlAwIBEwEwggK5MDUGCCsGAQUFBwIBFilo +dHRwOi8vcGtpMi5kb2kubmV0L2xpbWl0ZWR1c2Vwb2xpY3kuYXNwADCCAn4GCCsG +AQUFBwICMIICcB6CAmwAVQBzAGUAIABvAGYAIAB0AGgAaQBzACAAQwBlAHIAdABp +AGYAaQBjAGEAdABlACAAaQBzACAAbABpAG0AaQB0AGUAZAAgAHQAbwAgAEkAbgB0 +AGUAcgBuAGEAbAAgAEcAbwB2AGUAcgBuAG0AZQBuAHQAIAB1AHMAZQAgAGIAeQAg +AC8AIABmAG8AcgAgAHQAaABlACAARABlAHAAYQByAHQAbQBlAG4AdAAgAG8AZgAg +AHQAaABlACAASQBuAHQAZQByAGkAbwByACAAbwBuAGwAeQAuACAARQB4AHQAZQBy +AG4AYQBsACAAdQBzAGUAIABvAHIAIAByAGUAYwBlAGkAcAB0ACAAbwBmACAAdABo +AGkAcwAgAEMAZQByAHQAaQBmAGkAYwBhAHQAZQAgAHMAaABvAHUAbABkACAAbgBv +AHQAIABiAGUAIAB0AHIAdQBzAHQAZQBkAC4AIABBAGwAbAAgAHMAdQBzAHAAZQBj +AHQAZQBkACAAbQBpAHMAdQBzAGUAIABvAHIAIABjAG8AbQBwAHIAbwBtAGkAcwBl +ACAAbwBmACAAdABoAGkAcwAgAGMAZQByAHQAaQBmAGkAYwBhAHQAZQAgAHMAaABv +AHUAbABkACAAYgBlACAAcgBlAHAAbwByAHQAZQBkACAAaQBtAG0AZQBkAGkAYQB0 +AGUAbAB5ACAAdABvACAAYQAgAEQAZQBwAGEAcgB0AG0AZQBuAHQAIABvAGYAIAB0 +AGgAZQAgAEkAbgB0AGUAcgBpAG8AcgAgAFMAZQBjAHUAcgBpAHQAeQAgAE8AZgBm +AGkAYwBlAHIALjANBgkqhkiG9w0BAQsFAAOCAgEAF/q4Z2mRTIYJMu5mzlWsbV4o +gGQJ9YcSdUZRq2vzINJCpGDXstAIE81Pfz/Fna98KOkjEB8XGXVUGQf07c9ylGJS +XFoBwcN8GgOuys5iiP9/yd2yLHB8rBb8pu9RForl9RoTsYY8nFuOOtl9o2EfB/1O +PbRYkfHhhqrfvvHdvDKWPmT+ZhaliWJrg2my432yqBqPePjqMZSl4sxiPYi9WicU +UWYdJpxQlys3igICD4GXOcSh316jfaqfN8+9jps+lgO7rqOA41B8fU9Gwi4B8jjx +Tw0pgvbuebwwL5IQwrsGcA8rFfRPR6CaSY5v3XXqTMbCXyYjNK1/44I9MoFFaFPc +e3cqZ5cQ+lCoW3UE0SLNZb3YKh28ES/Gi5CO0Bq5P8QVLRJQL5xOaSzV9blszHv5 +okR+lkSsVo2QzR/mzFD7lXtwznkd/uak0hripTB7MtZenBzoQ8zAgjgw5TXjRSAZ +goWiJTAg+YTKclhJ7Cfg/m4XeCxzNgz/pU1XEdBF2Ngvp3C9M5CSBcqzb234uiFF +SyvJl/6erDTkQ5dLrnSnsJIw1ZS/XG/Fi41u8il0piLc5depTLn9qiWf29BRBEtG +xwFKSmqlRWsClj/zADirBTjcctw7ajPMkRpebgn+Bzv1eWDx4+OolQuR/a45644Q 
+GHVtIa/kVEl2DE0WcUw= +-----END CERTIFICATE----- diff --git a/keys/instructions_from_David_watkins.txt b/keys/instructions_from_David_watkins.txt new file mode 100644 index 0000000..bfd9be5 --- /dev/null +++ b/keys/instructions_from_David_watkins.txt @@ -0,0 +1,12 @@ +One common (but avoidable) hurdle we are going to run into with transition is DOI's SSL intercept. If you have a container that makes HTTPS requests (e.g. to the CDR), it will need to contain the DOI root certificate (attached here) in its certificate store when it is deployed by USGS, and Python will need the appropriate environment variables set to point at it. Here is what I typically use in Dockerfiles to do this: + +COPY DOIRootCA2.crt /usr/local/share/ca-certificates +RUN chmod 644 /usr/local/share/ca-certificates/DOIRootCA2.crt && \ + update-ca-certificates +# you probably don't need all of these, but they don't hurt +ENV PIP_CERT="/etc/ssl/certs/ca-certificates.crt" \ + SSL_CERT_FILE="/etc/ssl/certs/ca-certificates.crt" \ + CURL_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt" \ + REQUESTS_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt" \ + AWS_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt" + diff --git a/logstash.conf b/logstash.conf new file mode 100644 index 0000000..abe73cf --- /dev/null +++ b/logstash.conf @@ -0,0 +1,64 @@ +input { + file { + #https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html + #default is TAIL which assumes more data will come into the file. + #change to mode => "read" if the file is a complete file. by default, the file will be removed once reading is complete -- backup your files if you need them.
+ mode => "tail" + path => "/usr/share/logstash/ingest_data/*" + } +} + + + +filter { + if [@metadata][rabbitmq_properties][timestamp] { + json { + source => "message" + target => "parsed" + } + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } +} + +output { + elasticsearch { + index => "logstash-%{+YYYY.MM.dd}" + hosts=> "${ELASTIC_HOSTS}" + user=> "${ELASTIC_USER}" + password=> "${ELASTIC_PASSWORD}" + cacert=> "certs/ca/ca.crt" + } +} + + +input { + rabbitmq { + host => "rabbitmq" + port => 5672 + user => "guest" + password => "guest" + queue => "cdrhook.error" + prefetch_count => 1 + durable => true + metadata_enabled => basic + } +} + +input { + rabbitmq { + host => "rabbitmq" + port => 5672 + user => "guest" + password => "guest" + queue => "download.error" + prefetch_count => 1 + durable => true + metadata_enabled => basic + } +} + +output { + stdout { codec => rubydebug } +} diff --git a/metricbeat.yml b/metricbeat.yml new file mode 100644 index 0000000..64a6262 --- /dev/null +++ b/metricbeat.yml @@ -0,0 +1,62 @@ +metricbeat.config.modules: + path: ${path.config}/modules.d/*.yml + reload.enabled: false + + + metricbeat.modules: + - module: elasticsearch + xpack.enabled: true + period: 10s + hosts: ${ELASTIC_HOSTS} + ssl.certificate_authorities: "certs/ca/ca.crt" + ssl.certificate: "certs/es01Elk/es01Elk.crt" + ssl.key: "certs/es01Elk/es01Elk.key" + username: ${ELASTIC_USER} + password: ${ELASTIC_PASSWORD} + ssl.enabled: true + + + - module: logstash + xpack.enabled: true + period: 10s + hosts: ${LOGSTASH_HOSTS} + + + - module: kibana + metricsets: + - stats + period: 10s + hosts: ${KIBANA_HOSTS} + username: ${ELASTIC_USER} + password: ${ELASTIC_PASSWORD} + xpack.enabled: true + + + - module: docker + metricsets: + - "container" + - "cpu" + - "diskio" + - "healthcheck" + - "info" + #- "image" + - "memory" + - "network" + hosts: ["unix:///var/run/docker.sock"] + period: 10s + enabled: true + + + processors: + - 
add_host_metadata: ~ + - add_docker_metadata: ~ + + +output.elasticsearch: + hosts: ${ELASTIC_HOSTS} + username: ${ELASTIC_USER} + password: ${ELASTIC_PASSWORD} + ssl: + certificate: "certs/es01Elk/es01Elk.crt" + certificate_authorities: "certs/ca/ca.crt" + key: "certs/es01Elk/es01Elk.key" diff --git a/quickstart.sh b/quickstart.sh index fb842fd..4458a00 100755 --- a/quickstart.sh +++ b/quickstart.sh @@ -112,6 +112,41 @@ RABBITMQ_PASSWORD="${RABBITMQ_PASSWORD}" RABBITMQ_MGMT_URL="${RABBITMQ_MGMT_URL}" PIPELINE_VERSION="${PIPELINE_VERSION}" + +### ELK Stack +# Project namespace (defaults to the current folder name if not set) +#COMPOSE_PROJECT_NAME=myproject + +# Password for the 'elastic' user (at least 6 characters) +ELASTIC_PASSWORD=changeme + +# Password for the 'kibana_system' user (at least 6 characters) +KIBANA_PASSWORD=changeme + +# Version of Elastic products +STACK_VERSION=8.7.1 + +# Set the cluster name +CLUSTER_NAME=elk-cluster + +# Set to 'basic' or 'trial' to automatically start the 30-day trial +LICENSE=basic +#LICENSE=trial + +# Port to expose Elasticsearch HTTP API to the host +ES_PORT=9200 + +# Port to expose Kibana to the host +KIBANA_PORT=5601 + +# Increase or decrease based on the available host memory (in bytes) +ES_MEM_LIMIT=2073741824 +KB_MEM_LIMIT=1073741824 +LS_MEM_LIMIT=1073741824 + +# SAMPLE Predefined Key only to be used in POC environments +ENCRYPTION_KEY=c34d38b3a14956121ff2170e5030b471551370178f43e5626eec58b04a30fae2 + EOF # -------------------------------------------------------