Add elasticsearch and kibana #23

Open · wants to merge 11 commits into base: main

Changes from all commits

4 changes: 4 additions & 0 deletions .gitignore
@@ -6,4 +6,8 @@ bluesky_config/**/log/
 bluesky_config/**/pid/
 __pycache__
 image_builders/pip_cache
+var/
 
+.idea/
+.vagrant/
+.ipynb_checkpoints/
86 changes: 86 additions & 0 deletions Vagrantfile
@@ -0,0 +1,86 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.

# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
config.vm.box = "bento/centos-8"

# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false

# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# NOTE: This will enable public access to the opened port
# config.vm.network "forwarded_port", guest: 80, host: 8080

# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine and only allow access
# via 127.0.0.1 to disable public access
config.vm.network "forwarded_port", guest: 5601, host: 5601, host_ip: "127.0.0.1"

# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"

# Create a public network, which generally matches a bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"

# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"

# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
config.vm.provider "virtualbox" do |vb|
# Display the VirtualBox GUI when booting the machine
vb.gui = false

# Customize the amount of memory on the VM:
vb.memory = "4096"
end
#
# View the documentation for the provider you are using for more
# information on available options.

# Enable provisioning with a shell script. Additional provisioners such as
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# documentation for more information about their specific syntax and use.
config.vm.provision "file", source: "~/.ssh/id_rsa.pub", destination: "/home/vagrant/.ssh/id_rsa.pub"
config.vm.provision "file", source: "~/.ssh/id_rsa", destination: "/home/vagrant/.ssh/id_rsa"
config.vm.provision "shell", inline: <<-SHELL
sudo echo -e "\nX11UseLocalhost no" >> /etc/ssh/sshd_config
sudo dnf -y update
sudo dnf -y install gcc git podman buildah python38 python38-devel python38-pip
SHELL

config.vm.provision "shell", inline: <<-SHELL
cd /vagrant
# create the kafka log directory or kafka will fail to start
mkdir -p opt/kafka/logs
# BUG: create bluesky log directory (nslsii.configure_bluesky_logging should do this)
mkdir -p var/log/bluesky
sudo bash image_builders/build_bluesky_base_image.sh
sudo bash image_builders/build_bluesky_image.sh
sudo bash image_builders/build_caproto_image.sh
sudo bash image_builders/build_databroker_server_image.sh
sudo bash image_builders/build_typhos_image.sh
SHELL
end
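
After vagrant up provisions the box, the forwarded_port mapping above exposes Kibana on the host loopback. A minimal sketch to verify from the host, assuming Kibana has been started inside the guest on its default port:

# a sketch, not part of this PR; assumes Kibana is running in the guest
# and forwarded to 127.0.0.1:5601 as configured above
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:5601/api/status") as resp:
    print(resp.status)  # expect 200 once Kibana is up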
89 changes: 89 additions & 0 deletions bluesky_config/elastic/elasticsearch.yml
@@ -0,0 +1,89 @@
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what you are trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
discovery.type: single-node
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
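
The substantive departures from the stock template are the data and log paths and discovery.type: single-node, which lets the node form a one-node cluster without discovery or master bootstrapping. A quick way to confirm the node is up, sketched under the assumption that the default HTTP port 9200 is reachable:

import json
import urllib.request

# _cluster/health reports green/yellow/red; a single-node cluster commonly
# sits at yellow because replica shards have nowhere to be allocated
with urllib.request.urlopen("http://localhost:9200/_cluster/health") as resp:
    health = json.load(resp)
print(health["status"], health["number_of_nodes"])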
17 changes: 17 additions & 0 deletions bluesky_config/elastic/filebeat.docker.yml
@@ -0,0 +1,17 @@
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true

processors:
- add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'
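
The hosts, username, and password values use Beats' ${VAR:default} expansion, so output falls back to elasticsearch:9200 with no credentials when the environment variables are unset, while the docker autodiscover provider with hints enabled picks up container logs automatically. To confirm events are arriving, a sketch assuming the default filebeat-* index pattern and Elasticsearch on localhost:

import json
import urllib.request

# ask Elasticsearch for one document from any filebeat index
url = "http://localhost:9200/filebeat-*/_search?size=1"
with urllib.request.urlopen(url) as resp:
    hits = json.load(resp)["hits"]
print(hits["total"])  # nonzero once Filebeat is shipping logs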
107 changes: 107 additions & 0 deletions bluesky_config/elastic/kibana.yml
@@ -0,0 +1,107 @@
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601

# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""

# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# This setting was effectively always `false` before Kibana 6.3 and will
# default to `true` starting in Kibana 7.0.
#server.rewriteBasePath: false

# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576

# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"

# The URLs of the Elasticsearch instances to use for all your queries.
#elasticsearch.hosts: ["http://localhost:9200"]

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"

# The default application to load.
#kibana.defaultAppId: "home"

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"

# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full

# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000

# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}

# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000

# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
#elasticsearch.logQueries: false

# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid

# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
#logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000

# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en (default), Chinese - zh-CN.
#i18n.locale: "en"
25 changes: 11 additions & 14 deletions bluesky_config/ipython/profile_default/startup/00-base.py
@@ -6,17 +6,17 @@
 import IPython
 
 import redis
-import msgpack
-import msgpack_numpy as mpn
 
 from bluesky import RunEngine
 import bluesky.plans as bp
 
 from bluesky.callbacks.best_effort import BestEffortCallback
 from bluesky.callbacks.zmq import Publisher as zmqPublisher
-from bluesky_kafka import Publisher as kafkaPublisher
 
 from bluesky_adaptive.per_start import adaptive_plan
+from bluesky_kafka import Publisher as kafkaPublisher
+
+from nslsii import configure_bluesky_logging, subscribe_kafka_publisher
 
 import databroker
 import happi
@@ -26,22 +26,21 @@
 
 hclient = happi.Client(path='/usr/local/share/happi/test_db.json')
 db = databroker.catalog['MAD']
+configure_bluesky_logging(ip)
 
 RE = RunEngine()
 bec = BestEffortCallback()
 
 zmq_publisher = zmqPublisher("127.0.0.1:4567")
-kafka_publisher = kafkaPublisher(
-    topic="mad.bluesky.documents",
-    bootstrap_servers="127.0.0.1:29092",
-    key="kafka-unit-test-key",
 # work with a single broker
+subscribe_kafka_publisher(
+    RE,
+    beamline_name="MAD",
+    bootstrap_servers="localhost:29092",
     producer_config={
         "acks": 1,
         "enable.idempotence": False,
         "request.timeout.ms": 5000,
-    },
-    serializer=partial(msgpack.dumps, default=mpn.encode),
+    }
 )
 
 logger = logging.getLogger("databroker")
@@ -51,20 +51,18 @@
 logger.addHandler(handler)
 
 RE.subscribe(zmq_publisher)
-RE.subscribe(kafka_publisher)
 RE.subscribe(bec)
 
 to_recommender = kafkaPublisher(
     topic="adaptive",
     bootstrap_servers="127.0.0.1:9092",
-    key="kafka-unit-test-key",
+    key="adaptive",
     # work with a single broker
     producer_config={
         "acks": 1,
         "enable.idempotence": False,
         "request.timeout.ms": 5000,
-    },
-    serializer=partial(msgpack.dumps, default=mpn.encode),
+    }
 )
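
For reference, a matching consumer for the documents this startup file publishes to the adaptive topic can be built with bluesky_kafka's RemoteDispatcher. A minimal sketch, not part of this PR; the group id is hypothetical, and the topic and broker address mirror the to_recommender configuration above:

from bluesky_kafka import RemoteDispatcher

# listen on the same topic and broker that to_recommender publishes to
dispatcher = RemoteDispatcher(
    topics=["adaptive"],
    bootstrap_servers="127.0.0.1:9092",
    group_id="adaptive-consumer",  # hypothetical consumer group name
    consumer_config={"auto.offset.reset": "latest"},
)
dispatcher.subscribe(lambda name, doc: print(name, doc))
dispatcher.start()  # blocks, dispatching (name, document) pairs as they arrive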
2 changes: 2 additions & 0 deletions launch_bluesky_headless.sh
@@ -14,7 +14,9 @@ podman run --pod acquisition \
 -v ./bluesky_config/ipython:/usr/local/share/ipython \
 -v ./bluesky_config/databroker:/usr/local/share/intake \
 -v ./bluesky_config/happi:/usr/local/share/happi \
+-v `pwd`/var/log/bluesky:/var/log/bluesky \
 -e EPICS_CA_ADDR_LIST=10.0.2.255 \
 -e EPICS_CA_AUTO_ADDR_LIST=no \
+-e BLUESKY_LOG_FILE=/var/log/bluesky/bluesky.log \
 $imagename \
 ipython3 --ipython-dir=/usr/local/share/ipython
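
The two added lines mount a host directory for Bluesky logs and set BLUESKY_LOG_FILE, which nslsii's configure_bluesky_logging (called in the startup script above) appears to consult for the log destination. A sketch for following the log from the host, assuming the repository root as the working directory:

import time

# follow the container's bluesky log via the host side of the bind mount
with open("var/log/bluesky/bluesky.log") as f:
    f.seek(0, 2)  # start at end of file; print only new lines
    while True:
        line = f.readline()
        if line:
            print(line, end="")
        else:
            time.sleep(0.5)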