diff --git a/.github/workflows/close-threads.yml b/.github/workflows/close-threads.yml new file mode 100644 index 0000000000..41b267f2bc --- /dev/null +++ b/.github/workflows/close-threads.yml @@ -0,0 +1,33 @@ +name: 'Close Threads' + +on: + schedule: + - cron: '50 1 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + discussions: write + +concurrency: + group: lock-threads + +jobs: + close-threads: + if: github.repository_owner == 'security-onion-solutions' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v5 + with: + days-before-issue-stale: -1 + days-before-issue-close: 60 + stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution." + close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available." + days-before-pr-stale: 45 + days-before-pr-close: 60 + stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes." + close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked." diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml index 25e5d8c170..f5d46ed464 100644 --- a/.github/workflows/lock-threads.yml +++ b/.github/workflows/lock-threads.yml @@ -2,7 +2,7 @@ name: 'Lock Threads' on: schedule: - - cron: '50 1 * * *' + - cron: '50 2 * * *' workflow_dispatch: permissions: @@ -14,24 +14,8 @@ concurrency: group: lock-threads jobs: - close-threads: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/stale@v5 - with: - days-before-issue-stale: -1 - days-before-issue-close: 60 - stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution." - close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available." - days-before-pr-stale: 45 - days-before-pr-close: 60 - stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes." - close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked." 
- lock-threads: + if: github.repository_owner == 'security-onion-solutions' runs-on: ubuntu-latest steps: - uses: jertel/lock-threads@main diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index 4493f210db..a5fd6e157d 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,17 +1,17 @@ -### 2.4.60-20240320 ISO image released on 2024/03/20 +### 2.4.70-20240529 ISO image released on 2024/05/29 ### Download and Verify -2.4.60-20240320 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso +2.4.70-20240529 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso -MD5: 178DD42D06B2F32F3870E0C27219821E -SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D -SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06 +MD5: 8FCCF31C2470D1ABA380AF196B611DEC +SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B +SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso +gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013 +gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. 
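In addition to verifying the GPG signature, the downloaded image can be checked against the published SHA256 value listed above. This is a minimal sketch, assuming the ISO was fetched into the current working directory with the wget command shown earlier:

```
# Compute the SHA256 of the downloaded ISO; the hex digest is case-insensitive
# and should match the published value for 2.4.70-20240529.
sha256sum securityonion-2.4.70-20240529.iso
# Expected: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
```

A matching checksum only confirms the download was not corrupted; the GPG signature check above remains the step that ties the image to the Security Onion signing key.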
diff --git a/README.md b/README.md index 19a5604197..530a21813e 100644 --- a/README.md +++ b/README.md @@ -8,19 +8,22 @@ Alerts ![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png) Dashboards -![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png) +![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png) Hunt -![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png) +![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png) + +Detections +![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png) PCAP -![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png) +![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png) Grid -![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png) +![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png) Config -![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png) +![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png) ### Release Notes diff --git a/VERSION b/VERSION index 5a99ed019d..b3c5d8c275 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.60 +2.4.70 diff --git a/pillar/top.sls b/pillar/top.sls index 9af62aa0b2..b6d6c2e730 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -43,8 +43,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - kratos.soc_kratos @@ -61,8 +59,6 @@ base: - elastalert.adv_elastalert - backup.soc_backup - backup.adv_backup - - soctopus.soc_soctopus - - soctopus.adv_soctopus - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig @@ -108,8 +104,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - strelka.soc_strelka @@ -165,8 +159,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - strelka.soc_strelka @@ -262,8 +254,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - backup.soc_backup diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 3ead8b26e0..109e244d7e 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -34,7 +34,6 @@ 'suricata', 'utility', 'schedule', - 'soctopus', 'tcpreplay', 'docker_clean' ], @@ -66,6 +65,7 @@ 'registry', 'manager', 'nginx', + 'strelka.manager', 'soc', 'kratos', 'influxdb', @@ -92,6 +92,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elasticfleet', @@ -101,7 +102,6 @@ 'suricata.manager', 'utility', 'schedule', - 'soctopus', 'docker_clean', 'stig' ], @@ -113,6 +113,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elastic-fleet-package-registry', @@ -123,7 +124,6 @@ 'suricata.manager', 'utility', 'schedule', - 'soctopus', 
'docker_clean', 'stig' ], @@ -157,7 +157,6 @@ 'healthcheck', 'utility', 'schedule', - 'soctopus', 'tcpreplay', 'docker_clean', 'stig' @@ -200,10 +199,6 @@ ], }, grain='role') %} - {% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %} - {% do allowed_states.append('mysql') %} - {% endif %} - {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %} {% do allowed_states.append('zeek') %} {%- endif %} @@ -229,10 +224,6 @@ {% do allowed_states.append('elastalert') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %} - {% do allowed_states.append('playbook') %} - {% endif %} - {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %} {% do allowed_states.append('logstash') %} {% endif %} diff --git a/salt/bpf/soc_bpf.yaml b/salt/bpf/soc_bpf.yaml index 379eaa0224..d93ec98fd2 100644 --- a/salt/bpf/soc_bpf.yaml +++ b/salt/bpf/soc_bpf.yaml @@ -1,6 +1,6 @@ bpf: pcap: - description: List of BPF filters to apply to PCAP. + description: List of BPF filters to apply to Stenographer. multiline: True forcedType: "[]string" helpLink: bpf.html diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index fd32b8a286..898de67ee4 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -1,9 +1,16 @@ -{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} -{% if SOC_GLOBAL.global.airgap %} -{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} -{% else %} -{% set UPDATE_DIR='/tmp/sogh/securityonion' %} -{% endif %} +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %} + +{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} +{% if SOC_GLOBAL.global.airgap %} +{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} +{% else %} +{% set UPDATE_DIR='/tmp/sogh/securityonion' %} +{% endif %} remove_common_soup: file.absent: @@ -13,6 +20,8 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall +# This section is used to put the scripts in place in the Salt file system +# in case a state run tries to overwrite what we do in the next section. copy_so-common_common_tools_sbin: file.copy: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common @@ -41,6 +50,15 @@ copy_so-firewall_manager_tools_sbin: - force: True - preserve: True +copy_so-yaml_manager_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py + - force: True + - preserve: True + +# This section is used to put the new script in place so that it can be called during soup. +# It is faster than calling the states that normally manage them to put them in place. 
copy_so-common_sbin: file.copy: - name: /usr/sbin/so-common @@ -68,3 +86,19 @@ copy_so-firewall_sbin: - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall - force: True - preserve: True + +copy_so-yaml_sbin: + file.copy: + - name: /usr/sbin/so-yaml.py + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py + - force: True + - preserve: True + +{% else %} +fix_23_soup_sbin: + cmd.run: + - name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup +fix_23_soup_salt: + cmd.run: + - name: curl -s -f -o /opt/so/saltstack/default/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup +{% endif %} diff --git a/salt/common/tools/sbin/so-checkin b/salt/common/tools/sbin/so-checkin index db35af4101..13ea9acd9b 100755 --- a/salt/common/tools/sbin/so-checkin +++ b/salt/common/tools/sbin/so-checkin @@ -5,8 +5,13 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +. /usr/sbin/so-common +cat << EOF -. /usr/sbin/so-common +so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see: +https://docs.securityonion.net/en/2.4/salt.html + +EOF -salt-call state.highstate -l info +salt-call state.highstate -l info queue=True diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 37adcef994..8d4d9f8abf 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -179,6 +179,21 @@ copy_new_files() { cd /tmp } +create_local_directories() { + echo "Creating local pillar and salt directories if needed" + PILLARSALTDIR=$1 + local_salt_dir="/opt/so/saltstack/local" + for i in "pillar" "salt"; do + for d in $(find $PILLARSALTDIR/$i -type d); do + suffixdir=${d//$PILLARSALTDIR/} + if [ !
-d "$local_salt_dir/$suffixdir" ]; then + mkdir -pv $local_salt_dir$suffixdir + fi + done + chown -R socore:socore $local_salt_dir/$i + done +} + disable_fastestmirror() { sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf } @@ -248,6 +263,14 @@ get_random_value() { head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1 } +get_agent_count() { + if [ -f /opt/so/log/agents/agentstatus.log ]; then + AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}') + else + AGENTCOUNT=0 + fi +} + gpg_rpm_import() { if [[ $is_oracle ]]; then if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then @@ -329,7 +352,7 @@ lookup_salt_value() { local="" fi - salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local} + salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local} } lookup_pillar() { @@ -570,8 +593,9 @@ sync_options() { set_version set_os salt_minion_count + get_agent_count - echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)" + echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)" } systemctl_func() { diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 7900b3c529..752ec20e08 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -53,13 +53,10 @@ container_list() { "so-kibana" "so-kratos" "so-logstash" - "so-mysql" "so-nginx" "so-pcaptools" - "so-playbook" "so-redis" "so-soc" - "so-soctopus" "so-steno" "so-strelka-backend" "so-strelka-filestream" diff --git a/salt/common/tools/sbin/so-ip-update b/salt/common/tools/sbin/so-ip-update index 7278afb941..6a330f644a 100755 --- a/salt/common/tools/sbin/so-ip-update +++ b/salt/common/tools/sbin/so-ip-update @@ -49,10 +49,6 @@ if [ "$CONTINUE" == "y" ]; then sed -i "s|$OLD_IP|$NEW_IP|g" $file done - echo "Granting MySQL root user permissions on $NEW_IP" - docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null - echo "Removing MySQL root user from $OLD_IP" - docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null echo "Updating Kibana dashboards" salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b8c68ffa11..cf1691589b 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -122,6 +122,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -154,15 +155,11 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index @@ -201,7 +198,13 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp. EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error fi RESULT=0 @@ -210,7 +213,9 @@ RESULT=0 CONTAINER_IDS=$(docker ps -q) exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary exclude_container so-idstools # ignore due to known issues and noisy logging -exclude_container so-playbook # ignore due to several playbook known issues +exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers +exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers +exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers for container_id in $CONTAINER_IDS; do container_name=$(docker ps --format json | jq ". 
| select(.ID==\"$container_id\")|.Names") @@ -228,10 +233,14 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet exclude_log "spool" # disregard zeek analyze logs as this is data specific exclude_log "import" # disregard imported test data the contains error strings exclude_log "update.log" # ignore playbook updates due to several known issues -exclude_log "playbook.log" # ignore due to several playbook known issues exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed exclude_log "cron-close.log" # ignore since Curator has been removed -exclude_log "curator.log" # ignore since Curator has been removed +exclude_log "curator.log" # ignore since Curator has been removed +exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk +exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk +exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk +exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state +exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" diff --git a/salt/common/tools/sbin/so-luks-tpm-regen b/salt/common/tools/sbin/so-luks-tpm-regen new file mode 100644 index 0000000000..50058b504d --- /dev/null +++ b/salt/common/tools/sbin/so-luks-tpm-regen @@ -0,0 +1,98 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0." + +set -e +# This script is intended to be used in the case the ISO install did not properly setup TPM decrypt for LUKS partitions at boot. +if [ -z $NOROOT ]; then + # Check for prerequisites + if [ "$(id -u)" -ne 0 ]; then + echo "This script must be run using sudo!" + exit 1 + fi +fi +ENROLL_TPM=N + +while [[ $# -gt 0 ]]; do + case $1 in + --enroll-tpm) + ENROLL_TPM=Y + ;; + *) + echo "Usage: $0 [options]" + echo "" + echo "where options are:" + echo " --enroll-tpm for when TPM enrollment was not selected during ISO install." + echo "" + exit 1 + ;; + esac + shift +done + +check_for_tpm() { + echo -n "Checking for TPM: " + if [ -d /sys/class/tpm/tpm0 ]; then + echo -e "tpm0 found." 
+ TPM="yes" + # Check if TPM is using sha1 or sha256 + if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then + echo -e "TPM is using sha1.\n" + TPM_PCR="sha1" + elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then + echo -e "TPM is using sha256.\n" + TPM_PCR="sha256" + fi + else + echo -e "No TPM found.\n" + exit 1 + fi +} + +check_for_luks_partitions() { + echo "Checking for LUKS partitions" + for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do + echo "Found LUKS partition: $part" + LUKS_PARTITIONS+=("$part") + done + if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then + echo -e "No LUKS partitions found.\n" + exit 1 + fi + echo "" +} + +enroll_tpm_in_luks() { + read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE + echo "" + for part in "${LUKS_PARTITIONS[@]}"; do + echo "Enrolling TPM for LUKS device: /dev/$part" + if [ "$TPM_PCR" == "sha1" ]; then + clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE + elif [ "$TPM_PCR" == "sha256" ]; then + clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE + fi + done + } + +regenerate_tpm_enrollment_token() { + for part in "${LUKS_PARTITIONS[@]}"; do + clevis luks regen -d /dev/$part -s 1 -q + done +} + +check_for_tpm +check_for_luks_partitions + +if [[ $ENROLL_TPM == "Y" ]]; then + enroll_tpm_in_luks +else + regenerate_tpm_enrollment_token +fi + +echo "Running dracut" +dracut -fv +echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n" \ No newline at end of file diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index b8a90421f9..d3886305ec 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -89,6 +89,7 @@ function suricata() { -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ -v "$PCAP:/input.pcap:ro" \ + -v /dev/null:/nsm/suripcap:rw \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 @@ -247,7 +248,7 @@ fi START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g') END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then - URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" + URL="https://{{ URLBASE 
}}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" status "Import complete!" status diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 4bc212fbed..b1d3b4e441 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -67,13 +67,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-mysql': - final_octet: 30 - port_bindings: - - 0.0.0.0:3306:3306 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-nginx': final_octet: 31 port_bindings: @@ -91,13 +84,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-playbook': - final_octet: 32 - port_bindings: - - 0.0.0.0:3000:3000 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-redis': final_octet: 33 port_bindings: @@ -118,13 +104,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-soctopus': - final_octet: 35 - port_bindings: - - 0.0.0.0:7000:7000 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-strelka-backend': final_octet: 36 custom_bind_mounts: [] @@ -201,6 +180,8 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] + ulimits: + - memlock=524288000 'so-zeek': final_octet: 99 custom_bind_mounts: [] diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index 6e0efeb20a..08e0dccc53 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -46,14 +46,11 @@ docker: so-kibana: *dockerOptions so-kratos: *dockerOptions so-logstash: *dockerOptions - so-mysql: *dockerOptions so-nginx: *dockerOptions so-nginx-fleet-node: *dockerOptions - so-playbook: *dockerOptions so-redis: *dockerOptions so-sensoroni: *dockerOptions so-soc: *dockerOptions - so-soctopus: *dockerOptions so-strelka-backend: *dockerOptions so-strelka-filestream: *dockerOptions so-strelka-frontend: *dockerOptions @@ -66,5 +63,41 @@ docker: so-elastic-agent: *dockerOptions so-telegraf: *dockerOptions so-steno: *dockerOptions - so-suricata: *dockerOptions + so-suricata: + final_octet: + description: Last octet of the container IP address. + helpLink: docker.html + readonly: True + advanced: True + global: True + port_bindings: + description: List of port bindings for the container. + helpLink: docker.html + advanced: True + multiline: True + forcedType: "[]string" + custom_bind_mounts: + description: List of custom local volume bindings. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + extra_hosts: + description: List of additional host entries for the container. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + extra_env: + description: List of additional ENV entries for the container. 
+ advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + ulimits: + description: Ulimits for the container, in bytes. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" so-zeek: *dockerOptions diff --git a/salt/elastalert/config.sls b/salt/elastalert/config.sls index 252aa83c0d..25d5bf5f86 100644 --- a/salt/elastalert/config.sls +++ b/salt/elastalert/config.sls @@ -82,6 +82,36 @@ elastasomodulesync: - group: 933 - makedirs: True +elastacustomdir: + file.directory: + - name: /opt/so/conf/elastalert/custom + - user: 933 + - group: 933 + - makedirs: True + +elastacustomsync: + file.recurse: + - name: /opt/so/conf/elastalert/custom + - source: salt://elastalert/files/custom + - user: 933 + - group: 933 + - makedirs: True + - file_mode: 660 + - show_changes: False + +elastapredefinedsync: + file.recurse: + - name: /opt/so/conf/elastalert/predefined + - source: salt://elastalert/files/predefined + - user: 933 + - group: 933 + - makedirs: True + - template: jinja + - file_mode: 660 + - context: + elastalert: {{ ELASTALERTMERGED }} + - show_changes: False + elastaconf: file.managed: - name: /opt/so/conf/elastalert/elastalert_config.yaml diff --git a/salt/elastalert/defaults.yaml b/salt/elastalert/defaults.yaml index a01c809525..3939329922 100644 --- a/salt/elastalert/defaults.yaml +++ b/salt/elastalert/defaults.yaml @@ -1,5 +1,6 @@ elastalert: enabled: False + alerter_parameters: "" config: rules_folder: /opt/elastalert/rules/ scan_subdirectories: true diff --git a/salt/elastalert/enabled.sls b/salt/elastalert/enabled.sls index e4b3642dbd..6a1ff14406 100644 --- a/salt/elastalert/enabled.sls +++ b/salt/elastalert/enabled.sls @@ -30,6 +30,8 @@ so-elastalert: - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro + - /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro + - /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %} diff --git a/salt/elastalert/files/custom/placeholder b/salt/elastalert/files/custom/placeholder new file mode 100644 index 0000000000..42e4ae4f02 --- /dev/null +++ b/salt/elastalert/files/custom/placeholder @@ -0,0 +1 @@ +THIS IS A PLACEHOLDER FILE \ No newline at end of file diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py deleted file mode 100644 index 3a43c26c16..0000000000 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- - -from time import gmtime, strftime -import requests,json -from elastalert.alerts import Alerter - -import urllib3 -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - -class PlaybookESAlerter(Alerter): - """ - Use matched data to create alerts in elasticsearch - """ - - required_options = set(['play_title','play_url','sigma_level']) - - def alert(self, matches): - for match in matches: - today = strftime("%Y.%m.%d", gmtime()) - timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime()) - headers = {"Content-Type": "application/json"} - - creds = None - if 'es_username' in self.rule and 'es_password' in self.rule: - creds = (self.rule['es_username'], self.rule['es_password']) - - payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp} - url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/" - requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds) - - def get_info(self): - return {'type': 'PlaybookESAlerter'} diff --git a/salt/elastalert/files/modules/so/securityonion-es.py b/salt/elastalert/files/modules/so/securityonion-es.py new file mode 100644 index 0000000000..d9bb8009e0 --- /dev/null +++ b/salt/elastalert/files/modules/so/securityonion-es.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + + +from time import gmtime, strftime +import requests,json +from elastalert.alerts import Alerter + +import urllib3 +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class SecurityOnionESAlerter(Alerter): + """ + Use matched data to create alerts in Elasticsearch. 
+ """ + + required_options = set(['detection_title', 'sigma_level']) + optional_fields = ['sigma_category', 'sigma_product', 'sigma_service'] + + def alert(self, matches): + for match in matches: + timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime()) + headers = {"Content-Type": "application/json"} + + creds = None + if 'es_username' in self.rule and 'es_password' in self.rule: + creds = (self.rule['es_username'], self.rule['es_password']) + + # Start building the rule dict + rule_info = { + "name": self.rule['detection_title'], + "uuid": self.rule['detection_public_id'] + } + + # Add optional fields if they are present in the rule + for field in self.optional_fields: + rule_key = field.split('_')[-1] # Assumes field format "sigma_" + if field in self.rule: + rule_info[rule_key] = self.rule[field] + + # Construct the payload with the conditional rule_info + payload = { + "tags": "alert", + "rule": rule_info, + "event": { + "severity": self.rule['event.severity'], + "module": self.rule['event.module'], + "dataset": self.rule['event.dataset'], + "severity_label": self.rule['sigma_level'] + }, + "sigma_level": self.rule['sigma_level'], + "event_data": match, + "@timestamp": timestamp + } + url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/" + requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds) + + def get_info(self): + return {'type': 'SecurityOnionESAlerter'} diff --git a/salt/elastalert/files/predefined/jira_auth.yaml b/salt/elastalert/files/predefined/jira_auth.yaml new file mode 100644 index 0000000000..9bf0425c0b --- /dev/null +++ b/salt/elastalert/files/predefined/jira_auth.yaml @@ -0,0 +1,6 @@ +{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %} +user: {{ elastalert.jira_user }} +password: {{ elastalert.jira_pass }} +{% else %} +apikey: {{ elastalert.get('jira_api_key', '') }} +{% endif %} \ No newline at end of file diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml b/salt/elastalert/files/predefined/smtp_auth.yaml new file mode 100644 index 0000000000..d09d101f11 --- /dev/null +++ b/salt/elastalert/files/predefined/smtp_auth.yaml @@ -0,0 +1,2 @@ +user: {{ elastalert.get('smtp_user', '') }} +password: {{ elastalert.get('smtp_pass', '') }} diff --git a/salt/elastalert/map.jinja b/salt/elastalert/map.jinja index cc395d8ee8..b8ceca277b 100644 --- a/salt/elastalert/map.jinja +++ b/salt/elastalert/map.jinja @@ -13,3 +13,19 @@ {% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %} {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} + +{% if 'ntf' in salt['pillar.get']('features', []) %} + {% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %} + {% if params != None and params | length > 0 %} + {% do ELASTALERTMERGED.config.update(params) %} + {% endif %} + + {% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %} + {% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %} + {% endif %} + + {% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %} + {% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %} + {% endif %} + +{% endif %} diff --git a/salt/elastalert/soc_elastalert.yaml b/salt/elastalert/soc_elastalert.yaml index cde09b83e9..435c5be6a3 
100644 --- a/salt/elastalert/soc_elastalert.yaml +++ b/salt/elastalert/soc_elastalert.yaml @@ -2,6 +2,99 @@ elastalert: enabled: description: You can enable or disable Elastalert. helpLink: elastalert.html + alerter_parameters: + title: Alerter Parameters + description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key. + global: True + multiline: True + syntax: yaml + helpLink: elastalert.html + forcedType: string + jira_api_key: + title: Jira API Key + description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + jira_pass: + title: Jira Password + description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + jira_user: + title: Jira Username + description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key. + global: True + helpLink: elastalert.html + forcedType: string + smtp_pass: + title: SMTP Password + description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + smtp_user: + title: SMTP Username + description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key. + global: True + helpLink: elastalert.html + forcedType: string + files: + custom: + alertmanager_ca__crt: + description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + gelf_ca__crt: + description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + http_post_ca__crt: + description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + http_post2_ca__crt: + description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. 
To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + ms_teams_ca__crt: + description: Optional custom Certificate Authority for connecting to a Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + pagerduty_ca__crt: + description: Optional custom Certificate Authority for connecting to a PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + rocket_chat_ca__crt: + description: Optional custom Certificate Authority for connecting to a Rocket.Chat server. To utilize this custom file, the rocket_chat_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + smtp__crt: + description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + smtp__key: + description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + slack_ca__crt: + description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html config: disable_rules_on_error: description: Disable rules on failure.
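As a concrete illustration of the new Alerter Parameters setting described above, the following is a minimal sketch of the kind of YAML that could be entered there to configure the ElastAlert 2 email alerter; the host name and addresses are placeholders, and the keys shown are standard ElastAlert 2 email options rather than anything specific to this change:

```
# Hypothetical Alerter Parameters value for the ElastAlert 2 email alerter.
# SMTP credentials are not placed here: per map.jinja above, setting the SMTP
# Username/Password options injects smtp_auth_file pointing at
# /opt/elastalert/predefined/smtp_auth.yaml.
smtp_host: mail.example.com           # placeholder SMTP relay
smtp_port: 587
smtp_ssl: false
from_addr: securityonion@example.com  # placeholder sender address
email:
  - soc-alerts@example.com            # placeholder recipient list
```

As the setting's description notes, a full update of the ElastAlert rule engine from the Detections screen is required before these parameters take effect, and the setting requires a valid Security Onion license key.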
diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 7b2d9d6a3c..2d9ab97a14 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -37,6 +37,7 @@ elasticfleet: - azure - barracuda - carbonblack_edr + - cef - checkpoint - cisco_asa - cisco_duo @@ -118,3 +119,8 @@ elasticfleet: base_url: https://api.platform.sublimesecurity.com poll_interval: 5m limit: 100 + kismet: + base_url: http://localhost:2501 + poll_interval: 1m + api_key: + enabled_nodes: [] diff --git a/salt/elasticfleet/files/integrations-optional/kismet.json b/salt/elasticfleet/files/integrations-optional/kismet.json new file mode 100644 index 0000000000..9a333f31cc --- /dev/null +++ b/salt/elasticfleet/files/integrations-optional/kismet.json @@ -0,0 +1,36 @@ +{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{% raw %} +{ + "package": { + "name": "httpjson", + "version": "" + }, + "name": "kismet-logs", + "namespace": "so", + "description": "Kismet Logs", + "policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}", + "inputs": { + "generic-httpjson": { + "enabled": true, + "streams": { + "httpjson.generic": { + "enabled": true, + "vars": { + "data_stream.dataset": "kismet", + "request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson", + "request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}", + "request_method": "GET", + "request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'", + "request_redirect_headers_ban_list": [], + "oauth_scopes": [], + "processors": "", + "tags": [], + "pipeline": "kismet.common" + } + } + } + } + }, + "force": true +} +{% endraw %} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json b/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json new file mode 100644 index 0000000000..ac4394e628 --- /dev/null +++ b/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json @@ -0,0 +1,29 @@ +{ + "package": { + "name": "winlog", + "version": "" + }, + "name": "windows-defender", + "namespace": "default", + "description": "Windows Defender - Operational logs", + "policy_id": "endpoints-initial", + "inputs": { + "winlogs-winlog": { + "enabled": true, + "streams": { + "winlog.winlog": { + "enabled": true, + "vars": { + "channel": "Microsoft-Windows-Windows Defender/Operational", + "data_stream.dataset": "winlog.winlog", + "preserve_original_event": false, + "providers": [], + "ignore_older": "72h", + "language": 0, + "tags": [] } + } + } + } + }, + "force": true +} diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json new file mode 100644 index 0000000000..5649b481de --- /dev/null +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json @@ -0,0 +1,35 @@ +{ + "policy_id": "so-grid-nodes_general", + "package": { + "name": "log", + "version": "" + }, + "name": "soc-detections-logs", + "description": "Security Onion Console - Detections Logs", + "namespace": "so", + "inputs": { + "logs-logfile": { + "enabled": true, + "streams": { + "log.logs": { + "enabled": true, + "vars": { + "paths": [ + "/opt/so/log/soc/detections_runtime-status_sigma.log", + 
"/opt/so/log/soc/detections_runtime-status_yara.log" + ], + "exclude_files": [], + "ignore_older": "72h", + "data_stream.dataset": "soc", + "tags": [ + "so-soc" + ], + "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", + "custom": "pipeline: common" + } + } + } + } + }, + "force": true +} diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json b/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json index 8e6bf7958e..98204e894f 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json @@ -16,6 +16,9 @@ "paths": [ "/var/log/auth.log*", "/var/log/secure*" + ], + "tags": [ + "so-grid-node" ] } }, @@ -25,6 +28,9 @@ "paths": [ "/var/log/messages*", "/var/log/syslog*" + ], + "tags": [ + "so-grid-node" ] } } diff --git a/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json b/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json index 6c42086bcd..42918cc97e 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json @@ -16,6 +16,9 @@ "paths": [ "/var/log/auth.log*", "/var/log/secure*" + ], + "tags": [ + "so-grid-node" ] } }, @@ -25,6 +28,9 @@ "paths": [ "/var/log/messages*", "/var/log/syslog*" + ], + "tags": [ + "so-grid-node" ] } } diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 9a0cd0a912..7ed97e6ec2 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -79,3 +79,29 @@ elasticfleet: helpLink: elastic-fleet.html advanced: True forcedType: int + kismet: + base_url: + description: Base URL for Kismet. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + poll_interval: + description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + api_key: + description: API key for Kismet. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + sensitive: True + enabled_nodes: + description: Fleet nodes with the Kismet integration enabled. Enter one per line. 
+ global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: "[]string" diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers index ff46a3e072..1e4222caee 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers @@ -72,5 +72,5 @@ do printf "\n### $GOOS/$GOARCH Installer Generated...\n" done -printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace" +printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n" rm -rf /nsm/elastic-agent-workspace diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 2274018b16..e54d58c3b6 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,4 +1,6 @@ elasticsearch: + enabled: false + index_clean: true config: action: destructive_requires_name: true @@ -54,7 +56,6 @@ elasticsearch: enabled: true key: /usr/share/elasticsearch/config/elasticsearch.key verification_mode: none - enabled: false pipelines: custom001: description: Custom Pipeline @@ -151,7 +152,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -227,6 +228,113 @@ elasticsearch: sort: field: '@timestamp' order: desc + so-logs-soc: + close: 30 + delete: 365 + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - container-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - common-settings + - common-dynamic-mappings + data_stream: {} + index_patterns: + - logs-soc-so* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-soc-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + warm: 7 so-common: close: 30 delete: 365 @@ -324,7 +432,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -436,7 +544,7 @@ elasticsearch: 
actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -541,7 +649,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -654,7 +762,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -762,7 +870,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -825,7 +933,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -869,7 +977,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -913,7 +1021,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -957,7 +1065,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1001,7 +1109,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1045,7 +1153,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1089,7 +1197,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1133,7 +1241,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1177,7 +1285,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1221,7 +1329,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1265,7 +1373,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1309,7 +1417,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1353,7 +1461,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1397,7 +1505,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1441,7 +1549,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1485,7 +1593,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1529,7 +1637,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1573,7 +1681,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1617,7 +1725,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1661,7 +1769,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1705,7 +1813,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1749,7 +1857,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1793,7 +1901,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1837,7 +1945,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -1881,7 +1989,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1925,7 +2033,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1969,7 +2077,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2013,7 +2121,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2057,7 +2165,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2101,7 +2209,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2145,7 +2253,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2189,7 +2297,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2233,7 +2341,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2277,7 +2385,51 @@ elasticsearch: actions: set_priority: priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 min_age: 30d + so-logs-cef_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-cef.log-*" + template: + settings: + index: + lifecycle: + name: so-logs-cef.log-logs + number_of_replicas: 0 + composed_of: + - "logs-cef.log@package" + - "logs-cef.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d delete: actions: delete: {} @@ -2321,7 +2473,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2365,7 +2517,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2409,7 +2561,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2453,7 +2605,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2497,7 +2649,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2541,7 +2693,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2585,7 +2737,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2629,7 +2781,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2673,7 +2825,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2717,7 +2869,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2761,7 +2913,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2805,7 +2957,7 @@ elasticsearch: actions: set_priority: 
priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2849,7 +3001,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2893,7 +3045,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2937,7 +3089,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2981,7 +3133,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3025,7 +3177,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3069,7 +3221,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3113,7 +3265,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3157,7 +3309,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3201,7 +3353,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3245,7 +3397,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3289,7 +3441,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3333,7 +3485,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3377,7 +3529,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3421,7 +3573,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3439,6 +3591,68 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-detections_x_alerts: + index_sorting: false + index_template: + composed_of: + - so-data-streams-mappings + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + - so-logs-mappings + - so-logs-settings + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-detections.alerts-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-detections.alerts-so + mapping: + total_fields: + limit: 5001 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 1d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-elastic_agent: index_sorting: false index_template: @@ -3483,7 +3697,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3544,7 +3758,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3605,7 +3819,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3663,7 +3877,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -3719,7 +3933,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3775,7 +3989,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3828,7 +4042,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3886,7 +4100,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3942,7 +4156,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3998,7 +4212,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4059,7 +4273,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4115,7 +4329,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4171,7 +4385,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4227,7 +4441,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4283,7 +4497,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4339,7 +4553,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4395,7 +4609,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4451,7 +4665,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4507,7 +4721,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4563,7 +4777,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4607,7 +4821,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4651,7 +4865,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4695,7 +4909,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4739,7 +4953,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4783,7 +4997,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4827,7 +5041,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4871,7 +5085,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4915,7 +5129,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4959,7 +5173,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5003,7 +5217,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5047,7 +5261,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-5091,7 +5305,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5135,7 +5349,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5179,7 +5393,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5223,7 +5437,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5267,7 +5481,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5311,7 +5525,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5355,7 +5569,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5399,7 +5613,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5443,7 +5657,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5487,7 +5701,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5531,7 +5745,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5575,7 +5789,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5619,7 +5833,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5663,7 +5877,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5707,7 +5921,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5751,7 +5965,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5795,7 +6009,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5839,7 +6053,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5883,7 +6097,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5927,7 +6141,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5971,7 +6185,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6015,7 +6229,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6059,7 +6273,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6103,7 +6317,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6147,7 +6361,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6191,7 +6405,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6235,7 +6449,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6279,7 +6493,7 @@ elasticsearch: actions: 
set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6323,7 +6537,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6367,7 +6581,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6411,7 +6625,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6455,7 +6669,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6499,7 +6713,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6543,7 +6757,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6587,7 +6801,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6631,7 +6845,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6675,7 +6889,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6719,7 +6933,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6763,7 +6977,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6807,7 +7021,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6851,7 +7065,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6895,7 +7109,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6939,7 +7153,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6983,7 +7197,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7027,7 +7241,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7071,7 +7285,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7115,7 +7329,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7159,7 +7373,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7203,7 +7417,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7247,7 +7461,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7291,7 +7505,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7335,7 +7549,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7379,7 +7593,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7423,7 +7637,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7467,7 +7681,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -7511,7 +7725,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7589,7 +7803,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7633,7 +7847,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7677,7 +7891,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7721,7 +7935,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7765,7 +7979,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7809,7 +8023,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7853,7 +8067,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7897,7 +8111,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7941,7 +8155,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7985,7 +8199,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8029,7 +8243,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8073,7 +8287,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8117,7 +8331,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8161,7 +8375,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8205,7 +8419,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8249,7 +8463,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8293,7 +8507,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8337,7 +8551,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8381,7 +8595,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8425,7 +8639,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8469,7 +8683,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8514,7 +8728,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8559,7 +8773,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8604,7 +8818,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8649,7 +8863,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8694,7 +8908,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-8738,7 +8952,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8782,7 +8996,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8826,7 +9040,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8870,7 +9084,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8914,7 +9128,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8958,7 +9172,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9002,7 +9216,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9046,7 +9260,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9090,7 +9304,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9134,7 +9348,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9178,7 +9392,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9222,7 +9436,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9266,7 +9480,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9310,7 +9524,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9354,7 +9568,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9398,7 +9612,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9442,7 +9656,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9486,7 +9700,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9530,7 +9744,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9574,7 +9788,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9618,7 +9832,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9662,7 +9876,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9706,7 +9920,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9750,7 +9964,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9794,7 +10008,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9838,7 +10052,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9882,7 +10096,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9926,7 +10140,7 @@ elasticsearch: 
actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9970,7 +10184,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10014,7 +10228,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10058,7 +10272,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10102,7 +10316,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10146,7 +10360,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10190,7 +10404,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10234,7 +10448,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10278,7 +10492,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10322,7 +10536,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10366,7 +10580,54 @@ elasticsearch: actions: set_priority: priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 min_age: 30d + so-kismet: + index_sorting: false + index_template: + composed_of: + - kismet-mappings + - source-mappings + - client-mappings + - device-mappings + - network-mappings + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-kismet-so* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-kismet-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d delete: actions: delete: {} @@ -10476,7 +10737,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10586,7 +10847,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10698,7 +10959,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10809,7 +11070,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10921,7 +11182,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -11034,7 +11295,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} diff --git a/salt/mysql/disabled.sls b/salt/elasticsearch/download.sls similarity index 67% rename from salt/mysql/disabled.sls rename to salt/elasticsearch/download.sls index 805a755e41..f74c7059a8 100644 --- a/salt/mysql/disabled.sls +++ b/salt/elasticsearch/download.sls @@ -1,22 +1,15 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} -include: - - mysql.sostatus - -so-mysql: - docker_container.absent: - - force: True - -so-mysql_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-mysql$ +so-elasticsearch_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }} {% else %} diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls index b9c66f2318..1d35d3505f 100644 --- a/salt/elasticsearch/enabled.sls +++ b/salt/elasticsearch/enabled.sls @@ -200,9 +200,15 @@ so-elasticsearch-roles-load: - require: - docker_container: so-elasticsearch - file: elasticsearch_sbin_jinja -{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %} + +{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %} +{% if ELASTICSEARCHMERGED.index_clean %} +{% set ap = "present" %} +{% else %} +{% set ap = "absent" %} +{% endif %} so-elasticsearch-indices-delete: - cron.present: + cron.{{ap}}: - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1 - identifier: so-elasticsearch-indices-delete - user: root @@ -211,7 +217,8 @@ so-elasticsearch-indices-delete: - daymonth: '*' - month: '*' - dayweek: '*' -{% endif %} +{% endif %} + {% endif %} {% else %} diff --git a/salt/elasticsearch/files/ingest-dynamic/common b/salt/elasticsearch/files/ingest-dynamic/common index b2c13f9832..836b8d4af0 100644 --- a/salt/elasticsearch/files/ingest-dynamic/common +++ b/salt/elasticsearch/files/ingest-dynamic/common @@ -57,10 +57,11 @@ { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, - { "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }}, + { "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } }, { "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } }, - { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, - { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }}, + { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, + { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } }, + { "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", 
"event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } } {%- endraw %} {%- if HIGHLANDER %} diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 2ecbc39897..c3e70ec2ce 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -68,7 +68,7 @@ "field": "_security", "ignore_missing": true } - }, + }, { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } }, { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } }, { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } }, @@ -80,9 +80,10 @@ { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } }, - { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, + { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, + { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ diff --git a/salt/elasticsearch/files/ingest/kismet.ad_hoc b/salt/elasticsearch/files/ingest/kismet.ad_hoc new file mode 100644 index 0000000000..adfbd79013 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.ad_hoc @@ -0,0 +1,10 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "network.wireless.bssid" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.ap b/salt/elasticsearch/files/ingest/kismet.ap new file mode 100644 index 0000000000..a864c09e4f --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.ap @@ -0,0 +1,50 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked", + "target_field": "network.wireless.ssid_cloaked", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid", + "target_field": "network.wireless.ssid", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null" + } + }, + { + "set": { + "field": "network.wireless.ssid", + "value": 
"Hidden", + "if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc", + "target_field": "network.wireless.channel_utilization", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "network.wireless.bssid" + } + }, + { + "foreach": { + "field": "message2.dot11_device.dot11_device_associated_client_map", + "processor": { + "append": { + "field": "network.wireless.associated_clients", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.bridged b/salt/elasticsearch/files/ingest/kismet.bridged new file mode 100644 index 0000000000..b61635e3aa --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.bridged @@ -0,0 +1,16 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "network.wireless.bssid" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.client b/salt/elasticsearch/files/ingest/kismet.client new file mode 100644 index 0000000000..6da0a071b9 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.client @@ -0,0 +1,29 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "network.wireless.last_connected_bssid", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null" + } + }, + { + "foreach": { + "field": "message2.dot11_device.dot11_device_client_map", + "processor": { + "append": { + "field": "network.wireless.known_connected_bssid", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.common b/salt/elasticsearch/files/ingest/kismet.common new file mode 100644 index 0000000000..14d439105d --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.common @@ -0,0 +1,159 @@ +{ + "processors": [ + { + "json": { + "field": "message", + "target_field": "message2" + } + }, + { + "date": { + "field": "message2.kismet_device_base_mod_time", + "formats": [ + "epoch_second" + ], + "target_field": "@timestamp" + } + }, + { + "set": { + "field": "event.category", + "value": "network" + } + }, + { + "dissect": { + "field": "message2.kismet_device_base_type", + "pattern": "%{wifi} %{device_type}" + } + }, + { + "lowercase": { + "field": "device_type" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.{{device_type}}" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.wds_ap", + "if": "ctx?.device_type == 'wds ap'" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.ad_hoc", + "if": "ctx?.device_type == 'ad-hoc'" + } + }, + { + "set": { + "field": "event.module", + "value": "kismet" + } + }, + { + "rename": { + "field": 
"message2.kismet_device_base_packets_tx_total", + "target_field": "source.packets" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_num_alerts", + "target_field": "kismet.alerts.count" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_channel", + "target_field": "network.wireless.channel", + "if": "ctx?.message2?.kismet_device_base_channel != ''" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_frequency", + "target_field": "network.wireless.frequency", + "if": "ctx?.message2?.kismet_device_base_frequency != 0" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_last_time", + "target_field": "kismet.last_seen" + } + }, + { + "date": { + "field": "kismet.last_seen", + "formats": [ + "epoch_second" + ], + "target_field": "kismet.last_seen" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_first_time", + "target_field": "kismet.first_seen" + } + }, + { + "date": { + "field": "kismet.first_seen", + "formats": [ + "epoch_second" + ], + "target_field": "kismet.first_seen" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_seenby", + "target_field": "kismet.seenby" + } + }, + { + "foreach": { + "field": "kismet.seenby", + "processor": { + "pipeline": { + "name": "kismet.seenby" + } + } + } + }, + { + "rename": { + "field": "message2.kismet_device_base_manuf", + "target_field": "device.manufacturer" + } + }, + { + "pipeline": { + "name": "{{event.dataset}}" + } + }, + { + "remove": { + "field": [ + "message2", + "message", + "device_type", + "wifi", + "agent", + "host", + "event.created" + ], + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.device b/salt/elasticsearch/files/ingest/kismet.device new file mode 100644 index 0000000000..49d0c7ad71 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.device @@ -0,0 +1,9 @@ +{ + "processors": [ + { + "pipeline": { + "name": "kismet.client" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.seenby b/salt/elasticsearch/files/ingest/kismet.seenby new file mode 100644 index 0000000000..d41220d763 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.seenby @@ -0,0 +1,52 @@ +{ + "processors": [ + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_num_packets", + "target_field": "_ingest._value.packets_seen", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_uuid", + "target_field": "_ingest._value.serial_number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_first_time", + "target_field": "_ingest._value.first_seen", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_last_time", + "target_field": "_ingest._value.last_seen", + "ignore_missing": true + } + }, + { + "date": { + "field": "_ingest._value.first_seen", + "formats": [ + "epoch_second" + ], + "target_field": "_ingest._value.first_seen", + "ignore_failure": true + } + }, + { + "date": { + "field": "_ingest._value.last_seen", + "formats": [ + "epoch_second" + ], + "target_field": "_ingest._value.last_seen", + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.wds b/salt/elasticsearch/files/ingest/kismet.wds new file mode 100644 index 0000000000..1e426c4634 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.wds @@ -0,0 +1,10 @@ +{ + 
"processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.wds_ap b/salt/elasticsearch/files/ingest/kismet.wds_ap new file mode 100644 index 0000000000..4d10b211be --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.wds_ap @@ -0,0 +1,22 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_commonname", + "target_field": "network.wireless.bssid" + } + }, + { + "foreach": { + "field": "message2.dot11_device.dot11_device_associated_client_map", + "processor": { + "append": { + "field": "network.wireless.associated_clients", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 new file mode 100644 index 0000000000..af31e1518d --- /dev/null +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 @@ -0,0 +1,389 @@ +{ + "description": "Pipeline for pfSense", + "processors": [ + { + "set": { + "field": "ecs.version", + "value": "8.10.0" + } + }, + { + "set": { + "field": "observer.vendor", + "value": "netgate" + } + }, + { + "set": { + "field": "observer.type", + "value": "firewall" + } + }, + { + "rename": { + "field": "message", + "target_field": "event.original" + } + }, + { + "set": { + "field": "event.kind", + "value": "event" + } + }, + { + "set": { + "field": "event.timezone", + "value": "{{_tmp.tz_offset}}", + "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'" + } + }, + { + "grok": { + "description": "Parse syslog header", + "field": "event.original", + "patterns": [ + "^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}" + ], + "pattern_definitions": { + "ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?", + "TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})", + "BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:", + "BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))", + "NAME": "[[[:alnum:]]_-]+", + "SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})", + "TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?", + "OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})", + "PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})", + "BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+", + "META": "\\[[^\\]]*\\]" + } + } + }, + { + "date": { + "if": "ctx._tmp.timestamp8601 != null", + "field": "_tmp.timestamp8601", + "target_field": "@timestamp", + "formats": [ + "ISO8601" + ] + } + }, + { + "date": { + "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null", + "field": "_tmp.timestamp", + "target_field": "@timestamp", + "formats": [ + "MMM d HH:mm:ss", + "MMM d HH:mm:ss", + "MMM dd HH:mm:ss" + ], + "timezone": "{{ event.timezone }}" + } + }, + { + "grok": { + "description": "Set Event Provider", + "field": "process.name", + "patterns": [ + "^%{HYPHENATED_WORDS:event.provider}" + ], + "pattern_definitions": { + "HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b" + } + } + }, + 
{ + "pipeline": { + "name": "logs-pfsense.log-1.16.0-firewall", + "if": "ctx.event.provider == 'filterlog'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-openvpn", + "if": "ctx.event.provider == 'openvpn'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-ipsec", + "if": "ctx.event.provider == 'charon'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-dhcp", + "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-unbound", + "if": "ctx.event.provider == 'unbound'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-haproxy", + "if": "ctx.event.provider == 'haproxy'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-php-fpm", + "if": "ctx.event.provider == 'php-fpm'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-squid", + "if": "ctx.event.provider == 'squid'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-suricata", + "if": "ctx.event.provider == 'suricata'" + } + }, + { + "drop": { + "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)" + } + }, + { + "append": { + "field": "event.category", + "value": "network", + "if": "ctx.network != null" + } + }, + { + "convert": { + "field": "source.address", + "target_field": "source.ip", + "type": "ip", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "convert": { + "field": "destination.address", + "target_field": "destination.ip", + "type": "ip", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "set": { + "field": "network.type", + "value": "ipv6", + "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")" + } + }, + { + "set": { + "field": "network.type", + "value": "ipv4", + "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")" + } + }, + { + "geoip": { + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "field": "destination.ip", + "target_field": "destination.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "ignore_missing": true, + "database_file": "GeoLite2-ASN.mmdb", + "field": "source.ip", + "target_field": "source.as", + "properties": [ + "asn", + "organization_name" + ] + } + }, + { + "geoip": { + "database_file": "GeoLite2-ASN.mmdb", + "field": "destination.ip", + "target_field": "destination.as", + "properties": [ + "asn", + "organization_name" + ], + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.asn", + "target_field": "source.as.number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.organization_name", + "target_field": "source.as.organization.name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "destination.as.asn", + "target_field": "destination.as.number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "destination.as.organization_name", + "target_field": "destination.as.organization.name", + "ignore_missing": true + } + }, + { + "community_id": { + "target_field": "network.community_id", + "ignore_failure": true + } + }, + { + "grok": { + "field": "observer.ingress.interface.name", + "patterns": [ + "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}" + ], + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "set": { + "field": "network.vlan.id", + "copy_from": "observer.ingress.vlan.id", + 
"ignore_empty_value": true + } + }, + { + "append": { + "field": "related.ip", + "value": "{{destination.ip}}", + "allow_duplicates": false, + "if": "ctx.destination?.ip != null" + } + }, + { + "append": { + "field": "related.ip", + "value": "{{source.ip}}", + "allow_duplicates": false, + "if": "ctx.source?.ip != null" + } + }, + { + "append": { + "field": "related.ip", + "value": "{{source.nat.ip}}", + "allow_duplicates": false, + "if": "ctx.source?.nat?.ip != null" + } + }, + { + "append": { + "field": "related.hosts", + "value": "{{destination.domain}}", + "if": "ctx.destination?.domain != null" + } + }, + { + "append": { + "field": "related.user", + "value": "{{user.name}}", + "if": "ctx.user?.name != null" + } + }, + { + "set": { + "field": "network.direction", + "value": "{{network.direction}}bound", + "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/" + } + }, + { + "remove": { + "field": [ + "_tmp" + ], + "ignore_failure": true + } + }, + { + "script": { + "lang": "painless", + "description": "This script processor iterates over the whole document to remove fields with null values.", + "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" + } + }, + { + "remove": { + "field": "event.original", + "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "pipeline": { + "name": "logs-pfsense.log@custom", + "ignore_missing_pipeline": true + } + } + ], + "on_failure": [ + { + "remove": { + "field": [ + "_tmp" + ], + "ignore_failure": true + } + }, + { + "set": { + "field": "event.kind", + "value": "pipeline_error" + } + }, + { + "append": { + "field": "error.message", + "value": "{{{ _ingest.on_failure_message }}}" + } + } + ], + "_meta": { + "managed_by": "fleet", + "managed": true, + "package": { + "name": "pfsense" + } + } +} diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata new file mode 100644 index 0000000000..f3a14af44b --- /dev/null +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata @@ -0,0 +1,31 @@ +{ + "description": "Pipeline for parsing pfSense Suricata logs.", + "processors": [ + { + "pipeline": { + "name": "suricata.common" + } + } + ], + "on_failure": [ + { + "set": { + "field": "event.kind", + "value": "pipeline_error" + } + }, + { + "append": { + "field": "error.message", + "value": "{{{ _ingest.on_failure_message }}}" + } + } + ], + "_meta": { + "managed_by": "fleet", + "managed": true, + "package": { + "name": "pfsense" + } + } +} diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index d55e193507..d9aed3b29a 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -56,6 +56,7 @@ { "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }}, { "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }}, { "set": { "if": 
"ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }}, + { "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }}, { "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }}, { "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } }, { "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}}, diff --git a/salt/elasticsearch/files/ingest/suricata.common b/salt/elasticsearch/files/ingest/suricata.common index 6aec40a2b9..8143882c71 100644 --- a/salt/elasticsearch/files/ingest/suricata.common +++ b/salt/elasticsearch/files/ingest/suricata.common @@ -13,7 +13,6 @@ { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } }, { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } }, { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } }, - { "lowercase": { "field": "network.transport", "ignore_failure": true } }, { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" } }, { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } }, diff --git a/salt/elasticsearch/roles/analyst.json b/salt/elasticsearch/roles/analyst.json index 2fd10ab474..90ff95ad48 100644 --- a/salt/elasticsearch/roles/analyst.json +++ b/salt/elasticsearch/roles/analyst.json @@ -27,7 +27,8 @@ "monitor", "read", "read_cross_cluster", - "view_index_metadata" + "view_index_metadata", + "write" ] } ], diff --git a/salt/elasticsearch/roles/limited-analyst.json b/salt/elasticsearch/roles/limited-analyst.json index 9186b732ec..6511e5f447 100644 --- a/salt/elasticsearch/roles/limited-analyst.json +++ b/salt/elasticsearch/roles/limited-analyst.json @@ -13,7 +13,8 @@ "monitor", "read", "read_cross_cluster", - "view_index_metadata" + "view_index_metadata", + "write" ] } ], diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index e68d0441b5..000fd60b72 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -5,6 +5,10 @@ elasticsearch: esheap: description: Specify the memory heap size in (m)egabytes for Elasticsearch. helpLink: elasticsearch.html + index_clean: + description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. + forcedType: bool + helpLink: elasticsearch.html retention: retention_pct: decription: Total percentage of space used by Elasticsearch for multi node clusters @@ -98,10 +102,6 @@ elasticsearch: policy: phases: hot: - max_age: - description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier. - global: True - helpLink: elasticsearch.html actions: set_priority: priority: @@ -120,7 +120,9 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. 
While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + regex: ^[0-9]{1,5}d$ + forcedType: string global: True helpLink: elasticsearch.html actions: @@ -131,8 +133,8 @@ elasticsearch: helpLink: elasticsearch.html warm: min_age: - description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. - regex: ^\[0-9\]{1,5}d$ + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. + regex: ^[0-9]{1,5}d$ forcedType: string global: True actions: @@ -145,6 +147,8 @@ elasticsearch: delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. + regex: ^[0-9]{1,5}d$ + forcedType: string global: True helpLink: elasticsearch.html so-logs: &indexSettings @@ -271,7 +275,9 @@ elasticsearch: helpLink: elasticsearch.html warm: min_age: - description: Minimum age of index. This determines when the index should be moved to the hot tier. + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. + regex: ^[0-9]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html @@ -296,7 +302,9 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + regex: ^[0-9]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html @@ -311,6 +319,8 @@ elasticsearch: delete: min_age: description: Minimum age of index. This determines when the index should be deleted. 
+ regex: ^[0-9]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html @@ -366,6 +376,7 @@ elasticsearch: so-logs-azure_x_signinlogs: *indexSettings so-logs-azure_x_springcloudlogs: *indexSettings so-logs-barracuda_x_waf: *indexSettings + so-logs-cef_x_log: *indexSettings so-logs-cisco_asa_x_log: *indexSettings so-logs-cisco_ftd_x_log: *indexSettings so-logs-cisco_ios_x_log: *indexSettings @@ -383,6 +394,7 @@ elasticsearch: so-logs-darktrace_x_ai_analyst_alert: *indexSettings so-logs-darktrace_x_model_breach_alert: *indexSettings so-logs-darktrace_x_system_status_alert: *indexSettings + so-logs-detections_x_alerts: *indexSettings so-logs-f5_bigip_x_log: *indexSettings so-logs-fim_x_event: *indexSettings so-logs-fortinet_x_clientendpoint: *indexSettings @@ -511,6 +523,7 @@ elasticsearch: so-suricata: *indexSettings so-import: *indexSettings so-kratos: *indexSettings + so-kismet: *indexSettings so-logstash: *indexSettings so-redis: *indexSettings so-strelka: *indexSettings diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index f5a124a9ac..b59c291a44 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -2,11 +2,9 @@ {% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %} {% set PILLAR_GLOBAL_OVERRIDES = {} %} -{% if salt['pillar.get']('elasticsearch:index_settings') is defined %} -{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings') %} -{% if ES_INDEX_PILLAR.global_overrides is defined %} -{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %} -{% endif %} +{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings', {}) %} +{% if ES_INDEX_PILLAR.global_overrides is defined %} +{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %} {% endif %} {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %} @@ -19,6 +17,12 @@ {% set ES_INDEX_SETTINGS = {} %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %} {% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %} +{# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #} +{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #} +{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %} +{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %} +{% endif %} + {% if settings.index_template is defined %} {% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %} {% do settings.index_template.template.settings.index.pop('sort') %} diff --git a/salt/elasticsearch/templates/component/ecs/device.json b/salt/elasticsearch/templates/component/ecs/device.json new file mode 100644 index 0000000000..a281f2c1ec --- /dev/null +++ b/salt/elasticsearch/templates/component/ecs/device.json @@ -0,0 +1,36 @@ +{ + "_meta": { + "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html", + "ecs_version": "1.12.2" + }, + "template": { + "mappings": { + "properties": { + "device": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "manufacturer": { + "ignore_above": 1024, + "type": "keyword" + }, + 
"model": { + "properties": { + "identifier": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/ecs/kismet.json b/salt/elasticsearch/templates/component/ecs/kismet.json new file mode 100644 index 0000000000..a03236ab8d --- /dev/null +++ b/salt/elasticsearch/templates/component/ecs/kismet.json @@ -0,0 +1,32 @@ +{ + "_meta": { + "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html", + "ecs_version": "1.12.2" + }, + "template": { + "mappings": { + "properties": { + "kismet": { + "properties": { + "alerts": { + "properties": { + "count": { + "type": "long" + } + } + }, + "first_seen": { + "type": "date" + }, + "last_seen": { + "type": "date" + }, + "seenby": { + "type": "nested" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/ecs/network.json b/salt/elasticsearch/templates/component/ecs/network.json index c2e35efd06..8cc6bdc370 100644 --- a/salt/elasticsearch/templates/component/ecs/network.json +++ b/salt/elasticsearch/templates/component/ecs/network.json @@ -77,6 +77,43 @@ "type": "keyword" } } + }, + "wireless": { + "properties": { + "associated_clients": { + "ignore_above": 1024, + "type": "keyword" + }, + "bssid": { + "ignore_above": 1024, + "type": "keyword" + }, + "channel": { + "ignore_above": 1024, + "type": "keyword" + }, + "channel_utilization": { + "type": "float" + }, + "frequency": { + "type": "double" + }, + "ssid": { + "ignore_above": 1024, + "type": "keyword" + }, + "ssid_cloaked": { + "type": "integer" + }, + "known_connected_bssid": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_connected_bssid": { + "ignore_above": 1024, + "type": "keyword" + } + } } } } diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index 9b68421e71..5e51b872ba 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -20,21 +20,36 @@ "so_detection": { "properties": { "publicId": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "title": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "severity": { "ignore_above": 1024, "type": "keyword" }, "author": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "description": { "type": "text" }, + "category": { + "ignore_above": 1024, + "type": "keyword" + }, + "product": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + }, "content": { "type": "text" }, @@ -48,7 +63,8 @@ "type": "boolean" }, "tags": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "ruleset": { "ignore_above": 1024, @@ -135,4 +151,4 @@ "_meta": { "ecs_version": "1.12.2" } -} \ No newline at end of file +} diff --git a/salt/elasticsearch/tools/sbin/so-index-list b/salt/elasticsearch/tools/sbin/so-index-list index 1e4595b350..572e55cba3 100755 --- a/salt/elasticsearch/tools/sbin/so-index-list +++ b/salt/elasticsearch/tools/sbin/so-index-list @@ -5,6 +5,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +. 
/usr/sbin/so-common - -curl -K /opt/so/conf/elasticsearch/curl.config-X GET -k -L "https://localhost:9200/_cat/indices?v&s=index" +curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://localhost:9200/_cat/indices?pretty&v&s=index" diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total index 3faa2a7a9d..b73967c26f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total @@ -40,7 +40,7 @@ fi # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space {% if GLOBALS.role == 'so-manager' %} -for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do {% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do {% endif %} diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used index 5d8a60e222..49e6348531 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used @@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space {% if GLOBALS.role == 'so-manager' %} # Get total disk space - disk.total -for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do {% else %} # Get disk space taken up by indices - disk.indices for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete index 07feb36bdf..a00437a25e 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete @@ -27,6 +27,7 @@ overlimit() { # 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit. # Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop. + while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do # If we can't query Elasticsearch, then immediately return false. @@ -34,28 +35,36 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do [ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." 
>> ${LOG} && exit # We iterate through the closed and open indices - CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) + CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4) + OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) + OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4) - for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do - # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream - # To do so, we need to identify to which data stream this index is associated - # We extract the data stream name using the pattern below - DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" - DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") - # We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream - BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length') - if [ "$BACKING_INDICES" -gt 1 ]; then - CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - # We make sure we are not trying to delete a write index - if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} - /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 - fi + for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do + # Check if index is an older index. If it is an older index, delete it before moving on to newer indices. + if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} + /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 else - printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). 
Deleting ${DATASTREAM} data stream...\n" >> ${LOG} + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream + # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below + DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" + DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") + # We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream + BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length') + if [ "$BACKING_INDICES" -gt 1 ]; then + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + # We make sure we are not trying to delete a write index + if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} + /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 + fi + else + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG} /usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1 + fi fi if ! overlimit ; then exit diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 8f45d6c368..080348522f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -133,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then for i in $pattern; do TEMPLATE=${i::-14} COMPONENT_PATTERN=${TEMPLATE:3} - MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery) + MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery") if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then load_failures=$((load_failures+1)) echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. 
Load failures: $load_failures" diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index b3ead0f4c9..aa9a4d0a56 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -9,11 +9,9 @@ 'so-influxdb', 'so-kibana', 'so-kratos', - 'so-mysql', 'so-nginx', 'so-redis', 'so-soc', - 'so-soctopus', 'so-strelka-coordinator', 'so-strelka-gatekeeper', 'so-strelka-frontend', @@ -32,11 +30,9 @@ 'so-kibana', 'so-kratos', 'so-logstash', - 'so-mysql', 'so-nginx', 'so-redis', 'so-soc', - 'so-soctopus', 'so-strelka-coordinator', 'so-strelka-gatekeeper', 'so-strelka-frontend', diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 75a70828e4..b105059565 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -98,19 +98,11 @@ firewall: tcp: - 7788 udp: [] - mysql: - tcp: - - 3306 - udp: [] nginx: tcp: - 80 - 443 udp: [] - playbook: - tcp: - - 3000 - udp: [] redis: tcp: - 6379 @@ -178,8 +170,6 @@ firewall: hostgroups: eval: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -363,8 +353,6 @@ firewall: hostgroups: manager: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -559,8 +547,6 @@ firewall: hostgroups: managersearch: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -756,8 +742,6 @@ firewall: - all standalone: portgroups: - - playbook - - mysql - kibana - redis - influxdb diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 7a2e3b0356..69093dee73 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -7,6 +7,7 @@ firewall: multiline: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True anywhere: &hostgroupsettingsadv description: List of IP or CIDR blocks to allow access to this hostgroup. forcedType: "[]string" @@ -15,6 +16,7 @@ firewall: advanced: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True beats_endpoint: *hostgroupsettings beats_endpoint_ssl: *hostgroupsettings dockernet: &ROhostgroupsettingsadv @@ -53,6 +55,7 @@ firewall: multiline: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True customhostgroup1: *customhostgroupsettings customhostgroup2: *customhostgroupsettings customhostgroup3: *customhostgroupsettings @@ -70,12 +73,14 @@ firewall: helpLink: firewall.html advanced: True multiline: True + duplicates: True udp: &udpsettings description: List of UDP ports for this port group. 
forcedType: "[]string" helpLink: firewall.html advanced: True multiline: True + duplicates: True agrules: tcp: *tcpsettings udp: *udpsettings @@ -121,15 +126,9 @@ firewall: localrules: tcp: *tcpsettings udp: *udpsettings - mysql: - tcp: *tcpsettings - udp: *udpsettings nginx: tcp: *tcpsettings udp: *udpsettings - playbook: - tcp: *tcpsettings - udp: *udpsettings redis: tcp: *tcpsettings udp: *udpsettings @@ -193,6 +192,7 @@ firewall: multiline: True forcedType: "[]string" helpLink: firewall.html + duplicates: True sensor: portgroups: *portgroupsdocker searchnode: @@ -246,6 +246,7 @@ firewall: multiline: True forcedType: "[]string" helpLink: firewall.html + duplicates: True dockernet: portgroups: *portgroupshost localhost: diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index a48476214f..daaf28b286 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -28,7 +28,7 @@ global: description: Used for handling of authentication cookies. global: True airgap: - description: Sets airgap mode. + description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting. global: True readonly: True imagerepo: diff --git a/salt/idh/openssh/config.sls b/salt/idh/openssh/config.sls index d358bb5be0..5e2acd8d20 100644 --- a/salt/idh/openssh/config.sls +++ b/salt/idh/openssh/config.sls @@ -11,6 +11,8 @@ idh_sshd_selinux: - sel_type: ssh_port_t - prereq: - file: openssh_config + - require: + - pkg: python_selinux_mgmt_tools {% endif %} openssh_config: diff --git a/salt/idh/openssh/init.sls b/salt/idh/openssh/init.sls index ba0a8ab04a..79d0825029 100644 --- a/salt/idh/openssh/init.sls +++ b/salt/idh/openssh/init.sls @@ -15,3 +15,9 @@ openssh: - enable: False - name: {{ openssh_map.service }} {% endif %} + +{% if grains.os_family == 'RedHat' %} +python_selinux_mgmt_tools: + pkg.installed: + - name: policycoreutils-python-utils +{% endif %} diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 3e3a68117d..993abfd51f 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -9,43 +9,53 @@ idstools: forcedType: string helpLink: rules.html ruleset: - description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True regex: ETPRO\b|ETOPEN\b helpLink: rules.html urls: - description: This is a list of additional rule download locations. + description: This is a list of additional rule download locations. This feature is currently disabled. 
global: True + multiline: True + forcedType: "[]string" + readonly: True helpLink: rules.html sids: disabled: - description: Contains the list of NIDS rules manually disabled across the grid. To disable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions. + description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; Use the Detections screen to disable rules. global: True multiline: True forcedType: "[]string" regex: \d*|re:.* helpLink: managing-alerts.html + readonlyUi: True + advanced: true enabled: - description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions. + description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; Use the Detections screen to enable rules. global: True multiline: True forcedType: "[]string" regex: \d*|re:.* helpLink: managing-alerts.html + readonlyUi: True + advanced: true modify: - description: Contains the list of NIDS rules that were modified from their default values. Entries must adhere to the following format - SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM" + description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; Use the Detections screen to modify rules. global: True multiline: True forcedType: "[]string" helpLink: managing-alerts.html + readonlyUi: True + advanced: true rules: local__rules: - description: Contains the list of custom NIDS rules applied to the grid. To add custom NIDS rules to the grid, enter one rule per line in the Current Grid Value box. + description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; Use the Detections screen to adjust rules. file: True global: True advanced: True title: Local Rules helpLink: local-rules.html + readonlyUi: True filters__rules: description: If you are using Suricata for metadata, then you can set custom filters for that metadata here. file: True diff --git a/salt/mysql/sostatus.sls b/salt/logstash/download.sls similarity index 68% rename from salt/mysql/sostatus.sls rename to salt/logstash/download.sls index 2f5dbba068..cf1c6176c3 100644 --- a/salt/mysql/sostatus.sls +++ b/salt/logstash/download.sls @@ -1,16 +1,15 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
{% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} -append_so-mysql_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-mysql - - unless: grep -q so-mysql /opt/so/conf/so-status/so-status.conf +so-logstash_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }} {% else %} diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index 3172ff7c58..cc81d31036 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -10,6 +10,7 @@ logstash: helpLink: logstash.html multiline: True forcedType: "[]string" + duplicates: True receiver: *assigned_pipelines heavynode: *assigned_pipelines searchnode: *assigned_pipelines @@ -23,6 +24,7 @@ logstash: helpLink: logstash.html multiline: True forcedType: "[]string" + duplicates: True fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines @@ -38,6 +40,7 @@ logstash: multiline: True forcedType: string helpLink: logstash.html + duplicates: True custom002: *pipeline_config custom003: *pipeline_config custom004: *pipeline_config diff --git a/salt/manager/init.sls b/salt/manager/init.sls index c62a419998..ec37f9ff32 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -27,6 +27,15 @@ repo_log_dir: - user - group +agents_log_dir: + file.directory: + - name: /opt/so/log/agents + - user: root + - group: root + - recurse: + - user + - group + yara_log_dir: file.directory: - name: /opt/so/log/yarasync @@ -64,17 +73,6 @@ manager_sbin: - exclude_pat: - "*_test.py" -yara_update_scripts: - file.recurse: - - name: /usr/sbin/ - - source: salt://manager/tools/sbin_jinja/ - - user: socore - - group: socore - - file_mode: 755 - - template: jinja - - defaults: - EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} - so-repo-file: file.managed: - name: /opt/so/conf/reposync/repodownload.conf @@ -101,6 +99,17 @@ so-repo-sync: - hour: '{{ MANAGERMERGED.reposync.hour }}' - minute: '{{ MANAGERMERGED.reposync.minute }}' +so_fleetagent_status: + cron.present: + - name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1 + - identifier: so_fleetagent_status + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + socore_own_saltstack: file.directory: - name: /opt/so/saltstack @@ -117,51 +126,6 @@ rules_dir: - group: socore - makedirs: True -{% if STRELKAMERGED.rules.enabled %} -strelkarepos: - file.managed: - - name: /opt/so/conf/strelka/repos.txt - - source: salt://strelka/rules/repos.txt.jinja - - template: jinja - - defaults: - STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - - makedirs: True -strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - - identifier: strelka-yara-update - - hour: '7' - - minute: '1' -strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - - identifier: strelka-yara-download - - hour: '7' - - minute: '1' -{% if not GLOBALS.airgap %} -update_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-update - - 
onchanges: - - file: yara_update_scripts -download_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-download - - onchanges: - - file: yara_update_scripts -{% endif %} -{% endif %} - {% else %} {{sls}}_state_not_allowed: diff --git a/salt/manager/soc_manager.yaml b/salt/manager/soc_manager.yaml index 01d63a609b..f6461a0c7b 100644 --- a/salt/manager/soc_manager.yaml +++ b/salt/manager/soc_manager.yaml @@ -20,10 +20,6 @@ manager: description: String of hosts to ignore the proxy settings for. global: True helpLink: proxy.html - playbook: - description: Enable playbook 1=enabled 0=disabled. - global: True - helpLink: playbook.html proxy: description: Proxy server to use for updates. global: True diff --git a/salt/mysql/tools/sbin/so-mysql-start b/salt/manager/tools/sbin/so-elasticagent-status old mode 100755 new mode 100644 similarity index 76% rename from salt/mysql/tools/sbin/so-mysql-start rename to salt/manager/tools/sbin/so-elasticagent-status index e68536809b..dffd766600 --- a/salt/mysql/tools/sbin/so-mysql-start +++ b/salt/manager/tools/sbin/so-elasticagent-status @@ -5,8 +5,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. - - . /usr/sbin/so-common -/usr/sbin/so-start mysql $1 +curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq . \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index cb4e40adee..8b563ef1d4 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -201,11 +201,7 @@ function add_idh_to_minion() { "idh:"\ " enabled: True"\ " restrict_management_ip: $IDH_MGTRESTRICT"\ - " services:" >> "$PILLARFILE" - IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES" - for service in ${IDH_SERVICES_ARRAY[@]}; do - echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE" - done + " " >> $PILLARFILE } function add_logstash_to_minion() { @@ -286,12 +282,6 @@ function add_sensor_to_minion() { echo " " >> $PILLARFILE } -function add_playbook_to_minion() { - printf '%s\n'\ - "playbook:"\ - " enabled: True"\ - " " >> $PILLARFILE -} function add_elastalert_to_minion() { printf '%s\n'\ @@ -353,13 +343,6 @@ function add_nginx_to_minion() { " " >> $PILLARFILE } -function add_soctopus_to_minion() { - printf '%s\n'\ - "soctopus:"\ - " enabled: True"\ - " " >> $PILLARFILE -} - function add_soc_to_minion() { printf '%s\n'\ "soc:"\ @@ -374,13 +357,6 @@ function add_registry_to_minion() { " " >> $PILLARFILE } -function add_mysql_to_minion() { - printf '%s\n'\ - "mysql:"\ - " enabled: True"\ - " " >> $PILLARFILE -} - function add_kratos_to_minion() { printf '%s\n'\ "kratos:"\ @@ -446,26 +422,19 @@ function checkMine() { } -function updateMine() { - retry 20 1 "salt '$MINION_ID' mine.update" True -} - function createEVAL() { is_pcaplimit=true pcapspace add_elasticsearch_to_minion add_sensor_to_minion add_strelka_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -478,17 +447,14 @@ function createSTANDALONE() { add_logstash_to_minion add_sensor_to_minion add_strelka_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion 
add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -497,17 +463,14 @@ function createSTANDALONE() { function createMANAGER() { add_elasticsearch_to_minion add_logstash_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -516,17 +479,14 @@ function createMANAGER() { function createMANAGERSEARCH() { add_elasticsearch_to_minion add_logstash_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -636,20 +596,16 @@ function addMinion() { } function updateMineAndApplyStates() { - # tell the minion to populate the mine with data from mine_functions which is populated during setup - # this only needs to happen on non managers since they handle this during setup - # and they need to wait for ca creation to update the mine - updateMine - checkMine "network.ip_addrs" - # apply the elasticsearch state to the manager if a new searchnode was added + + #checkMine "network.ip_addrs" + # calls so-common and set_minionid sets MINIONID to local minion id + set_minionid + # if this is a searchnode or heavynode, start downloading logstash and elasticsearch containers while the manager prepares for the new node if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then - # calls so-common and set_minionid sets MINIONID to local minion id - set_minionid - salt $MINIONID state.apply elasticsearch queue=True --async - salt $MINIONID state.apply soc queue=True --async + salt-run state.orch orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 & fi - # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async queue=True + # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured + salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 & } function setupMinionFiles() { diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 41cab0b23d..5427a2e483 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -17,13 +17,16 @@ def showUsage(args): print('Usage: {} [ARGS...]'.format(sys.argv[0])) print(' General commands:') print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.') + print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.') print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') + print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.') print(' help - Prints this usage information.') print('') print(' Where:') print(' YAML_FILE - Path to the file that will be modified. 
Ex: /opt/so/conf/service/conf.yaml') print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') - print(' LISTITEM - Item to add to the list.') + print(' VALUE - Value to set for a given key') + print(' LISTITEM - Item to append to a given key\'s list value') sys.exit(1) @@ -37,6 +40,7 @@ def writeYaml(filename, content): file = open(filename, "w") return yaml.dump(content, file) + def appendItem(content, key, listItem): pieces = key.split(".", 1) if len(pieces) > 1: @@ -51,6 +55,30 @@ def appendItem(content, key, listItem): print("The key provided does not exist. No action was taken on the file.") return 1 + +def convertType(value): + if len(value) > 0 and (not value.startswith("0") or len(value) == 1): + if "." in value: + try: + value = float(value) + return value + except ValueError: + pass + + try: + value = int(value) + return value + except ValueError: + pass + + lowered_value = value.lower() + if lowered_value == "false": + return False + elif lowered_value == "true": + return True + return value + + def append(args): if len(args) != 3: print('Missing filename, key arg, or list item to append', file=sys.stderr) @@ -62,11 +90,41 @@ def append(args): listItem = args[2] content = loadYaml(filename) - appendItem(content, key, listItem) + appendItem(content, key, convertType(listItem)) writeYaml(filename, content) return 0 + +def addKey(content, key, value): + pieces = key.split(".", 1) + if len(pieces) > 1: + if not pieces[0] in content: + content[pieces[0]] = {} + addKey(content[pieces[0]], pieces[1], value) + elif key in content: + raise KeyError("key already exists") + else: + content[key] = value + + +def add(args): + if len(args) != 3: + print('Missing filename, key arg, and/or value', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + key = args[1] + value = args[2] + + content = loadYaml(filename) + addKey(content, key, convertType(value)) + writeYaml(filename, content) + + return 0 + + def removeKey(content, key): pieces = key.split(".", 1) if len(pieces) > 1: @@ -91,6 +149,24 @@ def remove(args): return 0 +def replace(args): + if len(args) != 3: + print('Missing filename, key arg, and/or value', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + key = args[1] + value = args[2] + + content = loadYaml(filename) + removeKey(content, key) + addKey(content, key, convertType(value)) + writeYaml(filename, content) + + return 0 + + def main(): args = sys.argv[1:] @@ -100,8 +176,10 @@ def main(): commands = { "help": showUsage, + "add": add, "append": append, "remove": remove, + "replace": replace, } code = 1 diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 488877ea19..7effabac92 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -42,6 +42,14 @@ def test_main_help(self): sysmock.assert_called() self.assertIn(mock_stdout.getvalue(), "Usage:") + def test_remove_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.remove(["file"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + def test_remove(self): filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") @@ -106,6 +114,14 @@ def test_remove_missing_args(self): sysmock.assert_called_once_with(1) self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + def 
test_append_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.append(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n") + def test_append(self): filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") @@ -201,3 +217,146 @@ def test_append_key_nonlist_deep(self): soyaml.main() sysmock.assert_called() self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") + + def test_add_key(self): + content = {} + soyaml.addKey(content, "foo", 123) + self.assertEqual(content, {"foo": 123}) + + try: + soyaml.addKey(content, "foo", "bar") + self.assertFail("expected key error since key already exists") + except KeyError: + pass + + try: + soyaml.addKey(content, "foo.bar", 123) + self.assertFail("expected type error since key parent value is not a map") + except TypeError: + pass + + content = {} + soyaml.addKey(content, "foo", "bar") + self.assertEqual(content, {"foo": "bar"}) + + soyaml.addKey(content, "badda.badda", "boom") + self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}}) + + def test_add_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.add(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + + def test_add(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}") + file.close() + + soyaml.add([filename, "key4", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n" + self.assertEqual(actual, expected) + + def test_add_nested(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.add([filename, "key1.child3", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_add_nested_deep(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.add([filename, "key1.child2.deep2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_replace_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.replace(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + + def test_replace(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}") + file.close() + + soyaml.replace([filename, "key2", 
True]) + + file = open(filename, "r") + actual = file.read() + file.close() + expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n" + self.assertEqual(actual, expected) + + def test_replace_nested(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.replace([filename, "key1.child2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_replace_nested_deep(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.replace([filename, "key1.child2.deep1", 46]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_convert(self): + self.assertEqual(soyaml.convertType("foo"), "foo") + self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar") + self.assertEqual(soyaml.convertType("123"), 123) + self.assertEqual(soyaml.convertType("0"), 0) + self.assertEqual(soyaml.convertType("00"), "00") + self.assertEqual(soyaml.convertType("0123"), "0123") + self.assertEqual(soyaml.convertType("123.456"), 123.456) + self.assertEqual(soyaml.convertType("0123.456"), "0123.456") + self.assertEqual(soyaml.convertType("true"), True) + self.assertEqual(soyaml.convertType("TRUE"), True) + self.assertEqual(soyaml.convertType("false"), False) + self.assertEqual(soyaml.convertType("FALSE"), False) + self.assertEqual(soyaml.convertType(""), "") diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a585f877c7..1e95859871 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -229,7 +229,7 @@ check_local_mods() { # {% endraw %} check_pillar_items() { - local pillar_output=$(salt-call pillar.items --out=json) + local pillar_output=$(salt-call pillar.items -lerror --out=json) cond=$(jq '.local | has("_errors")' <<< "$pillar_output") if [[ "$cond" == "true" ]]; then @@ -357,6 +357,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40 [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 + [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70 true } @@ -373,6 +374,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 + [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 true } @@ -435,6 +437,17 @@ post_to_2.4.60() { POSTVERSION=2.4.60 } +post_to_2.4.70() { + printf "\nRemoving idh.services from any existing IDH node pillar files\n" + for file in /opt/so/saltstack/local/pillar/minions/*.sls; do + if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then + echo "Removing idh.services from: $file" + so-yaml.py remove "$file" idh.services + fi + done + POSTVERSION=2.4.70 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." 
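The add and replace subcommands introduced in so-yaml.py above are what the 2.4.70 soup steps lean on (so-yaml.py remove in post_to_2.4.70, and so-yaml.py add in the hunks that follow). Below is a minimal sketch of the command-line usage against a throwaway file; the /tmp path and keys are illustrative only. Note that values pass through convertType, so "1" is stored as an integer and "false" as a real YAML boolean.
```
# Scratch file used only for this example
cat > /tmp/so-yaml-demo.yaml <<'EOF'
soc:
  telemetryEnabled: true
  eventFields:
    - soc_timestamp
EOF

# add creates missing parent keys, but fails if the final key already exists
so-yaml.py add /tmp/so-yaml-demo.yaml soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1

# replace removes any existing value for the key, then re-adds it
so-yaml.py replace /tmp/so-yaml-demo.yaml soc.telemetryEnabled false

# append only succeeds when the key already holds a list
so-yaml.py append /tmp/so-yaml-demo.yaml soc.eventFields event.module

cat /tmp/so-yaml-demo.yaml
```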
@@ -574,6 +587,161 @@ up_to_2.4.60() { INSTALLEDVERSION=2.4.60 } +up_to_2.4.70() { + playbook_migration + suricata_idstools_migration + toggle_telemetry + add_detection_test_pillars + INSTALLEDVERSION=2.4.70 +} + +add_detection_test_pillars() { + if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then + echo "Adding detection pillar values for automated testing" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.allowRegex SecurityOnion + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1 + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1 + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1 + fi +} + +toggle_telemetry() { + if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then + cat << ASSIST_EOF + +--------------- SOC Telemetry --------------- + +The Security Onion development team could use your help! Enabling SOC +Telemetry will help the team understand which UI features are being +used and enables informed prioritization of future development. + +Adjust this setting at anytime via the SOC Configuration screen. + +Documentation: https://docs.securityonion.net/en/2.4/telemetry.html + +ASSIST_EOF + + echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? " + + read -r input + input=$(echo "${input,,}" | xargs echo -n) + echo "" + if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then + echo "Thank you for helping improve Security Onion!" + else + if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then + echo "Disabled SOC Telemetry." + else + fail "Failed to disable SOC Telemetry; aborting." + fi + fi + echo "" + fi +} + +suricata_idstools_migration() { + #Backup the pillars for idstools + mkdir -p /nsm/backup/detections-migration/idstools + rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools + if [[ $? -eq 0 ]]; then + echo "IDStools configuration has been backed up." + else + fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up." + fi + + #Backup Thresholds + mkdir -p /nsm/backup/detections-migration/suricata + rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata + if [[ $? -eq 0 ]]; then + echo "Suricata thresholds have been backed up." + else + fail "Error: rsync failed to copy the files. Thresholds have not been backed up." 
+ fi + + #Backup local rules + mkdir -p /nsm/backup/detections-migration/suricata/local-rules + rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules + if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then + rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak + fi + + #Tell SOC to migrate + mkdir -p /opt/so/conf/soc/migrations + echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70 + chown -R socore:socore /opt/so/conf/soc/migrations +} + +playbook_migration() { + # Start SOC Detections migration + mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert} + + # Remove cronjobs + crontab -l | grep -v 'so-playbook-sync_cron' | crontab - + crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab - + + if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then + + # Check for active Elastalert rules + active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l) + + if [[ "$active_rules_count" -gt 0 ]]; then + # Prompt the user to press ENTER if active Elastalert rules found + echo + echo "$active_rules_count Active Elastalert/Playbook rules found." + echo "In preparation for the new Detections module, they will be backed up and then disabled." + echo + echo "Press ENTER to proceed." + echo + # Read user input + read -r + + echo "Backing up the Elastalert rules..." + rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/ + + # Verify that rsync completed successfully + if [[ $? -eq 0 ]]; then + # Delete the Elastlaert rules + rm -f /opt/so/rules/elastalert/playbook/*.yaml + echo "Active Elastalert rules have been backed up." + else + fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." + fi + fi + + echo + echo "Exporting Sigma rules from Playbook..." + MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls) + + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do + echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml" + done || fail "Failed to export Sigma rules..." + + echo + echo "Exporting Sigma Filters from Playbook..." + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters." + + echo + echo "Backing up Playbook database..." + docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database." + docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database." + fi + + echo + echo "Stopping Playbook services & cleaning up..." 
+ for container in so-playbook so-mysql so-soctopus; do + if [ -n "$(docker ps -q -f name=^${container}$)" ]; then + docker stop $container + fi + done + sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf + rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-* + + echo + echo "Playbook Migration is complete...." +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap @@ -617,12 +785,6 @@ unmount_update() { update_airgap_rules() { # Copy the rules over to update them for airgap. rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/ - rsync -av $UPDATE_DIR/agrules/yara/* /nsm/rules/yara/ - if [ -d /nsm/repo/rules/sigma ]; then - rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/ - fi - - # SOC Detections Airgap rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/ rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/ } @@ -756,7 +918,7 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null # Verify that soup scripts updated as expected get_soup_script_hashes @@ -837,7 +999,6 @@ main() { echo "### Preparing soup at $(date) ###" echo "" - set_os check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." @@ -903,6 +1064,7 @@ main() { backup_old_states_pillars fi copy_new_files + create_local_directories "/opt/so/saltstack/default" apply_hotfix echo "Hotfix applied" update_version @@ -969,6 +1131,7 @@ main() { echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." copy_new_files echo "" + create_local_directories "/opt/so/saltstack/default" update_version echo "" diff --git a/salt/manager/tools/sbin_jinja/so-yara-download b/salt/manager/tools/sbin_jinja/so-yara-download deleted file mode 100644 index aa95762539..0000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-download +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -NOROOT=1 -. /usr/sbin/so-common - -{%- set proxy = salt['pillar.get']('manager:proxy') %} -{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} - -# Download the rules from the internet -{%- if proxy %} -export http_proxy={{ proxy }} -export https_proxy={{ proxy }} -export no_proxy="{{ noproxy }}" -{%- endif %} - -repos="/opt/so/conf/strelka/repos.txt" -output_dir=/nsm/rules/yara -gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com) -clone_dir="/tmp" -if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then - - while IFS= read -r repo; do - if ! $(echo "$repo" | grep -qE '^#'); then - # Remove old repo if existing bc of previous error condition or unexpected disruption - repo_name=`echo $repo | awk -F '/' '{print $NF}'` - [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name - - # Clone repo and make appropriate directories for rules - git clone $repo $clone_dir/$repo_name - echo "Analyzing rules from $clone_dir/$repo_name..." 
- mkdir -p $output_dir/$repo_name - # Ensure a copy of the license is available for the rules - [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name - - # Copy over rules - for i in $(find $clone_dir/$repo_name -name "*.yar*"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - cp $i $output_dir/$repo_name - done - rm -rf $clone_dir/$repo_name - fi - done < $repos - - echo "Done!" - -/usr/sbin/so-yara-update - -else - echo "Server returned $gh_status status code." - echo "No connectivity to Github...exiting..." - exit 1 -fi diff --git a/salt/manager/tools/sbin_jinja/so-yara-update b/salt/manager/tools/sbin_jinja/so-yara-update deleted file mode 100644 index 07c940f47d..0000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-update +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -NOROOT=1 -. /usr/sbin/so-common - -echo "Starting to check for yara rule updates at $(date)..." - -newcounter=0 -excludedcounter=0 -excluded_rules=({{ EXCLUDEDRULES | join(' ') }}) - -# Pull down the SO Rules -SORULEDIR=/nsm/rules/yara -OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules - -mkdir -p $OUTPUTDIR -# remove all rules prior to copy so we can clear out old rules -rm -f $OUTPUTDIR/* - -for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then - echo "Adding rule: $rule_name..." - cp $i $OUTPUTDIR/$rule_name - ((newcounter++)) - else - echo "Excluding rule: $rule_name..." - ((excludedcounter++)) - fi -done - -if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then - echo "$newcounter rules added." - echo "$excludedcounter rule(s) excluded." -fi - -echo "Finished rule updates at $(date)..." diff --git a/salt/mysql/config.sls b/salt/mysql/config.sls deleted file mode 100644 index 274f25d761..0000000000 --- a/salt/mysql/config.sls +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} - -# MySQL Setup -mysqlpkgs: - pkg.removed: - - skip_suggestions: False - - pkgs: - {% if grains['os_family'] != 'RedHat' %} - - python3-mysqldb - {% else %} - - python3-mysqlclient - {% endif %} - -mysqletcdir: - file.directory: - - name: /opt/so/conf/mysql/etc - - user: 939 - - group: 939 - - makedirs: True - -mysqlpiddir: - file.directory: - - name: /opt/so/conf/mysql/pid - - user: 939 - - group: 939 - - makedirs: True - -mysqlcnf: - file.managed: - - name: /opt/so/conf/mysql/etc/my.cnf - - source: salt://mysql/etc/my.cnf - - user: 939 - - group: 939 - -mysqlpass: - file.managed: - - name: /opt/so/conf/mysql/etc/mypass - - source: salt://mysql/etc/mypass - - user: 939 - - group: 939 - - template: jinja - - defaults: - MYSQLPASS: {{ MYSQLPASS }} - -mysqllogdir: - file.directory: - - name: /opt/so/log/mysql - - user: 939 - - group: 939 - - makedirs: True - -mysqldatadir: - file.directory: - - name: /nsm/mysql - - user: 939 - - group: 939 - - makedirs: True - -mysql_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://mysql/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#mysql_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://mysql/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/defaults.yaml b/salt/mysql/defaults.yaml deleted file mode 100644 index 87d8cef256..0000000000 --- a/salt/mysql/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -mysql: - enabled: False diff --git a/salt/mysql/enabled.sls b/salt/mysql/enabled.sls deleted file mode 100644 index 1e1a3ca1eb..0000000000 --- a/salt/mysql/enabled.sls +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} - -include: - - mysql.config - - mysql.sostatus - -{% if MYSQLPASS == None %} - -mysql_password_none: - test.configurable_test_state: - - changes: False - - result: False - - comment: "MySQL Password Error - Not Starting MySQL" - -{% else %} - -so-mysql: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }} - - hostname: so-mysql - - user: socore - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }} - - extra_hosts: - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-mysql'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-mysql'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - environment: - - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_gateway }} - - MYSQL_ROOT_PASSWORD=/etc/mypass - {% if DOCKER.containers['so-mysql'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - binds: - - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro - - /opt/so/conf/mysql/etc/mypass:/etc/mypass - - /nsm/mysql:/var/lib/mysql:rw - - /opt/so/log/mysql:/var/log/mysql:rw - {% if DOCKER.containers['so-mysql'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - cap_add: - - SYS_NICE - - watch: - - file: mysqlcnf - - file: mysqlpass - - require: - - file: mysqlcnf - - file: mysqlpass -{% endif %} - -delete_so-mysql_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-mysql$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/etc/my.cnf b/salt/mysql/etc/my.cnf deleted file mode 100644 index 621ce83d86..0000000000 --- a/salt/mysql/etc/my.cnf +++ /dev/null @@ -1,32 +0,0 @@ -# For advice on how to change settings please see -# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html - -[mysqld] -# -# Remove leading # and set to the amount of RAM for the most important data -# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%. -# innodb_buffer_pool_size = 128M -# -# Remove leading # to turn on a very important data integrity option: logging -# changes to the binary log between backups. -# log_bin -# -# Remove leading # to set options mainly useful for reporting servers. -# The server defaults are faster for transactions and fast SELECTs. -# Adjust sizes as needed, experiment to find the optimal values. 
-# join_buffer_size = 128M -# sort_buffer_size = 2M -# read_rnd_buffer_size = 2M - -host_cache_size=0 -skip-name-resolve -datadir=/var/lib/mysql -socket=/var/lib/mysql/mysql.sock -secure-file-priv=/var/lib/mysql-files -user=socore - -log-error=/var/log/mysql/mysqld.log -pid-file=/var/run/mysqld/mysqld.pid - -# Switch back to the native password module so that playbook can connect -authentication_policy=mysql_native_password diff --git a/salt/mysql/etc/mypass b/salt/mysql/etc/mypass deleted file mode 100644 index b38bf75ec4..0000000000 --- a/salt/mysql/etc/mypass +++ /dev/null @@ -1 +0,0 @@ -{{ MYSQLPASS }} diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls deleted file mode 100644 index 48e4f558cb..0000000000 --- a/salt/mysql/init.sls +++ /dev/null @@ -1,14 +0,0 @@ - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'mysql/map.jinja' import MYSQLMERGED %} - -include: -{% if MYSQLMERGED.enabled %} - - mysql.enabled -{% else %} - - mysql.disabled -{% endif %} diff --git a/salt/mysql/map.jinja b/salt/mysql/map.jinja deleted file mode 100644 index dd9a6474e4..0000000000 --- a/salt/mysql/map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at - https://securityonion.net/license; you may not use this file except in compliance with the - Elastic License 2.0. #} - -{% import_yaml 'mysql/defaults.yaml' as MYSQLDEFAULTS with context %} -{% set MYSQLMERGED = salt['pillar.get']('mysql', MYSQLDEFAULTS.mysql, merge=True) %} diff --git a/salt/mysql/soc_mysql.yaml b/salt/mysql/soc_mysql.yaml deleted file mode 100644 index 4be816d904..0000000000 --- a/salt/mysql/soc_mysql.yaml +++ /dev/null @@ -1,4 +0,0 @@ -mysql: - enabled: - description: You can enable or disable MySQL. - advanced: True diff --git a/salt/mysql/tools/sbin/so-mysql-restart b/salt/mysql/tools/sbin/so-mysql-restart deleted file mode 100755 index 8c0583232d..0000000000 --- a/salt/mysql/tools/sbin/so-mysql-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-restart mysql $1 diff --git a/salt/mysql/tools/sbin/so-mysql-stop b/salt/mysql/tools/sbin/so-mysql-stop deleted file mode 100755 index 58f6072f26..0000000000 --- a/salt/mysql/tools/sbin/so-mysql-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-stop mysql $1 diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 236f8da7fa..52ea68daa1 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -277,38 +277,11 @@ http { proxy_set_header X-Forwarded-Proto $scheme; } - location /playbook/ { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - - location /soctopus/ { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ GLOBALS.manager }}:7000/; - proxy_read_timeout 300; - proxy_connect_timeout 300; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } location /kibana/app/soc/ { rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; } - location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } location /sensoroniagents/ { if ($http_authorization = "") { diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls new file mode 100644 index 0000000000..aa8e19587e --- /dev/null +++ b/salt/orch/container_download.sls @@ -0,0 +1,17 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +{{NEWNODE}}_download_logstash_elasticsearch: + salt.state: + - tgt: {{ NEWNODE }} + - sls: + - repo.client + - docker + - logstash.download + - elasticsearch.download +{% endif %} diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls new file mode 100644 index 0000000000..c05a812a33 --- /dev/null +++ b/salt/orch/deploy_newnode.sls @@ -0,0 +1,32 @@ +{% set MANAGER = salt['pillar.get']('setup:manager') %} +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +# tell the minion to populate the mine with data from mine_functions which is populated during setup +# this only needs to happen on non managers since they handle this during setup +# and they need to wait for ca creation to update the mine +{{NEWNODE}}_update_mine: + salt.function: + - name: mine.update + - tgt: {{ NEWNODE }} + - retry: + attempts: 36 + interval: 5 + +# we need to prepare the manager for a new searchnode or heavynode +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +manager_run_es_soc: + salt.state: + - tgt: {{ MANAGER }} + - sls: + - elasticsearch + - soc + - queue: True + - require: + - salt: {{NEWNODE}}_update_mine +{% endif %} + +{{NEWNODE}}_run_highstate: + salt.state: + - tgt: {{ NEWNODE }} + - highstate: True + - queue: True diff --git a/salt/playbook/automation_user_create.sls b/salt/playbook/automation_user_create.sls deleted file mode 100644 index 49ec2e7957..0000000000 --- a/salt/playbook/automation_user_create.sls +++ /dev/null @@ -1,19 +0,0 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} - -# This state will create the SecOps Automation user within Playbook - -include: - - playbook - -wait_for_playbook: - cmd.run: - - name: until nc -z {{ GLOBALS.manager }} 3000; do sleep 1; done - - timeout: 300 - -create_user: - cmd.script: - - source: salt://playbook/files/automation_user_create.sh - - cwd: /root - - template: jinja - - onchanges: - - cmd: wait_for_playbook diff --git a/salt/playbook/config.sls b/salt/playbook/config.sls deleted file mode 100644 index f4c2cf1371..0000000000 --- a/salt/playbook/config.sls +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} -{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %} - - -include: - - mysql - -create_playbookdbuser: - mysql_user.present: - - name: playbookdbuser - - password: {{ PLAYBOOKPASS }} - - host: "{{ DOCKER.range.split('/')[0] }}/255.255.255.0" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_playbookdbuser_grants: - mysql_query.run: - - database: playbook - - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.range.split('/')[0] }}/255.255.255.0';" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatwebhooks: - mysql_query.run: - - database: playbook - - query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip}}:7000/playbook/webhook' where project_id = 1" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatename: - mysql_query.run: - - database: playbook - - query: "update custom_fields set name = 'Custom Filter' where id = 21;" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatepluginurls: - mysql_query.run: - - database: playbook - - query: |- - update settings set value = - "--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess - project: '1' - convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac - create_url: http://{{ GLOBALS.manager }}:7000/playbook/play" - where id = 43 - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -playbook_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://playbook/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#playbook_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://playbook/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -playbooklogdir: - file.directory: - - name: /opt/so/log/playbook - - dir_mode: 775 - - user: 939 - - group: 939 - - makedirs: True - -playbookfilesdir: - file.directory: - - name: /opt/so/conf/playbook/redmine-files - - dir_mode: 775 - - user: 939 - - group: 939 - - makedirs: True - -{% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' {'print $1'}").split() %} -idh-plays: - file.recurse: - - name: /opt/so/conf/soctopus/sigma-import - - source: salt://idh/plays - - makedirs: True - cmd.run: - - name: so-playbook-import True - - onchanges: - - file: /opt/so/conf/soctopus/sigma-import -{% endif %} - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/db_init.sls b/salt/playbook/db_init.sls deleted file mode 100644 index 1b2bf7b1a5..0000000000 --- a/salt/playbook/db_init.sls +++ /dev/null @@ -1,14 +0,0 @@ - -# This state will import the initial default playbook database. -# If there is an existing playbook database, it will be overwritten - no backups are made. 
- -include: - - mysql - -salt://playbook/files/playbook_db_init.sh: - cmd.script: - - cwd: /root - - template: jinja - -'sleep 5': - cmd.run \ No newline at end of file diff --git a/salt/playbook/defaults.yaml b/salt/playbook/defaults.yaml deleted file mode 100644 index e75ec6a3cf..0000000000 --- a/salt/playbook/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -playbook: - enabled: False diff --git a/salt/playbook/disabled.sls b/salt/playbook/disabled.sls deleted file mode 100644 index c8c876cfba..0000000000 --- a/salt/playbook/disabled.sls +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - playbook.sostatus - -so-playbook: - docker_container.absent: - - force: True - -so-playbook_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-playbook$ - -so-playbook-sync_cron: - cron.absent: - - identifier: so-playbook-sync_cron - - user: root - -so-playbook-ruleupdate_cron: - cron.absent: - - identifier: so-playbook-ruleupdate_cron - - user: root - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/enabled.sls b/salt/playbook/enabled.sls deleted file mode 100644 index e70fec6939..0000000000 --- a/salt/playbook/enabled.sls +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %} - -include: - - playbook.config - - playbook.sostatus - -{% if PLAYBOOKPASS == None %} - -playbook_password_none: - test.configurable_test_state: - - changes: False - - result: False - - comment: "Playbook MySQL Password Error - Not Starting Playbook" - -{% else %} - -so-playbook: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }} - - hostname: playbook - - name: so-playbook - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }} - - binds: - - /opt/so/conf/playbook/redmine-files:/usr/src/redmine/files:rw - - /opt/so/log/playbook:/playbook/log:rw - {% if DOCKER.containers['so-playbook'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - extra_hosts: - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-playbook'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - - environment: - - REDMINE_DB_MYSQL={{ GLOBALS.manager }} - - REDMINE_DB_DATABASE=playbook - - REDMINE_DB_USERNAME=playbookdbuser - - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }} - {% if DOCKER.containers['so-playbook'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-playbook'].port_bindings %} - - {{ BINDING }} - {% endfor %} - -delete_so-playbook_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-playbook$ - -so-playbook-sync_cron: - cron.present: - - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1 - - identifier: so-playbook-sync_cron - - user: root - - minute: '*/5' - -so-playbook-ruleupdate_cron: - cron.present: - - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1 - - identifier: so-playbook-ruleupdate_cron - - user: root - - minute: '1' - - hour: '6' - -{% endif %} - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/files/automation_user_create.sh b/salt/playbook/files/automation_user_create.sh deleted file mode 100644 index 782ce4c88d..0000000000 --- a/salt/playbook/files/automation_user_create.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%} -# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %} - -local_salt_dir=/opt/so/saltstack/local - -try_count=6 -interval=10 - -while [[ $try_count -le 6 ]]; do - if docker top "so-playbook" &>/dev/null; then - automation_group=6 - - # Create user and retrieve api_key and user_id from response - mapfile -t automation_res < <( - curl -s --location --request POST 'http://127.0.0.1:3000/playbook/users.json' --user "admin:{{ admin_pass }}" --header 'Content-Type: application/json' --data '{ - "user" : { - "login" : "automation", - "password": "{{ automation_pass }}", - "firstname": "SecOps", - "lastname": "Automation", - "mail": "automation2@localhost.local" - } - }' 
| jq -r '.user.api_key, .user.id' - ) - - automation_api_key=${automation_res[0]} - automation_user_id=${automation_res[1]} - - # Add user_id from newly created user to Automation group - curl -s --location --request POST "http://127.0.0.1:3000/playbook/groups/${automation_group}/users.json" \ - --user "admin:{{ admin_pass }}" \ - --header 'Content-Type: application/json' \ - --data "{ - \"user_id\" : ${automation_user_id} - }" - - # Update the Automation API key in the secrets pillar - so-yaml.py remove $local_salt_dir/pillar/secrets.sls secrets.playbook_automation_api_key - printf '%s\n'\ - " playbook_automation_api_key: $automation_api_key" >> $local_salt_dir/pillar/secrets.sls - exit 0 - fi - ((try_count++)) - sleep "${interval}s" -done - -# Timeout exceeded, exit with non-zero exit code -exit 1 diff --git a/salt/playbook/files/playbook_db_init.sh b/salt/playbook/files/playbook_db_init.sh deleted file mode 100644 index 94aef0a443..0000000000 --- a/salt/playbook/files/playbook_db_init.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} -# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %} -. /usr/sbin/so-common - -default_salt_dir=/opt/so/saltstack/default - -# Generate salt + hash for admin user -admin_salt=$(get_random_value 32) -admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}') -admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}') -sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql -sed -i "s/ADMIN_SALT/${admin_salt}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql - -# Copy file to destination + execute SQL -docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql -docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql" diff --git a/salt/playbook/files/playbook_db_init.sql b/salt/playbook/files/playbook_db_init.sql deleted file mode 100644 index 7a3b4da68c..0000000000 --- a/salt/playbook/files/playbook_db_init.sql +++ /dev/null @@ -1,1788 +0,0 @@ --- MySQL dump 10.13 Distrib 5.7.24, for Linux (x86_64) --- --- Host: localhost Database: playbook --- ------------------------------------------------------ --- Server version 5.7.24 - -/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; -/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; -/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; -/*!40101 SET NAMES utf8 */; -/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; -/*!40103 SET TIME_ZONE='+00:00' */; -/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; -/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; -/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; -/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; - --- --- Current Database: `playbook` --- - -CREATE DATABASE /*!32312 IF NOT EXISTS*/ `playbook` /*!40100 DEFAULT CHARACTER SET latin1 */; - -USE `playbook`; - --- --- Table structure for table `ar_internal_metadata` --- - -DROP TABLE IF EXISTS `ar_internal_metadata`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `ar_internal_metadata` ( - `key` varchar(255) NOT NULL, - `value` varchar(255) DEFAULT NULL, - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`key`) 
-) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `ar_internal_metadata` --- - -LOCK TABLES `ar_internal_metadata` WRITE; -/*!40000 ALTER TABLE `ar_internal_metadata` DISABLE KEYS */; -INSERT INTO `ar_internal_metadata` VALUES ('environment','production','2020-04-26 13:08:38','2020-04-26 13:08:38'); -/*!40000 ALTER TABLE `ar_internal_metadata` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `attachments` --- - -DROP TABLE IF EXISTS `attachments`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `attachments` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `container_id` int(11) DEFAULT NULL, - `container_type` varchar(30) DEFAULT NULL, - `filename` varchar(255) NOT NULL DEFAULT '', - `disk_filename` varchar(255) NOT NULL DEFAULT '', - `filesize` bigint(20) NOT NULL DEFAULT '0', - `content_type` varchar(255) DEFAULT '', - `digest` varchar(64) NOT NULL DEFAULT '', - `downloads` int(11) NOT NULL DEFAULT '0', - `author_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `description` varchar(255) DEFAULT NULL, - `disk_directory` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_attachments_on_author_id` (`author_id`), - KEY `index_attachments_on_created_on` (`created_on`), - KEY `index_attachments_on_container_id_and_container_type` (`container_id`,`container_type`), - KEY `index_attachments_on_disk_filename` (`disk_filename`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `attachments` --- - -LOCK TABLES `attachments` WRITE; -/*!40000 ALTER TABLE `attachments` DISABLE KEYS */; -/*!40000 ALTER TABLE `attachments` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `auth_sources` --- - -DROP TABLE IF EXISTS `auth_sources`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `auth_sources` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(30) NOT NULL DEFAULT '', - `name` varchar(60) NOT NULL DEFAULT '', - `host` varchar(60) DEFAULT NULL, - `port` int(11) DEFAULT NULL, - `account` varchar(255) DEFAULT NULL, - `account_password` varchar(255) DEFAULT '', - `base_dn` varchar(255) DEFAULT NULL, - `attr_login` varchar(30) DEFAULT NULL, - `attr_firstname` varchar(30) DEFAULT NULL, - `attr_lastname` varchar(30) DEFAULT NULL, - `attr_mail` varchar(30) DEFAULT NULL, - `onthefly_register` tinyint(1) NOT NULL DEFAULT '0', - `tls` tinyint(1) NOT NULL DEFAULT '0', - `filter` text, - `timeout` int(11) DEFAULT NULL, - `verify_peer` tinyint(1) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`), - KEY `index_auth_sources_on_id_and_type` (`id`,`type`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `auth_sources` --- - -LOCK TABLES `auth_sources` WRITE; -/*!40000 ALTER TABLE `auth_sources` DISABLE KEYS */; -/*!40000 ALTER TABLE `auth_sources` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `boards` --- - -DROP TABLE IF EXISTS `boards`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `boards` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `name` varchar(255) NOT NULL DEFAULT '', - `description` varchar(255) DEFAULT NULL, - `position` 
int(11) DEFAULT NULL, - `topics_count` int(11) NOT NULL DEFAULT '0', - `messages_count` int(11) NOT NULL DEFAULT '0', - `last_message_id` int(11) DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `boards_project_id` (`project_id`), - KEY `index_boards_on_last_message_id` (`last_message_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `boards` --- - -LOCK TABLES `boards` WRITE; -/*!40000 ALTER TABLE `boards` DISABLE KEYS */; -/*!40000 ALTER TABLE `boards` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changes` --- - -DROP TABLE IF EXISTS `changes`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changes` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `changeset_id` int(11) NOT NULL, - `action` varchar(1) NOT NULL DEFAULT '', - `path` text NOT NULL, - `from_path` text, - `from_revision` varchar(255) DEFAULT NULL, - `revision` varchar(255) DEFAULT NULL, - `branch` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `changesets_changeset_id` (`changeset_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changes` --- - -LOCK TABLES `changes` WRITE; -/*!40000 ALTER TABLE `changes` DISABLE KEYS */; -/*!40000 ALTER TABLE `changes` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changeset_parents` --- - -DROP TABLE IF EXISTS `changeset_parents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changeset_parents` ( - `changeset_id` int(11) NOT NULL, - `parent_id` int(11) NOT NULL, - KEY `changeset_parents_changeset_ids` (`changeset_id`), - KEY `changeset_parents_parent_ids` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changeset_parents` --- - -LOCK TABLES `changeset_parents` WRITE; -/*!40000 ALTER TABLE `changeset_parents` DISABLE KEYS */; -/*!40000 ALTER TABLE `changeset_parents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changesets` --- - -DROP TABLE IF EXISTS `changesets`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changesets` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `repository_id` int(11) NOT NULL, - `revision` varchar(255) NOT NULL, - `committer` varchar(255) DEFAULT NULL, - `committed_on` datetime NOT NULL, - `comments` longtext, - `commit_date` date DEFAULT NULL, - `scmid` varchar(255) DEFAULT NULL, - `user_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `changesets_repos_rev` (`repository_id`,`revision`), - KEY `index_changesets_on_user_id` (`user_id`), - KEY `index_changesets_on_repository_id` (`repository_id`), - KEY `index_changesets_on_committed_on` (`committed_on`), - KEY `changesets_repos_scmid` (`repository_id`,`scmid`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changesets` --- - -LOCK TABLES `changesets` WRITE; -/*!40000 ALTER TABLE `changesets` DISABLE KEYS */; -/*!40000 ALTER TABLE `changesets` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changesets_issues` --- - -DROP TABLE IF EXISTS `changesets_issues`; -/*!40101 SET @saved_cs_client = @@character_set_client */; 
-/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changesets_issues` ( - `changeset_id` int(11) NOT NULL, - `issue_id` int(11) NOT NULL, - UNIQUE KEY `changesets_issues_ids` (`changeset_id`,`issue_id`), - KEY `index_changesets_issues_on_issue_id` (`issue_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changesets_issues` --- - -LOCK TABLES `changesets_issues` WRITE; -/*!40000 ALTER TABLE `changesets_issues` DISABLE KEYS */; -/*!40000 ALTER TABLE `changesets_issues` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `comments` --- - -DROP TABLE IF EXISTS `comments`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `comments` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `commented_type` varchar(30) NOT NULL DEFAULT '', - `commented_id` int(11) NOT NULL DEFAULT '0', - `author_id` int(11) NOT NULL DEFAULT '0', - `content` text, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `index_comments_on_commented_id_and_commented_type` (`commented_id`,`commented_type`), - KEY `index_comments_on_author_id` (`author_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `comments` --- - -LOCK TABLES `comments` WRITE; -/*!40000 ALTER TABLE `comments` DISABLE KEYS */; -/*!40000 ALTER TABLE `comments` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_field_enumerations` --- - -DROP TABLE IF EXISTS `custom_field_enumerations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_field_enumerations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `custom_field_id` int(11) NOT NULL, - `name` varchar(255) NOT NULL, - `active` tinyint(1) NOT NULL DEFAULT '1', - `position` int(11) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_field_enumerations` --- - -LOCK TABLES `custom_field_enumerations` WRITE; -/*!40000 ALTER TABLE `custom_field_enumerations` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_field_enumerations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields` --- - -DROP TABLE IF EXISTS `custom_fields`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(30) NOT NULL DEFAULT '', - `name` varchar(30) NOT NULL DEFAULT '', - `field_format` varchar(30) NOT NULL DEFAULT '', - `possible_values` text, - `regexp` varchar(255) DEFAULT '', - `min_length` int(11) DEFAULT NULL, - `max_length` int(11) DEFAULT NULL, - `is_required` tinyint(1) NOT NULL DEFAULT '0', - `is_for_all` tinyint(1) NOT NULL DEFAULT '0', - `is_filter` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `searchable` tinyint(1) DEFAULT '0', - `default_value` text, - `editable` tinyint(1) DEFAULT '1', - `visible` tinyint(1) NOT NULL DEFAULT '1', - `multiple` tinyint(1) DEFAULT '0', - `format_store` text, - `description` text, - PRIMARY KEY (`id`), - KEY `index_custom_fields_on_id_and_type` (`id`,`type`) -) ENGINE=InnoDB AUTO_INCREMENT=27 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - 
--- --- Dumping data for table `custom_fields` --- - -LOCK TABLES `custom_fields` WRITE; -/*!40000 ALTER TABLE `custom_fields` DISABLE KEYS */; -INSERT INTO `custom_fields` VALUES (1,'IssueCustomField','Title','string',NULL,'',NULL,NULL,0,1,1,1,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(2,'IssueCustomField','Author','string',NULL,'',NULL,NULL,0,1,1,2,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(3,'IssueCustomField','Objective','text',NULL,'',NULL,NULL,0,1,1,14,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nfull_width_layout: \'1\'\n',''),(4,'IssueCustomField','Operational Notes','text',NULL,'',NULL,NULL,0,1,0,15,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(5,'IssueCustomField','Result Analysis','text',NULL,'',NULL,NULL,0,1,0,16,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(6,'IssueCustomField','ElastAlert Config','text',NULL,'',NULL,NULL,0,1,0,17,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(7,'IssueCustomField','HiveID','string',NULL,'',NULL,NULL,0,1,1,13,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(8,'IssueCustomField','References','text',NULL,'',NULL,NULL,0,1,0,6,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'0\'\n',''),(9,'IssueCustomField','Sigma','text',NULL,'',NULL,NULL,0,1,0,18,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(10,'IssueCustomField','Level','list','---\n- low\n- medium\n- high\n- critical\n','',NULL,NULL,0,1,1,3,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(11,'IssueCustomField','PlayID','string',NULL,'',NULL,NULL,0,1,1,8,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(12,'IssueCustomField','Rule ID','string',NULL,'',NULL,NULL,0,1,1,9,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(13,'IssueCustomField','Playbook','list','---\n- Internal\n- imported\n- community\n','',NULL,NULL,0,1,1,4,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(15,'IssueCustomField','ATT&CK Technique','list','---\n- T1001\n- T1002\n- T1003\n- T1004\n- T1005\n- T1006\n- T1007\n- T1008\n- T1009\n- T1010\n- T1011\n- T1012\n- T1013\n- T1014\n- T1015\n- T1016\n- T1017\n- T1018\n- T1019\n- T1020\n- T1021\n- T1022\n- T1023\n- T1024\n- T1025\n- T1026\n- T1027\n- T1028\n- T1029\n- T1030\n- T1031\n- T1032\n- T1033\n- T1034\n- T1035\n- T1036\n- T1037\n- T1038\n- T1039\n- T1040\n- T1041\n- T1042\n- T1043\n- T1044\n- T1045\n- T1046\n- T1047\n- T1048\n- T1049\n- T1050\n- T1051\n- T1052\n- T1053\n- T1054\n- T1055\n- T1056\n- T1057\n- T1058\n- T1059\n- T1060\n- T1061\n- T1062\n- T1063\n- T1064\n- T1065\n- T1066\n- T1067\n- T1068\n- T1069\n- T1070\n- T1071\n- T1072\n- T1073\n- T1074\n- T1075\n- T1076\n- T1077\n- T1078\n- T1079\n- T1080\n- T1081\n- T1082\n- T1083\n- T1084\n- T1085\n- T1086\n- T1087\n- 
T1088\n- T1089\n- T1090\n- T1091\n- T1092\n- T1093\n- T1094\n- T1095\n- T1096\n- T1097\n- T1098\n- T1099\n- T1100\n- T1101\n- T1102\n- T1103\n- T1104\n- T1105\n- T1106\n- T1107\n- T1108\n- T1109\n- T1110\n- T1111\n- T1112\n- T1113\n- T1114\n- T1115\n- T1116\n- T1117\n- T1118\n- T1119\n- T1120\n- T1121\n- T1122\n- T1123\n- T1124\n- T1125\n- T1126\n- T1127\n- T1128\n- T1129\n- T1130\n- T1131\n- T1132\n- T1133\n- T1134\n- T1135\n- T1136\n- T1137\n- T1138\n- T1139\n- T1140\n- T1141\n- T1142\n- T1143\n- T1144\n- T1145\n- T1146\n- T1147\n- T1148\n- T1149\n- T1150\n- T1151\n- T1152\n- T1153\n- T1154\n- T1155\n- T1156\n- T1157\n- T1158\n- T1159\n- T1160\n- T1161\n- T1162\n- T1163\n- T1164\n- T1165\n- T1166\n- T1167\n- T1168\n- T1169\n- T1170\n- T1171\n- T1172\n- T1173\n- T1174\n- T1175\n- T1176\n- T1177\n- T1178\n- T1179\n- T1180\n- T1181\n- T1182\n- T1183\n- T1184\n- T1185\n- T1186\n- T1187\n- T1188\n- T1189\n- T1190\n- T1191\n- T1192\n- T1193\n- T1194\n- T1195\n- T1196\n- T1197\n- T1198\n- T1199\n- T1200\n- T1201\n- T1202\n- T1203\n- T1204\n- T1205\n- T1206\n- T1207\n- T1208\n- T1209\n- T1210\n- T1211\n- T1212\n- T1213\n- T1214\n- T1215\n- T1216\n- T1217\n- T1218\n- T1219\n- T1220\n- T1221\n- T1222\n- T1223\n- T1480\n- T1482\n- T1483\n- T1484\n- T1485\n- T1486\n- T1487\n- T1488\n- T1489\n- T1490\n- T1491\n- T1492\n- T1493\n- T1494\n- T1495\n- T1496\n- T1497\n- T1498\n- T1499\n- T1500\n- T1501\n- T1502\n- T1503\n- T1504\n- T1505\n- T1506\n- T1514\n- T1518\n- T1519\n- T1522\n- T1525\n- T1526\n- T1527\n- T1528\n- T1529\n- T1530\n- T1531\n- T1534\n- T1535\n- T1536\n- T1537\n- T1538\n- T1539\n- T1540\n- T1541\n- T1542\n- T1543\n- T1544\n- T1545\n- T1546\n- T1547\n- T1548\n- T1549\n- T1550\n- T1551\n- T1552\n- T1553\n- T1554\n- T1555\n- T1556\n- T1557\n- T1558\n- T1559\n- T1560\n- T1561\n- T1562\n- T1563\n- T1564\n- T1565\n- T1566\n- T1567\n- T1568\n- T1569\n- T1570\n- T1571\n- T1572\n- T1573\n- T1574\n- T1575\n- T1576\n- T1577\n- T1578\n- T1579\n- T1580\n- T1581\n- T1582\n- T1583\n- T1584\n- T1585\n- T1586\n- T1587\n- T1588\n- T1589\n- T1590\n- T1591\n- T1592\n- T1593\n- T1594\n- T1595\n- T1596\n- T1597\n- T1598\n- T1599\n- T1600\n- T1601\n- T1602\n- T1603\n- T1604\n- T1605\n- T1606\n- T1607\n- T1608\n- T1609\n- T1610\n- T1611\n- T1612\n- T1613\n- T1614\n- T1615\n- T1616\n- T1617\n- T1618\n- T1619\n- T1620\n- T1621\n- T1622\n- T1623\n- T1624\n- T1625\n- T1626\n- T1627\n- T1628\n- T1629\n- T1630\n- T1631\n- T1632\n- T1633\n- T1634\n- T1635\n- T1636\n- T1637\n- T1638\n- T1639\n- T1640\n- T1641\n- T1642\n- T1643\n- T1644\n- T1645\n- T1646\n- T1647\n- T1648\n- T1649\n- T1650\n- T1651\n- T1652\n- T1653\n- T1654\n- T1655\n- T1656\n- T1657\n- T1658\n- T1659\n- T1660\n- T1661\n- T1662\n- T1663\n- T1664\n- T1665\n- T1666\n- T1667\n- T1668\n- T1669\n- T1670\n- T1671\n- T1672\n- T1673\n- T1674\n- T1675\n- T1676\n- T1677\n- T1678\n- T1679\n- T1680\n- T1681\n- T1682\n- T1683\n- T1684\n- T1685\n- T1686\n- T1687\n- T1688\n- T1689\n- T1690\n- T1691\n- T1692\n- T1693\n- T1694\n- T1695\n- T1696\n- T1697\n- T1698\n- T1699\n- T1700\n- T1701\n- T1702\n- T1703\n- T1704\n- T1705\n- T1706\n- T1707\n- T1708\n- T1709\n- T1710\n- T1711\n- T1712\n- T1713\n- T1714\n- T1715\n- T1716\n- T1717\n- T1718\n- T1719\n- T1720\n- T1721\n- T1722\n- T1723\n- T1724\n- T1725\n- T1726\n- T1727\n- T1728\n- T1729\n- T1730\n- T1731\n- T1732\n- T1733\n- T1734\n- T1735\n- T1736\n- T1737\n- T1738\n- T1739\n- T1740\n- T1741\n- T1742\n- T1743\n- T1744\n- T1745\n- T1746\n- T1747\n- T1748\n- T1749\n- T1750\n- T1751\n- 
T1752\n','',NULL,NULL,0,1,1,7,0,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: https://attack.mitre.org/techniques/%value%\nedit_tag_style: \'\'\n',''),(17,'IssueCustomField','Case Analyzers','list','---\n- Urlscan_io_Search - ip,domain,hash,url\n- CERTatPassiveDNS - domain,fqdn,ip\n','',NULL,NULL,0,1,1,12,1,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(18,'IssueCustomField','Ruleset','string',NULL,'',NULL,NULL,0,1,1,10,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(19,'IssueCustomField','Group','string',NULL,'',NULL,NULL,0,1,1,11,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(20,'IssueCustomField','Product','string',NULL,'',NULL,NULL,0,1,1,5,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(21,'IssueCustomField','Target Log','text',NULL,'',NULL,NULL,0,1,0,19,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(22,'IssueCustomField','Unit Test','list','---\n- Passed\n- Failed\n','',NULL,NULL,0,1,1,20,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(26,'IssueCustomField','License','list','---\n- Apache-2.0\n- BSD-2-Clause\n- BSD-3-Clause\n- CC0-1.0\n- CC-PDDC\n- DRL-1.0\n- LGPL-3.0-only\n- MIT License\n- GPL-2.0-only\n- GPL-3.0-only\n','',NULL,NULL,0,1,0,21,0,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: https://spdx.org/licenses/%value%.html\nedit_tag_style: \'\'\n',''); -/*!40000 ALTER TABLE `custom_fields` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_projects` --- - -DROP TABLE IF EXISTS `custom_fields_projects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_projects` ( - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `project_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `index_custom_fields_projects_on_custom_field_id_and_project_id` (`custom_field_id`,`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_projects` --- - -LOCK TABLES `custom_fields_projects` WRITE; -/*!40000 ALTER TABLE `custom_fields_projects` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_fields_projects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_roles` --- - -DROP TABLE IF EXISTS `custom_fields_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_roles` ( - `custom_field_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - UNIQUE KEY `custom_fields_roles_ids` (`custom_field_id`,`role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_roles` --- - -LOCK TABLES `custom_fields_roles` WRITE; -/*!40000 ALTER TABLE `custom_fields_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_fields_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_trackers` --- - -DROP TABLE IF EXISTS `custom_fields_trackers`; -/*!40101 SET @saved_cs_client = 
@@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_trackers` ( - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `tracker_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `index_custom_fields_trackers_on_custom_field_id_and_tracker_id` (`custom_field_id`,`tracker_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_trackers` --- - -LOCK TABLES `custom_fields_trackers` WRITE; -/*!40000 ALTER TABLE `custom_fields_trackers` DISABLE KEYS */; -INSERT INTO `custom_fields_trackers` VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(15,1),(17,1),(18,1),(19,1),(20,1),(21,1),(22,1),(26,1); -/*!40000 ALTER TABLE `custom_fields_trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_values` --- - -DROP TABLE IF EXISTS `custom_values`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_values` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `customized_type` varchar(30) NOT NULL DEFAULT '', - `customized_id` int(11) NOT NULL DEFAULT '0', - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `value` longtext, - PRIMARY KEY (`id`), - KEY `custom_values_customized` (`customized_type`,`customized_id`), - KEY `index_custom_values_on_custom_field_id` (`custom_field_id`) -) ENGINE=InnoDB AUTO_INCREMENT=145325 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_values` --- - -LOCK TABLES `custom_values` WRITE; -/*!40000 ALTER TABLE `custom_values` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_values` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `documents` --- - -DROP TABLE IF EXISTS `documents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `documents` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `category_id` int(11) NOT NULL DEFAULT '0', - `title` varchar(255) NOT NULL DEFAULT '', - `description` text, - `created_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `documents_project_id` (`project_id`), - KEY `index_documents_on_category_id` (`category_id`), - KEY `index_documents_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `documents` --- - -LOCK TABLES `documents` WRITE; -/*!40000 ALTER TABLE `documents` DISABLE KEYS */; -/*!40000 ALTER TABLE `documents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `email_addresses` --- - -DROP TABLE IF EXISTS `email_addresses`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `email_addresses` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL, - `address` varchar(255) NOT NULL, - `is_default` tinyint(1) NOT NULL DEFAULT '0', - `notify` tinyint(1) NOT NULL DEFAULT '1', - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `index_email_addresses_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `email_addresses` --- - -LOCK TABLES `email_addresses` WRITE; -/*!40000 ALTER TABLE 
`email_addresses` DISABLE KEYS */; -INSERT INTO `email_addresses` VALUES (1,1,'admin@example.net',1,1,'2020-04-26 13:08:38','2020-04-26 13:08:38'),(3,9,'automation@localhost.local',1,1,'2020-04-26 18:47:46','2020-04-26 18:47:46'); -/*!40000 ALTER TABLE `email_addresses` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `enabled_modules` --- - -DROP TABLE IF EXISTS `enabled_modules`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `enabled_modules` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `name` varchar(255) NOT NULL, - PRIMARY KEY (`id`), - KEY `enabled_modules_project_id` (`project_id`) -) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `enabled_modules` --- - -LOCK TABLES `enabled_modules` WRITE; -/*!40000 ALTER TABLE `enabled_modules` DISABLE KEYS */; -INSERT INTO `enabled_modules` VALUES (1,1,'sigma_editor'),(2,1,'issue_tracking'); -/*!40000 ALTER TABLE `enabled_modules` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `enumerations` --- - -DROP TABLE IF EXISTS `enumerations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `enumerations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `position` int(11) DEFAULT NULL, - `is_default` tinyint(1) NOT NULL DEFAULT '0', - `type` varchar(255) DEFAULT NULL, - `active` tinyint(1) NOT NULL DEFAULT '1', - `project_id` int(11) DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - `position_name` varchar(30) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_enumerations_on_project_id` (`project_id`), - KEY `index_enumerations_on_id_and_type` (`id`,`type`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `enumerations` --- - -LOCK TABLES `enumerations` WRITE; -/*!40000 ALTER TABLE `enumerations` DISABLE KEYS */; -INSERT INTO `enumerations` VALUES (1,'Normal',1,1,'IssuePriority',1,NULL,NULL,'default'); -/*!40000 ALTER TABLE `enumerations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `groups_users` --- - -DROP TABLE IF EXISTS `groups_users`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `groups_users` ( - `group_id` int(11) NOT NULL, - `user_id` int(11) NOT NULL, - UNIQUE KEY `groups_users_ids` (`group_id`,`user_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `groups_users` --- - -LOCK TABLES `groups_users` WRITE; -/*!40000 ALTER TABLE `groups_users` DISABLE KEYS */; -INSERT INTO `groups_users` VALUES (7,1); -/*!40000 ALTER TABLE `groups_users` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `import_items` --- - -DROP TABLE IF EXISTS `import_items`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `import_items` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `import_id` int(11) NOT NULL, - `position` int(11) NOT NULL, - `obj_id` int(11) DEFAULT NULL, - `message` text, - `unique_id` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_import_items_on_import_id_and_unique_id` (`import_id`,`unique_id`) -) ENGINE=InnoDB DEFAULT 
CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `import_items` --- - -LOCK TABLES `import_items` WRITE; -/*!40000 ALTER TABLE `import_items` DISABLE KEYS */; -/*!40000 ALTER TABLE `import_items` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `imports` --- - -DROP TABLE IF EXISTS `imports`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `imports` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(255) DEFAULT NULL, - `user_id` int(11) NOT NULL, - `filename` varchar(255) DEFAULT NULL, - `settings` text, - `total_items` int(11) DEFAULT NULL, - `finished` tinyint(1) NOT NULL DEFAULT '0', - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `imports` --- - -LOCK TABLES `imports` WRITE; -/*!40000 ALTER TABLE `imports` DISABLE KEYS */; -/*!40000 ALTER TABLE `imports` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_categories` --- - -DROP TABLE IF EXISTS `issue_categories`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_categories` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `name` varchar(60) NOT NULL DEFAULT '', - `assigned_to_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `issue_categories_project_id` (`project_id`), - KEY `index_issue_categories_on_assigned_to_id` (`assigned_to_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_categories` --- - -LOCK TABLES `issue_categories` WRITE; -/*!40000 ALTER TABLE `issue_categories` DISABLE KEYS */; -/*!40000 ALTER TABLE `issue_categories` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_relations` --- - -DROP TABLE IF EXISTS `issue_relations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_relations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `issue_from_id` int(11) NOT NULL, - `issue_to_id` int(11) NOT NULL, - `relation_type` varchar(255) NOT NULL DEFAULT '', - `delay` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `index_issue_relations_on_issue_from_id_and_issue_to_id` (`issue_from_id`,`issue_to_id`), - KEY `index_issue_relations_on_issue_from_id` (`issue_from_id`), - KEY `index_issue_relations_on_issue_to_id` (`issue_to_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_relations` --- - -LOCK TABLES `issue_relations` WRITE; -/*!40000 ALTER TABLE `issue_relations` DISABLE KEYS */; -/*!40000 ALTER TABLE `issue_relations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_statuses` --- - -DROP TABLE IF EXISTS `issue_statuses`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_statuses` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `is_closed` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `default_done_ratio` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_issue_statuses_on_position` (`position`), - KEY 
`index_issue_statuses_on_is_closed` (`is_closed`) -) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_statuses` --- - -LOCK TABLES `issue_statuses` WRITE; -/*!40000 ALTER TABLE `issue_statuses` DISABLE KEYS */; -INSERT INTO `issue_statuses` VALUES (2,'Draft',0,1,NULL),(3,'Active',0,2,NULL),(4,'Inactive',0,3,NULL),(5,'Archived',0,4,NULL),(6,'Disabled',0,5,NULL); -/*!40000 ALTER TABLE `issue_statuses` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issues` --- - -DROP TABLE IF EXISTS `issues`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issues` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `tracker_id` int(11) NOT NULL, - `project_id` int(11) NOT NULL, - `subject` varchar(255) NOT NULL DEFAULT '', - `description` longtext, - `due_date` date DEFAULT NULL, - `category_id` int(11) DEFAULT NULL, - `status_id` int(11) NOT NULL, - `assigned_to_id` int(11) DEFAULT NULL, - `priority_id` int(11) NOT NULL, - `fixed_version_id` int(11) DEFAULT NULL, - `author_id` int(11) NOT NULL, - `lock_version` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `start_date` date DEFAULT NULL, - `done_ratio` int(11) NOT NULL DEFAULT '0', - `estimated_hours` float DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - `root_id` int(11) DEFAULT NULL, - `lft` int(11) DEFAULT NULL, - `rgt` int(11) DEFAULT NULL, - `is_private` tinyint(1) NOT NULL DEFAULT '0', - `closed_on` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `issues_project_id` (`project_id`), - KEY `index_issues_on_status_id` (`status_id`), - KEY `index_issues_on_category_id` (`category_id`), - KEY `index_issues_on_assigned_to_id` (`assigned_to_id`), - KEY `index_issues_on_fixed_version_id` (`fixed_version_id`), - KEY `index_issues_on_tracker_id` (`tracker_id`), - KEY `index_issues_on_priority_id` (`priority_id`), - KEY `index_issues_on_author_id` (`author_id`), - KEY `index_issues_on_created_on` (`created_on`), - KEY `index_issues_on_root_id_and_lft_and_rgt` (`root_id`,`lft`,`rgt`), - KEY `index_issues_on_parent_id` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issues` --- - -LOCK TABLES `issues` WRITE; -/*!40000 ALTER TABLE `issues` DISABLE KEYS */; -/*!40000 ALTER TABLE `issues` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `journal_details` --- - -DROP TABLE IF EXISTS `journal_details`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `journal_details` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `journal_id` int(11) NOT NULL DEFAULT '0', - `property` varchar(30) NOT NULL DEFAULT '', - `prop_key` varchar(30) NOT NULL DEFAULT '', - `old_value` longtext, - `value` longtext, - PRIMARY KEY (`id`), - KEY `journal_details_journal_id` (`journal_id`) -) ENGINE=InnoDB AUTO_INCREMENT=792 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `journal_details` --- - -LOCK TABLES `journal_details` WRITE; -/*!40000 ALTER TABLE `journal_details` DISABLE KEYS */; -/*!40000 ALTER TABLE `journal_details` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `journals` --- - -DROP TABLE IF EXISTS `journals`; -/*!40101 SET @saved_cs_client = 
@@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `journals` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `journalized_id` int(11) NOT NULL DEFAULT '0', - `journalized_type` varchar(30) NOT NULL DEFAULT '', - `user_id` int(11) NOT NULL DEFAULT '0', - `notes` longtext, - `created_on` datetime NOT NULL, - `private_notes` tinyint(1) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - KEY `journals_journalized_id` (`journalized_id`,`journalized_type`), - KEY `index_journals_on_user_id` (`user_id`), - KEY `index_journals_on_journalized_id` (`journalized_id`), - KEY `index_journals_on_created_on` (`created_on`) -) ENGINE=InnoDB AUTO_INCREMENT=9502 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `journals` --- - -LOCK TABLES `journals` WRITE; -/*!40000 ALTER TABLE `journals` DISABLE KEYS */; -/*!40000 ALTER TABLE `journals` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `member_roles` --- - -DROP TABLE IF EXISTS `member_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `member_roles` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `member_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - `inherited_from` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_member_roles_on_member_id` (`member_id`), - KEY `index_member_roles_on_role_id` (`role_id`), - KEY `index_member_roles_on_inherited_from` (`inherited_from`) -) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `member_roles` --- - -LOCK TABLES `member_roles` WRITE; -/*!40000 ALTER TABLE `member_roles` DISABLE KEYS */; -INSERT INTO `member_roles` VALUES (1,1,5,NULL),(2,2,3,NULL),(3,3,4,NULL),(4,4,5,1),(7,7,4,3); -/*!40000 ALTER TABLE `member_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `members` --- - -DROP TABLE IF EXISTS `members`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `members` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL DEFAULT '0', - `project_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `mail_notification` tinyint(1) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - UNIQUE KEY `index_members_on_user_id_and_project_id` (`user_id`,`project_id`), - KEY `index_members_on_user_id` (`user_id`), - KEY `index_members_on_project_id` (`project_id`) -) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `members` --- - -LOCK TABLES `members` WRITE; -/*!40000 ALTER TABLE `members` DISABLE KEYS */; -INSERT INTO `members` VALUES (1,6,1,'2020-04-26 18:44:14',0),(2,5,1,'2020-04-26 18:44:23',0),(3,7,1,'2020-04-26 18:45:27',0),(4,9,1,'2020-04-26 18:47:51',0),(7,1,1,'2020-05-01 16:42:56',0); -/*!40000 ALTER TABLE `members` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `messages` --- - -DROP TABLE IF EXISTS `messages`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `messages` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `board_id` int(11) NOT NULL, - `parent_id` int(11) DEFAULT NULL, - `subject` varchar(255) NOT NULL DEFAULT '', - `content` text, - `author_id` int(11) DEFAULT NULL, - `replies_count` int(11) NOT NULL 
DEFAULT '0', - `last_reply_id` int(11) DEFAULT NULL, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - `locked` tinyint(1) DEFAULT '0', - `sticky` int(11) DEFAULT '0', - PRIMARY KEY (`id`), - KEY `messages_board_id` (`board_id`), - KEY `messages_parent_id` (`parent_id`), - KEY `index_messages_on_last_reply_id` (`last_reply_id`), - KEY `index_messages_on_author_id` (`author_id`), - KEY `index_messages_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `messages` --- - -LOCK TABLES `messages` WRITE; -/*!40000 ALTER TABLE `messages` DISABLE KEYS */; -/*!40000 ALTER TABLE `messages` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `news` --- - -DROP TABLE IF EXISTS `news`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `news` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `title` varchar(60) NOT NULL DEFAULT '', - `summary` varchar(255) DEFAULT '', - `description` text, - `author_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `comments_count` int(11) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - KEY `news_project_id` (`project_id`), - KEY `index_news_on_author_id` (`author_id`), - KEY `index_news_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `news` --- - -LOCK TABLES `news` WRITE; -/*!40000 ALTER TABLE `news` DISABLE KEYS */; -/*!40000 ALTER TABLE `news` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `open_id_authentication_associations` --- - -DROP TABLE IF EXISTS `open_id_authentication_associations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `open_id_authentication_associations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `issued` int(11) DEFAULT NULL, - `lifetime` int(11) DEFAULT NULL, - `handle` varchar(255) DEFAULT NULL, - `assoc_type` varchar(255) DEFAULT NULL, - `server_url` blob, - `secret` blob, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `open_id_authentication_associations` --- - -LOCK TABLES `open_id_authentication_associations` WRITE; -/*!40000 ALTER TABLE `open_id_authentication_associations` DISABLE KEYS */; -/*!40000 ALTER TABLE `open_id_authentication_associations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `open_id_authentication_nonces` --- - -DROP TABLE IF EXISTS `open_id_authentication_nonces`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `open_id_authentication_nonces` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `timestamp` int(11) NOT NULL, - `server_url` varchar(255) DEFAULT NULL, - `salt` varchar(255) NOT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `open_id_authentication_nonces` --- - -LOCK TABLES `open_id_authentication_nonces` WRITE; -/*!40000 ALTER TABLE `open_id_authentication_nonces` DISABLE KEYS */; -/*!40000 ALTER TABLE `open_id_authentication_nonces` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `projects` --- - -DROP TABLE 
IF EXISTS `projects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `projects` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `description` text, - `homepage` varchar(255) DEFAULT '', - `is_public` tinyint(1) NOT NULL DEFAULT '1', - `parent_id` int(11) DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `identifier` varchar(255) DEFAULT NULL, - `status` int(11) NOT NULL DEFAULT '1', - `lft` int(11) DEFAULT NULL, - `rgt` int(11) DEFAULT NULL, - `inherit_members` tinyint(1) NOT NULL DEFAULT '0', - `default_version_id` int(11) DEFAULT NULL, - `default_assigned_to_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_projects_on_lft` (`lft`), - KEY `index_projects_on_rgt` (`rgt`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `projects` --- - -LOCK TABLES `projects` WRITE; -/*!40000 ALTER TABLE `projects` DISABLE KEYS */; -INSERT INTO `projects` VALUES (1,'Detection Playbooks','','',1,NULL,'2020-04-26 13:13:01','2020-07-10 19:33:53','detection-playbooks',1,1,2,0,NULL,NULL); -/*!40000 ALTER TABLE `projects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `projects_trackers` --- - -DROP TABLE IF EXISTS `projects_trackers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `projects_trackers` ( - `project_id` int(11) NOT NULL DEFAULT '0', - `tracker_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `projects_trackers_unique` (`project_id`,`tracker_id`), - KEY `projects_trackers_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `projects_trackers` --- - -LOCK TABLES `projects_trackers` WRITE; -/*!40000 ALTER TABLE `projects_trackers` DISABLE KEYS */; -INSERT INTO `projects_trackers` VALUES (1,1); -/*!40000 ALTER TABLE `projects_trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `queries` --- - -DROP TABLE IF EXISTS `queries`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `queries` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `name` varchar(255) NOT NULL DEFAULT '', - `filters` text, - `user_id` int(11) NOT NULL DEFAULT '0', - `column_names` text, - `sort_criteria` text, - `group_by` varchar(255) DEFAULT NULL, - `type` varchar(255) DEFAULT NULL, - `visibility` int(11) DEFAULT '0', - `options` text, - PRIMARY KEY (`id`), - KEY `index_queries_on_project_id` (`project_id`), - KEY `index_queries_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `queries` --- - -LOCK TABLES `queries` WRITE; -/*!40000 ALTER TABLE `queries` DISABLE KEYS */; -INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Inactive Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'4\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: 
list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); -/*!40000 ALTER TABLE `queries` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `queries_roles` --- - -DROP TABLE IF EXISTS `queries_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `queries_roles` ( - `query_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - UNIQUE KEY `queries_roles_ids` (`query_id`,`role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `queries_roles` --- - -LOCK TABLES `queries_roles` WRITE; -/*!40000 ALTER TABLE `queries_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `queries_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `repositories` --- - -DROP TABLE IF EXISTS `repositories`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `repositories` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `url` varchar(255) NOT NULL DEFAULT '', - `login` varchar(60) DEFAULT '', - `password` varchar(255) DEFAULT '', - `root_url` varchar(255) DEFAULT '', - `type` varchar(255) DEFAULT NULL, - `path_encoding` varchar(64) DEFAULT NULL, - `log_encoding` varchar(64) DEFAULT NULL, - `extra_info` longtext, - `identifier` varchar(255) DEFAULT NULL, - `is_default` tinyint(1) DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_repositories_on_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `repositories` --- - -LOCK TABLES `repositories` WRITE; -/*!40000 ALTER TABLE `repositories` DISABLE KEYS */; -/*!40000 ALTER TABLE `repositories` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `roles` --- - -DROP TABLE IF EXISTS `roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `roles` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `position` 
int(11) DEFAULT NULL, - `assignable` tinyint(1) DEFAULT '1', - `builtin` int(11) NOT NULL DEFAULT '0', - `permissions` text, - `issues_visibility` varchar(30) NOT NULL DEFAULT 'default', - `users_visibility` varchar(30) NOT NULL DEFAULT 'all', - `time_entries_visibility` varchar(30) NOT NULL DEFAULT 'all', - `all_roles_managed` tinyint(1) NOT NULL DEFAULT '1', - `settings` text, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `roles` --- - -LOCK TABLES `roles` WRITE; -/*!40000 ALTER TABLE `roles` DISABLE KEYS */; -INSERT INTO `roles` VALUES (1,'Non member',0,1,1,NULL,'default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'0\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n'),(2,'Anonymous',0,1,2,'---\n- :view_issues\n- :edit_issues\n- :add_issue_notes\n- :sigma_editor\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n'),(3,'Security-Analyst',1,0,0,'---\n- :save_queries\n- :view_issues\n- :edit_issues\n- :add_issue_notes\n- :edit_issue_notes\n- :sigma_editor\n','all','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'),(4,'SuperAdmin',2,0,0,'---\n- :add_project\n- :edit_project\n- :close_project\n- :select_project_modules\n- :manage_members\n- :manage_versions\n- :add_subprojects\n- :manage_public_queries\n- :save_queries\n- :manage_hook\n- :view_messages\n- :add_messages\n- :edit_messages\n- :edit_own_messages\n- :delete_messages\n- :delete_own_messages\n- :manage_boards\n- :view_calendar\n- :view_documents\n- :add_documents\n- :edit_documents\n- :delete_documents\n- :view_files\n- :manage_files\n- :view_gantt\n- :view_issues\n- :edit_issues\n- :edit_own_issues\n- :copy_issues\n- :manage_issue_relations\n- :manage_subtasks\n- :set_issues_private\n- :set_own_issues_private\n- :add_issue_notes\n- :edit_issue_notes\n- :edit_own_issue_notes\n- :view_private_notes\n- :set_notes_private\n- :delete_issues\n- :view_issue_watchers\n- :add_issue_watchers\n- :delete_issue_watchers\n- :import_issues\n- :manage_categories\n- :view_news\n- :manage_news\n- :comment_news\n- :view_changesets\n- :browse_repository\n- :commit_access\n- :manage_related_issues\n- :manage_repository\n- :sigma_editor\n- :view_time_entries\n- :log_time\n- :edit_time_entries\n- :edit_own_time_entries\n- :manage_project_activities\n- :log_time_for_other_users\n- :import_time_entries\n- :view_wiki_pages\n- :view_wiki_edits\n- :export_wiki_pages\n- :edit_wiki_pages\n- :rename_wiki_pages\n- 
:delete_wiki_pages\n- :delete_wiki_pages_attachments\n- :protect_wiki_pages\n- :manage_wiki\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'),(5,'Automation',3,0,0,'---\n- :view_issues\n- :add_issues\n- :edit_issues\n- :add_issue_notes\n- :edit_issue_notes\n- :import_issues\n- :sigma_editor\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'); -/*!40000 ALTER TABLE `roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `roles_managed_roles` --- - -DROP TABLE IF EXISTS `roles_managed_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `roles_managed_roles` ( - `role_id` int(11) NOT NULL, - `managed_role_id` int(11) NOT NULL, - UNIQUE KEY `index_roles_managed_roles_on_role_id_and_managed_role_id` (`role_id`,`managed_role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `roles_managed_roles` --- - -LOCK TABLES `roles_managed_roles` WRITE; -/*!40000 ALTER TABLE `roles_managed_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `roles_managed_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `schema_migrations` --- - -DROP TABLE IF EXISTS `schema_migrations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `schema_migrations` ( - `version` varchar(255) NOT NULL, - PRIMARY KEY (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `schema_migrations` --- - -LOCK TABLES `schema_migrations` WRITE; -/*!40000 ALTER TABLE `schema_migrations` DISABLE KEYS */; -INSERT INTO `schema_migrations` VALUES 
('1'),('1-redmine_webhook'),('10'),('100'),('101'),('102'),('103'),('104'),('105'),('106'),('107'),('108'),('11'),('12'),('13'),('14'),('15'),('16'),('17'),('18'),('19'),('2'),('20'),('20090214190337'),('20090312172426'),('20090312194159'),('20090318181151'),('20090323224724'),('20090401221305'),('20090401231134'),('20090403001910'),('20090406161854'),('20090425161243'),('20090503121501'),('20090503121505'),('20090503121510'),('20090614091200'),('20090704172350'),('20090704172355'),('20090704172358'),('20091010093521'),('20091017212227'),('20091017212457'),('20091017212644'),('20091017212938'),('20091017213027'),('20091017213113'),('20091017213151'),('20091017213228'),('20091017213257'),('20091017213332'),('20091017213444'),('20091017213536'),('20091017213642'),('20091017213716'),('20091017213757'),('20091017213835'),('20091017213910'),('20091017214015'),('20091017214107'),('20091017214136'),('20091017214236'),('20091017214308'),('20091017214336'),('20091017214406'),('20091017214440'),('20091017214519'),('20091017214611'),('20091017214644'),('20091017214720'),('20091017214750'),('20091025163651'),('20091108092559'),('20091114105931'),('20091123212029'),('20091205124427'),('20091220183509'),('20091220183727'),('20091220184736'),('20091225164732'),('20091227112908'),('20100129193402'),('20100129193813'),('20100221100219'),('20100313132032'),('20100313171051'),('20100705164950'),('20100819172912'),('20101104182107'),('20101107130441'),('20101114115114'),('20101114115359'),('20110220160626'),('20110223180944'),('20110223180953'),('20110224000000'),('20110226120112'),('20110226120132'),('20110227125750'),('20110228000000'),('20110228000100'),('20110401192910'),('20110408103312'),('20110412065600'),('20110511000000'),('20110902000000'),('20111201201315'),('20120115143024'),('20120115143100'),('20120115143126'),('20120127174243'),('20120205111326'),('20120223110929'),('20120301153455'),('20120422150750'),('20120705074331'),('20120707064544'),('20120714122000'),('20120714122100'),('20120714122200'),('20120731164049'),('20120930112914'),('20121026002032'),('20121026003537'),('20121209123234'),('20121209123358'),('20121213084931'),('20130110122628'),('20130201184705'),('20130202090625'),('20130207175206'),('20130207181455'),('20130215073721'),('20130215111127'),('20130215111141'),('20130217094251'),('20130602092539'),('20130710182539'),('20130713104233'),('20130713111657'),('20130729070143'),('20130911193200'),('20131004113137'),('20131005100610'),('20131124175346'),('20131210180802'),('20131214094309'),('20131215104612'),('20131218183023'),('20140228130325'),('20140903143914'),('20140920094058'),('20141029181752'),('20141029181824'),('20141109112308'),('20141122124142'),('20150113194759'),('20150113211532'),('20150113213922'),('20150113213955'),('20150208105930'),('20150510083747'),('20150525103953'),('20150526183158'),('20150528084820'),('20150528092912'),('20150528093249'),('20150725112753'),('20150730122707'),('20150730122735'),('20150921204850'),('20150921210243'),('20151020182334'),('20151020182731'),('20151021184614'),('20151021185456'),('20151021190616'),('20151024082034'),('20151025072118'),('20151031095005'),('20160404080304'),('20160416072926'),('20160529063352'),('20161001122012'),('20161002133421'),('20161010081301'),('20161010081528'),('20161010081600'),('20161126094932'),('20161220091118'),('20170207050700'),('20170302015225'),('20170309214320'),('20170320051650'),('20170418090031'),('20170419144536'),('20170723112801'),('20180501132547'),('20180913072918'),('20180923082945'),('2018092
3091603'),('20190315094151'),('20190315102101'),('20190510070108'),('20190620135549'),('21'),('22'),('23'),('24'),('25'),('26'),('27'),('28'),('29'),('3'),('30'),('31'),('32'),('33'),('34'),('35'),('36'),('37'),('38'),('39'),('4'),('40'),('41'),('42'),('43'),('44'),('45'),('46'),('47'),('48'),('49'),('5'),('50'),('51'),('52'),('53'),('54'),('55'),('56'),('57'),('58'),('59'),('6'),('60'),('61'),('62'),('63'),('64'),('65'),('66'),('67'),('68'),('69'),('7'),('70'),('71'),('72'),('73'),('74'),('75'),('76'),('77'),('78'),('79'),('8'),('80'),('81'),('82'),('83'),('84'),('85'),('86'),('87'),('88'),('89'),('9'),('90'),('91'),('92'),('93'),('94'),('95'),('96'),('97'),('98'),('99'); -/*!40000 ALTER TABLE `schema_migrations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `settings` --- - -DROP TABLE IF EXISTS `settings`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `settings` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `value` text, - `updated_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_settings_on_name` (`name`) -) ENGINE=InnoDB AUTO_INCREMENT=71 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `settings` --- - -LOCK TABLES `settings` WRITE; -/*!40000 ALTER TABLE `settings` DISABLE KEYS */; -INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 18:17:51'),(38,'protocol','http','2020-04-26 
18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.135:7000/playbook/sigmac\ncreate_url: http://10.66.166.135:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); -/*!40000 ALTER TABLE `settings` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `time_entries` --- - -DROP TABLE IF EXISTS `time_entries`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `time_entries` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `user_id` int(11) NOT NULL, - `issue_id` int(11) DEFAULT NULL, - `hours` float NOT NULL, - `comments` varchar(1024) DEFAULT NULL, - `activity_id` int(11) NOT NULL, - `spent_on` date NOT NULL, - `tyear` int(11) NOT NULL, - `tmonth` int(11) NOT NULL, - `tweek` int(11) NOT NULL, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `time_entries_project_id` (`project_id`), - KEY `time_entries_issue_id` (`issue_id`), - KEY `index_time_entries_on_activity_id` (`activity_id`), - KEY `index_time_entries_on_user_id` (`user_id`), - KEY `index_time_entries_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `time_entries` --- - -LOCK TABLES `time_entries` WRITE; -/*!40000 ALTER TABLE `time_entries` DISABLE KEYS */; -/*!40000 ALTER TABLE `time_entries` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `tokens` --- - -DROP TABLE IF EXISTS `tokens`; -/*!40101 SET 
@saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `tokens` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL DEFAULT '0', - `action` varchar(30) NOT NULL DEFAULT '', - `value` varchar(40) NOT NULL DEFAULT '', - `created_on` datetime NOT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `tokens_value` (`value`), - KEY `index_tokens_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=67 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `tokens` --- - -LOCK TABLES `tokens` WRITE; -/*!40000 ALTER TABLE `tokens` DISABLE KEYS */; -INSERT INTO `tokens` - VALUES - (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 13:10:46'), - (4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'), - (5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'), - (9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'), - (19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 16:52:33','2020-05-01 18:02:30'), - (20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'), - (23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'), - (46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 20:48:01','2020-05-06 20:48:07'), - (59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'), - (61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'), - (62,1,'session','d29acdcd0b8e4ebf78ef8f696d3e76df7e2ab2ac','2020-08-17 14:51:59','2020-08-17 14:53:22'); -/*!40000 ALTER TABLE `tokens` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `trackers` --- - -DROP TABLE IF EXISTS `trackers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `trackers` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `description` varchar(255) DEFAULT NULL, - `is_in_chlog` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `is_in_roadmap` tinyint(1) NOT NULL DEFAULT '1', - `fields_bits` int(11) DEFAULT '0', - `default_status_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `trackers` --- - -LOCK TABLES `trackers` WRITE; -/*!40000 ALTER TABLE `trackers` DISABLE KEYS */; -INSERT INTO `trackers` VALUES (1,'Play','',0,1,0,255,2); -/*!40000 ALTER TABLE `trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `user_preferences` --- - -DROP TABLE IF EXISTS `user_preferences`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `user_preferences` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL DEFAULT '0', - `others` text, - `hide_mail` tinyint(1) DEFAULT '1', - `time_zone` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_user_preferences_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- 
Dumping data for table `user_preferences` --- - -LOCK TABLES `user_preferences` WRITE; -/*!40000 ALTER TABLE `user_preferences` DISABLE KEYS */; -INSERT INTO `user_preferences` VALUES (1,1,'---\n:no_self_notified: \'1\'\n:my_page_layout:\n left:\n - issuesassignedtome\n right:\n - issuesreportedbyme\n:my_page_settings: {}\n:comments_sorting: asc\n:warn_on_leaving_unsaved: \'1\'\n:textarea_font: \'\'\n:recently_used_projects: 3\n:history_default_tab: notes\n:recently_used_project_ids: \'1\'\n',1,''),(3,9,'---\n:no_self_notified: \'1\'\n:comments_sorting: asc\n:warn_on_leaving_unsaved: \'1\'\n:textarea_font: \'\'\n:recently_used_projects: 3\n:history_default_tab: notes\n:my_page_layout:\n left:\n - issuesassignedtome\n right:\n - issuesreportedbyme\n:my_page_settings: {}\n:recently_used_project_ids: \'1\'\n',1,''); -/*!40000 ALTER TABLE `user_preferences` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `users` --- - -DROP TABLE IF EXISTS `users`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `users` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `login` varchar(255) NOT NULL DEFAULT '', - `hashed_password` varchar(40) NOT NULL DEFAULT '', - `firstname` varchar(30) NOT NULL DEFAULT '', - `lastname` varchar(255) NOT NULL DEFAULT '', - `admin` tinyint(1) NOT NULL DEFAULT '0', - `status` int(11) NOT NULL DEFAULT '1', - `last_login_on` datetime DEFAULT NULL, - `language` varchar(5) DEFAULT '', - `auth_source_id` int(11) DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `type` varchar(255) DEFAULT NULL, - `identity_url` varchar(255) DEFAULT NULL, - `mail_notification` varchar(255) NOT NULL DEFAULT '', - `salt` varchar(64) DEFAULT NULL, - `must_change_passwd` tinyint(1) NOT NULL DEFAULT '0', - `passwd_changed_on` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_users_on_id_and_type` (`id`,`type`), - KEY `index_users_on_auth_source_id` (`auth_source_id`), - KEY `index_users_on_type` (`type`) -) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `users` --- - -LOCK TABLES `users` WRITE; -/*!40000 ALTER TABLE `users` DISABLE KEYS */; -INSERT INTO `users` - VALUES - (1,'admin','ADMIN_HASH','Admin','Admin',1,1,'2020-08-17 18:03:20','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','ADMIN_SALT',0,'2020-04-26 13:10:27'), - (2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL), - (3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL), - (4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL), - (5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL), - (6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL), - (7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 18:43:58','Group',NULL,'',NULL,0,NULL) -; -/*!40000 ALTER TABLE `users` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `versions` --- - -DROP TABLE IF EXISTS `versions`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `versions` ( - 
`id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `name` varchar(255) NOT NULL DEFAULT '', - `description` varchar(255) DEFAULT '', - `effective_date` date DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `wiki_page_title` varchar(255) DEFAULT NULL, - `status` varchar(255) DEFAULT 'open', - `sharing` varchar(255) NOT NULL DEFAULT 'none', - PRIMARY KEY (`id`), - KEY `versions_project_id` (`project_id`), - KEY `index_versions_on_sharing` (`sharing`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `versions` --- - -LOCK TABLES `versions` WRITE; -/*!40000 ALTER TABLE `versions` DISABLE KEYS */; -/*!40000 ALTER TABLE `versions` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `watchers` --- - -DROP TABLE IF EXISTS `watchers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `watchers` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `watchable_type` varchar(255) NOT NULL DEFAULT '', - `watchable_id` int(11) NOT NULL DEFAULT '0', - `user_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `watchers_user_id_type` (`user_id`,`watchable_type`), - KEY `index_watchers_on_user_id` (`user_id`), - KEY `index_watchers_on_watchable_id_and_watchable_type` (`watchable_id`,`watchable_type`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `watchers` --- - -LOCK TABLES `watchers` WRITE; -/*!40000 ALTER TABLE `watchers` DISABLE KEYS */; -/*!40000 ALTER TABLE `watchers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `webhooks` --- - -DROP TABLE IF EXISTS `webhooks`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `webhooks` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `url` varchar(255) DEFAULT NULL, - `project_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `webhooks` --- - -LOCK TABLES `webhooks` WRITE; -/*!40000 ALTER TABLE `webhooks` DISABLE KEYS */; -INSERT INTO `webhooks` VALUES (1,'http://10.66.166.135:7000/playbook/webhook',1); -/*!40000 ALTER TABLE `webhooks` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_content_versions` --- - -DROP TABLE IF EXISTS `wiki_content_versions`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_content_versions` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_content_id` int(11) NOT NULL, - `page_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `data` longblob, - `compression` varchar(6) DEFAULT '', - `comments` varchar(1024) DEFAULT '', - `updated_on` datetime NOT NULL, - `version` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_content_versions_wcid` (`wiki_content_id`), - KEY `index_wiki_content_versions_on_updated_on` (`updated_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_content_versions` --- - -LOCK TABLES `wiki_content_versions` WRITE; -/*!40000 ALTER TABLE `wiki_content_versions` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_content_versions` ENABLE KEYS */; -UNLOCK TABLES; - --- 
--- Table structure for table `wiki_contents` --- - -DROP TABLE IF EXISTS `wiki_contents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_contents` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `page_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `text` longtext, - `comments` varchar(1024) DEFAULT '', - `updated_on` datetime NOT NULL, - `version` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_contents_page_id` (`page_id`), - KEY `index_wiki_contents_on_author_id` (`author_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_contents` --- - -LOCK TABLES `wiki_contents` WRITE; -/*!40000 ALTER TABLE `wiki_contents` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_contents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_pages` --- - -DROP TABLE IF EXISTS `wiki_pages`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_pages` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_id` int(11) NOT NULL, - `title` varchar(255) NOT NULL, - `created_on` datetime NOT NULL, - `protected` tinyint(1) NOT NULL DEFAULT '0', - `parent_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `wiki_pages_wiki_id_title` (`wiki_id`,`title`), - KEY `index_wiki_pages_on_wiki_id` (`wiki_id`), - KEY `index_wiki_pages_on_parent_id` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_pages` --- - -LOCK TABLES `wiki_pages` WRITE; -/*!40000 ALTER TABLE `wiki_pages` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_pages` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_redirects` --- - -DROP TABLE IF EXISTS `wiki_redirects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_redirects` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_id` int(11) NOT NULL, - `title` varchar(255) DEFAULT NULL, - `redirects_to` varchar(255) DEFAULT NULL, - `created_on` datetime NOT NULL, - `redirects_to_wiki_id` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_redirects_wiki_id_title` (`wiki_id`,`title`), - KEY `index_wiki_redirects_on_wiki_id` (`wiki_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_redirects` --- - -LOCK TABLES `wiki_redirects` WRITE; -/*!40000 ALTER TABLE `wiki_redirects` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_redirects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wikis` --- - -DROP TABLE IF EXISTS `wikis`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wikis` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `start_page` varchar(255) NOT NULL, - `status` int(11) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`), - KEY `wikis_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wikis` --- - -LOCK TABLES `wikis` WRITE; -/*!40000 ALTER TABLE `wikis` DISABLE KEYS */; -/*!40000 ALTER TABLE `wikis` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `workflows` --- - -DROP TABLE IF EXISTS `workflows`; -/*!40101 SET 
@saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `workflows` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `tracker_id` int(11) NOT NULL DEFAULT '0', - `old_status_id` int(11) NOT NULL DEFAULT '0', - `new_status_id` int(11) NOT NULL DEFAULT '0', - `role_id` int(11) NOT NULL DEFAULT '0', - `assignee` tinyint(1) NOT NULL DEFAULT '0', - `author` tinyint(1) NOT NULL DEFAULT '0', - `type` varchar(30) DEFAULT NULL, - `field_name` varchar(30) DEFAULT NULL, - `rule` varchar(30) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `wkfs_role_tracker_old_status` (`role_id`,`tracker_id`,`old_status_id`), - KEY `index_workflows_on_old_status_id` (`old_status_id`), - KEY `index_workflows_on_role_id` (`role_id`), - KEY `index_workflows_on_new_status_id` (`new_status_id`), - KEY `index_workflows_on_tracker_id` (`tracker_id`) -) ENGINE=InnoDB AUTO_INCREMENT=652 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `workflows` --- - -LOCK TABLES `workflows` WRITE; -/*!40000 ALTER TABLE `workflows` DISABLE KEYS */; -INSERT INTO `workflows` VALUES (132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','readonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12
','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermission','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonly'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission','6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(415,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPermission','12','readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3
,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(443,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'WorkflowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'WorkflowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission'
,'3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowPermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','readonly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly'),(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission','19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,
6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(648,1,4,3,2,0,0,'WorkflowTransition',NULL,NULL),(649,1,4,3,3,0,0,'WorkflowTransition',NULL,NULL),(650,1,4,3,4,0,0,'WorkflowTransition',NULL,NULL),(651,1,4,3,5,0,0,'WorkflowTransition',NULL,NULL); -/*!40000 ALTER TABLE `workflows` ENABLE KEYS */; -UNLOCK TABLES; -/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; - -/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; -/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; -/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; -/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; -/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; -/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; -/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; - --- Dump completed on 2020-08-17 18:06:56 diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls deleted file mode 100644 index f8395f7b24..0000000000 --- a/salt/playbook/init.sls +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'playbook/map.jinja' import PLAYBOOKMERGED %} - -include: -{% if PLAYBOOKMERGED.enabled %} - - playbook.enabled -{% else %} - - playbook.disabled -{% endif %} diff --git a/salt/playbook/map.jinja b/salt/playbook/map.jinja deleted file mode 100644 index 0ee058c689..0000000000 --- a/salt/playbook/map.jinja +++ /dev/null @@ -1,2 +0,0 @@ -{% import_yaml 'playbook/defaults.yaml' as PLAYBOOKDEFAULTS %} -{% set PLAYBOOKMERGED = salt['pillar.get']('playbook', PLAYBOOKDEFAULTS.playbook, merge=True) %} diff --git a/salt/playbook/soc_playbook.yaml b/salt/playbook/soc_playbook.yaml deleted file mode 100644 index e07ae8653a..0000000000 --- a/salt/playbook/soc_playbook.yaml +++ /dev/null @@ -1,4 +0,0 @@ -playbook: - enabled: - description: You can enable or disable Playbook. - helpLink: playbook.html diff --git a/salt/playbook/sostatus.sls b/salt/playbook/sostatus.sls deleted file mode 100644 index f635746d33..0000000000 --- a/salt/playbook/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-playbook_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-playbook - - unless: grep -q so-playbook /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/tools/sbin/so-playbook-import b/salt/playbook/tools/sbin/so-playbook-import deleted file mode 100755 index d775656a19..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-import +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -ENABLEPLAY=${1:-False} - -docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))" diff --git a/salt/playbook/tools/sbin/so-playbook-reset b/salt/playbook/tools/sbin/so-playbook-reset deleted file mode 100755 index 106d9e8528..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-reset +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -salt-call state.apply playbook.db_init,playbook queue=True - -/usr/sbin/so-soctopus-restart - -salt-call state.apply playbook,playbook.automation_user_create queue=True - -/usr/sbin/so-soctopus-restart - -echo "Importing Plays - NOTE: this will continue after installation finishes and could take an hour or more. Rebooting while the import is in progress will delay playbook imports." -sleep 5 -so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 & diff --git a/salt/playbook/tools/sbin/so-playbook-restart b/salt/playbook/tools/sbin/so-playbook-restart deleted file mode 100755 index c59e7f7eba..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-restart playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-ruleupdate b/salt/playbook/tools/sbin/so-playbook-ruleupdate deleted file mode 100755 index cbfe72bce5..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-ruleupdate +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -docker exec so-soctopus python3 playbook_bulk-update.py diff --git a/salt/playbook/tools/sbin/so-playbook-sigma-refresh b/salt/playbook/tools/sbin/so-playbook-sigma-refresh deleted file mode 100755 index fefd4ca682..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-sigma-refresh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -if ! [ -f /opt/so/state/playbook_regen_plays ] || [ "$1" = "--force" ]; then - - echo "Refreshing Sigma & regenerating plays... " - - # Regenerate ElastAlert & update Plays - docker exec so-soctopus python3 playbook_play-update.py - - # Delete current Elastalert Rules - rm /opt/so/rules/elastalert/playbook/*.yaml - - # Regenerate Elastalert Rules - so-playbook-sync - - # Create state file - touch /opt/so/state/playbook_regen_plays -else - printf "\nState file found, exiting...\nRerun with --force to override.\n" -fi \ No newline at end of file diff --git a/salt/playbook/tools/sbin/so-playbook-start b/salt/playbook/tools/sbin/so-playbook-start deleted file mode 100755 index 070bcc4f7b..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-start playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-stop b/salt/playbook/tools/sbin/so-playbook-stop deleted file mode 100755 index 64ce83b2b9..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-stop playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-sync b/salt/playbook/tools/sbin/so-playbook-sync deleted file mode 100755 index 7f6ba4e318..0000000000 --- a/salt/playbook/tools/sbin/so-playbook-sync +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -# Check to see if we are already running -NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync") -[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." 
&& exit 0 - -docker exec so-soctopus python3 playbook_play-sync.py diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py new file mode 100644 index 0000000000..48d3643745 --- /dev/null +++ b/salt/salt/engines/master/pillarWatch.py @@ -0,0 +1,153 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# -*- coding: utf-8 -*- + +import logging +import re +import os +import sys +log = logging.getLogger(__name__) + +# will need this in future versions of this engine +#import salt.client +#local = salt.client.LocalClient() + +def start(fpa, interval=10): + currentPillarValue = '' + previousPillarValue = '' + + ''' + def processJinjaFile(): + log.info("pillarWatch engine: processing jinja file") + log.info(pillarFile) + log.info(__salt__['jinja.load_map'](pillarFile, 'GLOBALMERGED')) + sys.exit(0) + ''' + + def checkChangesTakeAction(): + # if the pillar value changed, then we find what actions we should take + log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue") + if currentPillarValue != previousPillarValue: + log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) + # check if the previous pillar value is defined in the pillar from -> to actions + if previousPillarValue in actions['from']: + # check if the new / current pillar value is defined under to + if currentPillarValue in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # if the new / current pillar value isn't defined under to, is there a wildcard defined + elif '*' in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to']['*'] + # no action was defined for us to take when we see the pillar change + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from + elif '*' in actions['from']: + # is the new pillar value defined for the wildcard match + if currentPillarValue in actions['from']['*']['to']: + ACTIONS=actions['from']['*']['to'][currentPillarValue] + # if the new pillar doesn't have an action, was a wildcard defined + elif '*' in actions['from']['*']['to']: + # need more logic here for to and from + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + log.debug("pillarWatch engine: all defined actions: %s" % actions['from']) + log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) + for action in ACTIONS: + log.info("pillarWatch engine: action: %s" % action) + if action != 'NO DEFINED ACTION FOR US TO TAKE': + for saltModule, args in action.items(): + log.debug("pillarWatch engine: saltModule: %s" % saltModule) + log.debug("pillarWatch engine: args: %s" % args) + #__salt__[saltModule](**args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch 
engine: actionReturn: %s" % actionReturn) + + + log.debug("pillarWatch engine: ##### checking watched pillars for changes #####") + + # try to open the file that stores the previous runs data + # if the file doesn't exist, create a blank one + try: + # maybe change this location + dataFile = open("/opt/so/state/pillarWatch.txt", "r+") + except FileNotFoundError: + log.warn("pillarWatch engine: No previous pillarWatch data saved") + dataFile = open("/opt/so/state/pillarWatch.txt", "w+") + + df = dataFile.read() + for i in fpa: + log.trace("pillarWatch engine: files: %s" % i['files']) + log.trace("pillarWatch engine: pillar: %s" % i['pillar']) + log.trace("pillarWatch engine: actions: %s" % i['actions']) + pillarFiles = i['files'] + pillar = i['pillar'] + default = str(i['default']) + actions = i['actions'] + # these are the keys that we are going to look for as we traverse the pillarFiles + patterns = pillar.split(".") + # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file + for pillarFile in reversed(pillarFiles): + currentPillarValue = default + previousPillarValue = '' + ''' + if 'jinja' in os.path.splitext(pillarFile)[1]: + processJinjaFile() + ''' + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 + with open(pillarFile, "r") as file: + log.debug("pillarWatch engine: checking file: %s" % pillarFile) + for line in file: + log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) + log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) + # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar + # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look + # for pipeline. once pipeline is found, it will record the value + if re.search('^' + patterns[patternFound] + ':', line.strip()): + # strip the newline because it makes the logs u-g-l-y + log.debug("pillarWatch engine: found: %s" % line.strip('\n')) + patternFound += 1 + # we have found the final key in the pillar that we are looking for, get the previous value and current value + if patternFound == len(patterns): + currentPillarValue = str(line.split(":")[1]).strip() + # we have found the pillar so we dont need to loop through the file anymore + break + + # if key and value was found in the first file, then we don't want to look in + # any more files since we use the first file as the source of truth. + if patternFound == len(patterns): + break + + # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars + previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' + # check the contents of the dataFile that stores the previousPillarValue(s). + # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value + for l in df.splitlines(): + if pillar in l: + previousPillarValue = str(l.split(":")[1].strip()) + log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + # if the pillar we are checking for changes has been defined in the dataFile, + # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
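# ------------------------------------------------------------------------------
# Editor's illustrative sketch (plain Python, not part of the patch above):
# a minimal, self-contained approximation of how checkChangesTakeAction()
# resolves its ACTIONS list from the from/to mapping that engines.conf passes
# in via 'fpa'. The resolve_actions() helper, the example dict, and the
# ETOPEN/ETPRO values are assumptions for illustration only; only the lookup
# order (exact match on the old/new value first, then the '*' wildcard) is
# taken from the engine code above.

def resolve_actions(actions, previous, current):
    """Return the configured action list for a previous -> current pillar change."""
    frm = actions.get('from', {})
    # prefer an exact match on the previous value, then fall back to '*'
    for prev_key in (previous, '*'):
        if prev_key in frm:
            to = frm[prev_key].get('to', {})
            # prefer an exact match on the new value, then fall back to '*'
            for cur_key in (current, '*'):
                if cur_key in to:
                    return to[cur_key]
            return []  # 'from' matched but no 'to' entry: nothing to run
    return []          # no 'from' entry (exact or wildcard) for the old value

# With the idstools.config.ruleset watch defined in engines.conf below, any
# change (e.g. a hypothetical ETOPEN -> ETPRO switch) maps to so-rule-update:
example = {'from': {'*': {'to': {'*': [{'cmd.run': {'cmd': '/usr/sbin/so-rule-update'}}]}}}}
assert resolve_actions(example, 'ETOPEN', 'ETPRO') == [{'cmd.run': {'cmd': '/usr/sbin/so-rule-update'}}]
# The engine would then invoke each resolved action roughly as
# __salt__['cmd.run'](cmd='/usr/sbin/so-rule-update').
# ------------------------------------------------------------------------------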
+ if pillar in df: + df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) + else: + df += pillar + ': ' + currentPillarValue + '\n' + log.trace("pillarWatch engine: df: %s" % df) + if previousPillarValue != "PREVIOUSPILLARVALUENOTSAVEDINDATAFILE": + checkChangesTakeAction() + else: + log.info("pillarWatch engine: %s was not previously tracked. not tacking action." % pillar) + + + dataFile.seek(0) + dataFile.write(df) + dataFile.truncate() + dataFile.close() diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 7c43e99e10..4f3bc31a14 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -4,3 +4,30 @@ engines_dirs: engines: - checkmine: interval: 60 + - pillarWatch: + fpa: + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.ruleset + default: ETOPEN + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.oinkcode + default: '' + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + interval: 10 diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e014..6e320e4a6e 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -27,6 +27,11 @@ checkmine_engine: - source: salt://salt/engines/master/checkmine.py - makedirs: True +pillarWatch_engine: + file.managed: + - name: /etc/salt/engines/pillarWatch.py + - source: salt://salt/engines/master/pillarWatch.py + engines_config: file.managed: - name: /etc/salt/master.d/engines.conf @@ -38,6 +43,7 @@ salt_master_service: - enable: True - watch: - file: checkmine_engine + - file: pillarWatch_engine - file: engines_config - order: last diff --git a/salt/soc/config.sls b/salt/soc/config.sls index e4dad8df2e..8d1f0f694d 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -9,7 +9,14 @@ include: - manager.sync_es_users -socdirtest: +sigmarepodir: + file.directory: + - name: /opt/so/conf/sigma/repos + - user: 939 + - group: 939 + - makedirs: True + +socdirelastaertrules: file.directory: - name: /opt/so/rules/elastalert/rules - user: 939 @@ -45,6 +52,15 @@ socsaltdir: - mode: 770 - makedirs: True +socanalytics: + file.managed: + - name: /opt/so/conf/soc/analytics.js + - source: salt://soc/files/soc/analytics.js + - user: 939 + - group: 939 + - mode: 600 + - show_changes: False + socconfig: file.managed: - name: /opt/so/conf/soc/soc.json @@ -64,6 +80,36 @@ socmotd: - mode: 600 - template: jinja +filedetectionsbackup: + file.managed: + - name: /opt/so/conf/soc/so-detections-backup.py + - source: salt://soc/files/soc/so-detections-backup.py + - user: 939 + - group: 939 + - mode: 600 + +crondetectionsruntime: + cron.present: + - name: /usr/sbin/so-detections-runtime-status cron + - identifier: detections-runtime-status + - user: root + - minute: '*/10' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + +crondetectionsbackup: + cron.present: + - name: python3 /opt/so/conf/soc/so-detections-backup.py &>> /opt/so/log/soc/detections-backup.log + - identifier: detections-backup + - user: root + - minute: '0' + - hour: '0' + - daymonth: '*' + - month: '*' + - dayweek: '*' + socsigmafinalpipeline: file.managed: - name: /opt/so/conf/soc/sigma_final_pipeline.yaml diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 
de372a98fd..9f5faf50bf 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1,5 +1,6 @@ soc: enabled: False + telemetryEnabled: true config: logFilename: /opt/sensoroni/logs/sensoroni-server.log logLevel: info @@ -70,30 +71,39 @@ soc: icon: fa-person-running target: '' links: - - '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' - name: actionProcessAncestors description: actionProcessAncestorsHelp icon: fa-people-roof target: '' links: - - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - name: actionRelatedAlerts + description: actionRelatedAlertsHelp + icon: fa-bell + links: + - '/#/alerts?q=rule.uuid: {:so_detection.publicId|escape} | groupby rule.name event.module* event.severity_label' + target: '' eventFields: default: - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - log.id.uid - network.community_id - - event.dataset - ':kratos:audit': + ':kratos:': - soc_timestamp + - event.dataset - http_request.headers.x-real-ip - identity_id - http_request.headers.user-agent + - msg '::conn': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -104,6 +114,7 @@ soc: - network.community_id '::dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -114,6 +125,7 @@ soc: - log.id.uid '::dhcp': - soc_timestamp + - event.dataset - client.address - server.address - host.domain @@ -122,6 +134,7 @@ soc: - log.id.uid '::dnp3': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -130,6 +143,7 @@ soc: - log.id.uid '::dnp3_control': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -139,6 +153,7 @@ soc: - log.id.uid '::dnp3_objects': - soc_timestamp + - event.dataset - 
source.ip - source.port - destination.ip @@ -148,6 +163,7 @@ soc: - log.id.uid '::dns': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -160,6 +176,7 @@ soc: - network.community_id '::dpd': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -170,6 +187,7 @@ soc: - log.id.uid '::file': - soc_timestamp + - event.dataset - source.ip - destination.ip - file.name @@ -180,6 +198,7 @@ soc: - log.id.uid '::ftp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -192,6 +211,7 @@ soc: - log.id.uid '::http': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -206,6 +226,7 @@ soc: - network.community_id '::intel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -216,6 +237,7 @@ soc: - log.id.uid '::irc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -228,6 +250,7 @@ soc: - log.id.uid '::kerberos': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -238,6 +261,7 @@ soc: - log.id.uid '::modbus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -246,6 +270,7 @@ soc: - log.id.uid '::mysql': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -257,6 +282,7 @@ soc: - log.id.uid '::notice': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -268,6 +294,7 @@ soc: - network.community_id '::ntlm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -280,6 +307,7 @@ soc: - log.id.uid '::pe': - soc_timestamp + - event.dataset - file.is_64bit - file.is_exe - file.machine @@ -288,6 +316,7 @@ soc: - log.id.fuid '::radius': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -299,6 +328,7 @@ soc: - radius.result '::rdp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -314,6 +344,7 @@ soc: - log.id.uid '::rfb': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -325,6 +356,7 @@ soc: - log.id.uid '::signatures': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -338,6 +370,7 @@ soc: - log.id.uid '::sip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -355,6 +388,7 @@ soc: - log.id.uid '::smb_files': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -368,6 +402,7 @@ soc: - log.id.uid '::smb_mapping': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -378,11 +413,12 @@ soc: - log.id.uid '::smtp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - - smtp.from + - smtp.mail_from - smtp.recipient_to - smtp.subject - smtp.useragent @@ -390,6 +426,7 @@ soc: - network.community_id '::snmp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -399,6 +436,7 @@ soc: - log.id.uid '::socks': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -410,11 +448,13 @@ soc: - log.id.uid '::software': - soc_timestamp + - event.dataset - source.ip - software.name - software.type '::ssh': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -425,19 +465,52 @@ soc: - ssh.client - ssh.server - log.id.uid - '::ssl': + ':suricata:ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - ssl.server_name - ssl.certificate.subject + - 
ssl.version + - log.id.uid + ':zeek:ssl': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - ssl.server_name - ssl.validation_status - ssl.version - log.id.uid + '::ssl': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - ssl.server_name + - ssl.version + - log.id.uid + '::stun': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - stun.class + - stun.method + - stun.attribute.types + - log.id.uid ':zeek:syslog': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -446,17 +519,18 @@ soc: - network.protocol - syslog.severity - log.id.uid - '::tunnels': + '::tunnel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - - tunnel_type - - action - - log.id.uid + - event.action + - tunnel.type '::weird': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -465,6 +539,7 @@ soc: - log.id.uid '::x509': - soc_timestamp + - event.dataset - x509.certificate.subject - x509.certificate.key.type - x509.certificate.key.length @@ -472,6 +547,7 @@ soc: - log.id.fuid '::firewall': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -483,6 +559,7 @@ soc: - network.community_id ':pfsense:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -494,16 +571,26 @@ soc: - network.community_id ':osquery:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - source.hostname - - event.dataset - process.executable - user.name + ':strelka:': + - soc_timestamp + - event.dataset + - file.name + - file.size + - hash.md5 + - file.source + - file.mime_type + - log.id.fuid ':strelka:file': - soc_timestamp + - event.dataset - file.name - file.size - hash.md5 @@ -512,6 +599,7 @@ soc: - log.id.fuid ':suricata:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -523,23 +611,25 @@ soc: - network.community_id ':windows_eventlog:': - soc_timestamp + - event.dataset - user.name ':elasticsearch:': - soc_timestamp + - event.dataset - agent.name - message - log.level - metadata.version - metadata.pipeline - - event.dataset ':kibana:': - soc_timestamp + - event.dataset - host.name - message - kibana.log.meta.req.headers.x-real-ip - - event.dataset ':syslog:syslog': - soc_timestamp + - event.dataset - host.name - metadata.ip_address - real_message @@ -547,6 +637,7 @@ soc: - syslog.application ':aws:': - soc_timestamp + - event.dataset - aws.cloudtrail.event_category - aws.cloudtrail.event_type - event.provider @@ -558,6 +649,7 @@ soc: - source.geo.region_iso_code ':squid:': - soc_timestamp + - event.dataset - url.original - destination.ip - destination.geo.country_iso_code @@ -565,6 +657,7 @@ soc: - source.ip '::sysmon_operational': - soc_timestamp + - event.dataset - event.action - winlog.computer_name - user.name @@ -572,51 +665,58 @@ soc: - process.pid '::network_connection': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - source.hostname - - event.dataset - process.executable - user.name '::process_terminated': - soc_timestamp + - event.dataset - process.executable - process.pid - winlog.computer_name '::file_create': - soc_timestamp + - event.dataset - file.target - process.executable - process.pid - winlog.computer_name '::registry_value_set': - soc_timestamp + - event.dataset - 
winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name '::process_creation': - soc_timestamp + - event.dataset - process.command_line - process.pid - process.parent.executable - process.working_directory '::registry_create_delete': - soc_timestamp + - event.dataset - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name '::dns_query': - soc_timestamp + - event.dataset - dns.query.name - dns.answers.name - process.executable - winlog.computer_name '::file_create_stream_hash': - soc_timestamp + - event.dataset - file.target - hash.md5 - hash.sha256 @@ -625,6 +725,7 @@ soc: - winlog.computer_name '::bacnet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -634,6 +735,7 @@ soc: - log.id.uid '::bacnet_discovery': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -643,6 +745,7 @@ soc: - log.id.uid '::bacnet_property': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -652,6 +755,7 @@ soc: - log.id.uid '::bsap_ip_header': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -661,12 +765,14 @@ soc: - log.id.uid '::bsap_ip_rdb': - soc_timestamp + - event.dataset - bsap.application.function - bsap.application.sub.function - bsap.vector.variables - log.id.uid '::bsap_serial_header': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -677,11 +783,13 @@ soc: - log.id.uid '::bsap_serial_rdb': - soc_timestamp + - event.dataset - bsap.rdb.function - bsap.vector.variables - log.id.uid '::cip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -689,9 +797,9 @@ soc: - cip.service - cip.status_code - log.id.uid - - event.dataset '::cip_identity': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -701,6 +809,7 @@ soc: - log.id.uid '::cip_io': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -710,6 +819,7 @@ soc: - log.id.uid '::cotp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -718,6 +828,7 @@ soc: - log.id.uid '::ecat_arp_info': - soc_timestamp + - event.dataset - source.ip - destination.ip - source.mac @@ -725,6 +836,7 @@ soc: - ecat.arp.type '::ecat_aoe_info': - soc_timestamp + - event.dataset - source.mac - source.port - destination.mac @@ -732,6 +844,7 @@ soc: - ecat.command '::ecat_coe_info': - soc_timestamp + - event.dataset - ecat.message.number - ecat.message.type - ecat.request.response.type @@ -739,6 +852,7 @@ soc: - ecat.sub.index '::ecat_dev_info': - soc_timestamp + - event.dataset - ecat.device.type - ecat.features - ecat.ram.size @@ -746,17 +860,20 @@ soc: - ecat.slave.address '::ecat_log_address': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command '::ecat_registers': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command - ecat.register.type '::enip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -764,9 +881,9 @@ soc: - enip.command - enip.status_code - log.id.uid - - event.dataset '::modbus_detailed': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -775,6 +892,7 @@ soc: - log.id.uid '::opcua_binary': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -784,6 +902,7 @@ soc: - log.id.uid '::opcua_binary_activate_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -794,6 
+913,7 @@ soc: - log.id.uid '::opcua_binary_activate_session_diagnostic_info': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -803,6 +923,7 @@ soc: - log.id.uid '::opcua_binary_activate_session_locale_id': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -812,6 +933,7 @@ soc: - log.id.uid '::opcua_binary_browse': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -821,6 +943,7 @@ soc: - log.id.uid '::opcua_binary_browse_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -828,6 +951,7 @@ soc: - log.id.uid '::opcua_binary_browse_response_references': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -837,6 +961,7 @@ soc: - log.id.uid '::opcua_binary_browse_result': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -845,6 +970,7 @@ soc: - log.id.uid '::opcua_binary_create_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -853,6 +979,7 @@ soc: - log.id.uid '::opcua_binary_create_session_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -862,6 +989,7 @@ soc: - log.id.uid '::opcua_binary_create_session_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -870,6 +998,7 @@ soc: - log.id.uid '::opcua_binary_create_subscription': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -878,6 +1007,7 @@ soc: - log.id.uid '::opcua_binary_get_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -887,6 +1017,7 @@ soc: - log.id.uid '::opcua_binary_get_endpoints_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -896,6 +1027,7 @@ soc: - log.id.uid '::opcua_binary_get_endpoints_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -905,6 +1037,7 @@ soc: - log.id.uid '::opcua_binary_read': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -914,6 +1047,7 @@ soc: - log.id.uid '::opcua_binary_status_code_detail': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -923,6 +1057,7 @@ soc: - log.id.uid '::profinet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -932,6 +1067,7 @@ soc: - log.id.uid '::profinet_dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -940,6 +1076,7 @@ soc: - log.id.uid '::s7comm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -949,6 +1086,7 @@ soc: - log.id.uid '::s7comm_plus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -958,6 +1096,7 @@ soc: - log.id.uid '::s7comm_read_szl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -967,6 +1106,7 @@ soc: - log.id.uid '::s7comm_upload_download': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -976,49 +1116,50 @@ soc: - log.id.uid '::tds': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.command - log.id.uid - - event.dataset '::tds_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.procedure_name - log.id.uid - - event.dataset '::tds_sql_batch': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - 
destination.port - tds.header_type - log.id.uid - - event.dataset ':endpoint:events_x_api': - soc_timestamp + - event.dataset - host.name - user.name - process.name - process.Ext.api.name - process.thread.Ext.call_stack_final_user_module.path - - event.dataset ':endpoint:events_x_file': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - file.path - - event.dataset ':endpoint:events_x_library': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ -1026,9 +1167,9 @@ soc: - dll.path - dll.code_signature.status - dll.code_signature.subject_name - - event.dataset ':endpoint:events_x_network': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ -1038,39 +1179,113 @@ soc: - destination.ip - destination.port - network.community_id - - event.dataset ':endpoint:events_x_process': - soc_timestamp + - event.dataset - host.name - user.name - process.parent.name - process.name - event.action - process.working_directory - - event.dataset ':endpoint:events_x_registry': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - registry.path - - event.dataset ':endpoint:events_x_security': - soc_timestamp + - event.dataset - host.name - user.name - process.executable - event.action - event.outcome + ':system:': + - soc_timestamp + - event.dataset + - process.name + - process.pid + - user.effective.name + - user.name + - system.auth.sudo.command + - message + ':opencanary:': + - soc_timestamp - event.dataset + - source.ip + - source.port + - logdata.HOSTNAME + - destination.port + - logdata.PATH + - logdata.USERNAME + - logdata.USERAGENT + ':elastic_agent:': + - soc_timestamp + - event.dataset + - message + ':kismet:': + - soc_timestamp + - event.dataset + - device.manufacturer + - client.mac + - network.wireless.ssid + - network.wireless.bssid + ':playbook:': + - soc_timestamp + - event.dataset + - rule.name + - event.severity_label + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid + ':sigma:': + - soc_timestamp + - event.dataset + - rule.name + - event.severity_label + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid + ':netflow:': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - network.type + - network.transport + - network.direction + - netflow.type + - netflow.exporter.version + - observer.ip + ':soc:': + - soc_timestamp + - event.dataset + - source.ip + - soc.fields.requestMethod + - soc.fields.requestPath + - soc.fields.statusCode + - event.action + - soc.fields.error server: bindAddress: 0.0.0.0:9822 baseUrl: / maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - airgapEnabled: false modules: cases: soc filedatastore: @@ -1079,11 +1294,38 @@ soc: hostUrl: elastalertengine: allowRegex: '' - autoUpdateEnabled: false + autoUpdateEnabled: true + autoEnabledSigmaRules: + default: + - core+critical + - securityonion-resources+critical + - securityonion-resources+high + so-eval: + - securityonion-resources+critical + - securityonion-resources+high + so-import: + - securityonion-resources+critical + - securityonion-resources+high communityRulesImportFrequencySeconds: 86400 + communityRulesImportErrorSeconds: 
300 + failAfterConsecutiveErrorCount: 10 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert + reposFolder: /opt/sensoroni/sigma/repos rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint + stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state + integrityCheckFrequencySeconds: 600 + rulesRepos: + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-resources + license: Elastic-2.0 + folder: sigma/stable + community: true + airgap: + - repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources + license: Elastic-2.0 + folder: sigma/stable + community: true sigmaRulePackages: - core - emerging_threats_addon @@ -1131,19 +1373,38 @@ soc: - rbac/users_roles strelkaengine: allowRegex: '' - autoUpdateEnabled: false - compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py - denyRegex: '.*' + autoEnabledYaraRules: + - securityonion-yara + autoUpdateEnabled: true + communityRulesImportFrequencySeconds: 86400 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 + compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py + denyRegex: '' reposFolder: /opt/sensoroni/yara/repos rulesRepos: - - repo: https://github.com/Security-Onion-Solutions/securityonion-yara - license: DRL + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-yara + license: DRL + community: true + airgap: + - repo: file:///nsm/rules/detect-yara/repos/securityonion-yara + license: DRL + community: true yaraRulesFolder: /opt/sensoroni/yara/rules + stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state + integrityCheckFrequencySeconds: 600 suricataengine: allowRegex: '' + autoUpdateEnabled: true + communityRulesImportFrequencySeconds: 86400 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 communityRulesFile: /nsm/rules/suricata/emerging-all.rules - denyRegex: '.*' + denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint + stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state + integrityCheckFrequencySeconds: 600 client: enableReverseLookup: false docsUrl: /docs/ @@ -1154,7 +1415,7 @@ soc: tipTimeoutMs: 6000 cacheExpirationMs: 300000 casesEnabled: true - detectionsEnabled: false + detectionsEnabled: true inactiveTools: ['toolUnused'] tools: - name: toolKibana @@ -1182,11 +1443,6 @@ soc: icon: fa-external-link-alt target: so-cyberchef link: /cyberchef/ - - name: toolPlaybook - description: toolPlaybookHelp - icon: fa-external-link-alt - target: so-playbook - link: /playbook/projects/detection-playbooks/issues/ - name: toolNavigator description: toolNavigatorHelp icon: fa-external-link-alt @@ -1235,7 +1491,7 @@ soc: showSubtitle: true - name: Elastalerts description: '' - query: '_type:elastalert | groupby rule.name' + query: 'event.dataset:sigma.alert | groupby rule.name' showSubtitle: true - name: Alerts description: Show all alerts grouped by alert source @@ -1492,202 +1748,226 @@ soc: queries: - name: Overview description: Overview of all events - query: '* | groupby -sankey event.dataset event.category* | groupby -pie event.category | groupby -bar event.module* | groupby event.dataset | groupby event.module* | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - - name: SOC Auth - description: SOC (Security Onion Console) authentication logs - query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby 
-sankey http_request.headers.x-real-ip identity_id | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent' - - name: Elastalerts - description: Elastalert logs - query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type' + query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: SOC Logins + description: SOC (Security Onion Console) logins + query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip identity_id | groupby identity_id | groupby http_request.headers.user-agent' + - name: SOC Login Failures + description: SOC (Security Onion Console) login failures + query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent' - name: Alerts description: Overview of all alerts - query: 'tags:alert | groupby event.module* | groupby rule.name | groupby event.severity | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination_geo.organization_name' - name: NIDS Alerts description: NIDS (Network Intrusion Detection System) alerts - query: 'event.category:network AND tags:alert | groupby rule.category | groupby -sankey source.ip destination.ip | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - - name: Sysmon Overview - description: Overview of all Sysmon data types - query: 'event.dataset:windows.sysmon_operational | groupby -sankey event.action host.name | groupby -sankey host.name user.name | groupby host.name | groupby event.category event.action | groupby user.name | groupby dns.question.name | groupby process.executable | groupby winlog.event_data.TargetObject | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination_geo.organization_name' + - name: Elastic Agent Overview + description: Overview of all events from Elastic Agents + query: 'event.module:endpoint | groupby event.dataset | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name' + - name: Elastic Agent API Events + description: API (Application Programming Interface) events from Elastic Agents + query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name' + - name: Elastic Agent File Events + description: File events 
from Elastic Agents + query: 'event.dataset:endpoint.events.file | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby file.path' + - name: Elastic Agent Library Events + description: Library events from Elastic Agents + query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' + - name: Elastic Agent Network Events + description: Network events from Elastic Agents + query: 'event.dataset:endpoint.events.network | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Elastic Agent Process Events + description: Process events from Elastic Agents + query: 'event.dataset:endpoint.events.process | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.parent.name | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.action | groupby process.working_directory' + - name: Elastic Agent Registry Events + description: Registry events from Elastic Agents + query: 'event.dataset:endpoint.events.registry | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby registry.path' + - name: Elastic Agent Security Events + description: Security events from Elastic Agents + query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome' - name: Host Overview description: Overview of all host data types - query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby host.name | groupby user.name | groupby file.name | groupby process.executable' + query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby -sankey event.type host.name | groupby host.name | groupby user.name | groupby file.name | groupby process.executable' - name: Host Registry Changes description: Windows Registry changes - query: 'event.category: registry | groupby -sankey event.action host.name | groupby event.dataset event.action | groupby host.name | groupby process.executable | groupby registry.path | groupby process.executable registry.path' - - name: Host DNS & Process Mappings + query: 'event.category: registry | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby event.dataset 
event.action | groupby process.executable | groupby registry.path | groupby process.executable registry.path' + - name: Host DNS and Process Mappings description: DNS queries mapped to originating processes - query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' + query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby host.name | groupby -sankey host.name dns.question.name | groupby dns.question.name | groupby event.dataset event.type | groupby process.executable | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' - - name: Host File Activity - description: File activity captured on an endpoint - query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable' - - name: Host Network & Process Mappings + query: 'event.category:process | groupby host.name | groupby -sankey host.name user.name* | groupby user.name | groupby event.dataset event.action | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' + - name: Host File and Process Mappings + description: File activity mapped to originating processes + query: 'event.category: file AND _exists_:process.name AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.name | groupby process.name | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name' + - name: Host Network and Process Mappings description: Network activity mapped to originating processes - query: 'event.category: network AND _exists_:process.executable | groupby -sankey event.action host.name | groupby -sankey host.name user.name | groupby event.dataset* event.type* event.action* | groupby host.name | groupby user.name | groupby dns.question.name | groupby process.executable | groupby winlog.event_data.TargetObject | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' - - name: Host API Events - description: API (Application Programming Interface) events from endpoints - query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby process.Ext.api.name' - - name: Host Library Events - description: Library events from 
endpoints - query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' - - name: Host Security Events - description: Security events from endpoints - query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.executable | groupby event.action | groupby event.outcome' + query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Sysmon Overview + description: Overview of all Sysmon data types + query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Strelka description: Strelka file analysis - query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.name' + query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby -sankey file.source file.name | groupby file.name' - name: Zeek Notice description: Zeek notice logs - query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - - name: Connections and Metadata with community_id - description: Network connections that include community_id - query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source_geo.organization_name | groupby destination_geo.organization_name' + - name: Connections and Metadata with Community ID + description: Network connections that include network.community_id + query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby 
destination.port | groupby network.protocol | groupby source_geo.organization_name | groupby source.geo.country_name | groupby destination_geo.organization_name | groupby destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' - name: Connections seen by Zeek or Suricata description: Network connections logged by Zeek or Suricata query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' - name: DCE_RPC description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata - query: 'tags:dce_rpc | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.endpoint | groupby dce_rpc.operation | groupby dce_rpc.named_pipe | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dce_rpc | groupby dce_rpc.endpoint | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.operation | groupby -sankey dce_rpc.operation dce_rpc.named_pipe | groupby dce_rpc.named_pipe | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: DHCP description: DHCP (Dynamic Host Configuration Protocol) leases - query: 'tags:dhcp | groupby host.hostname | groupby dhcp.message_types | groupby -sankey client.address server.address | groupby client.address | groupby server.address | groupby host.domain' + query: 'tags:dhcp | groupby host.hostname | groupby -sankey host.hostname client.address | groupby client.address | groupby -sankey client.address server.address | groupby server.address | groupby dhcp.message_types | groupby host.domain' - name: DNS description: DNS (Domain Name System) queries - query: 'tags:dns | groupby dns.query.name | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby -sankey source.ip destination.ip | groupby dns.answers.name | groupby dns.query.type_name | groupby dns.response.code_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination_geo.organization_name' - name: DPD description: DPD (Dynamic Protocol Detection) errors - query: 'tags:dpd | groupby error.reason | groupby network.protocol | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby 
destination.port | groupby network.protocol | groupby destination_geo.organization_name' - name: Files description: Files seen in network traffic query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination_geo.organization_name' - name: FTP description: FTP (File Transfer Protocol) network metadata - query: 'tags:ftp | groupby -sankey ftp.command destination.ip | groupby ftp.command | groupby ftp.argument | groupby ftp.user | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:ftp | groupby ftp.command | groupby -sankey ftp.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ftp.argument | groupby ftp.user' - name: HTTP description: HTTP (Hyper Text Transport Protocol) network metadata query: 'tags:http | groupby http.method | groupby -sankey http.method http.virtual_host | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Intel description: Zeek Intel framework hits - query: 'tags:intel | groupby intel.indicator | groupby -sankey source.ip intel.indicator | groupby intel.indicator_type | groupby intel.seen_where | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:intel | groupby intel.indicator | groupby -sankey intel.indicator source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby intel.indicator_type | groupby intel.seen_where' - name: IRC description: IRC (Internet Relay Chat) network metadata query: 'tags:irc | groupby irc.command.type | groupby -sankey irc.command.type irc.username | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Kerberos description: Kerberos network metadata - query: 'tags:kerberos | groupby kerberos.service | groupby -sankey kerberos.service destination.ip | groupby kerberos.client | groupby kerberos.request_type | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:kerberos | groupby kerberos.service | groupby -sankey kerberos.service source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby kerberos.client | groupby kerberos.request_type' - name: MySQL description: MySQL network metadata - query: 'tags:mysql | groupby mysql.command | groupby -sankey mysql.command destination.ip | groupby mysql.argument | groupby mysql.success | groupby mysql.response | groupby mysql.rows | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:mysql | groupby mysql.command | groupby -sankey mysql.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby mysql.argument | groupby mysql.success | groupby mysql.response | groupby mysql.rows' - name: NTLM description: NTLM (New Technology LAN 
Manager) network metadata - query: 'tags:ntlm | groupby ntlm.server.dns.name | groupby ntlm.server.nb.name | groupby -sankey source.ip destination.ip | groupby ntlm.server.tree.name | groupby ntlm.success | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:ntlm | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ntlm.server.dns.name | groupby ntlm.server.nb.name | groupby ntlm.server.tree.name | groupby ntlm.success | groupby source.ip | groupby destination.ip' - name: PE description: PE (Portable Executable) files transferred via network traffic - query: 'tags:pe | groupby file.machine | groupby -sankey file.machine file.os | groupby file.os | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit' + query: 'tags:pe | groupby file.machine | groupby -sankey file.machine file.os | groupby file.os | groupby -sankey file.os file.subsystem | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit' - name: RADIUS description: RADIUS (Remote Authentication Dial-In User Service) network metadata - query: 'tags:radius | groupby -sankey user.name destination.ip | groupby user.name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:radius | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: RDP description: RDP (Remote Desktop Protocol) network metadata - query: 'tags:rdp | groupby client.name | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rdp | groupby client.name | groupby -sankey client.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: RFB description: RFB (Remote Frame Buffer) network metadata - query: 'tags:rfb | groupby rfb.desktop.name | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rfb | groupby rfb.desktop.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Signatures description: Zeek signatures query: 'event.dataset:zeek.signatures | groupby signature_id' - name: SIP description: SIP (Session Initiation Protocol) network metadata - query: 'tags:sip | groupby client.user_agent | groupby sip.method | groupby sip.uri | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:sip | groupby sip.method | groupby -sankey sip.method source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby client.user_agent | groupby sip.method | groupby sip.uri' - name: SMB_Files description: Files transferred via SMB (Server Message Block) - query: 'tags:smb_files | groupby file.action | groupby file.path | groupby file.name | groupby 
-sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:smb_files | groupby file.action | groupby -sankey file.action source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby file.path | groupby file.name' - name: SMB_Mapping description: SMB (Server Message Block) mapping network metadata - query: 'tags:smb_mapping | groupby smb.share_type | groupby smb.path | groupby smb.service | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:smb_mapping | groupby smb.share_type | groupby -sankey smb.share_type smb.path | groupby smb.path | groupby -sankey smb.path smb.service | groupby smb.service | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: SMTP description: SMTP (Simple Mail Transfer Protocol) network metadata - query: 'tags:smtp | groupby smtp.from | groupby smtp.recipient_to | groupby -sankey source.ip destination.ip | groupby smtp.subject | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:smtp | groupby smtp.mail_from | groupby -sankey smtp.mail_from smtp.recipient_to | groupby smtp.recipient_to | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby smtp.subject | groupby destination_geo.organization_name' - name: SNMP description: SNMP (Simple Network Management Protocol) network metadata - query: 'tags:snmp | groupby snmp.community | groupby snmp.version | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:snmp | groupby snmp.community | groupby -sankey snmp.community source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby snmp.version' - name: Software description: Software seen by Zeek via network traffic - query: 'tags:software | groupby -sankey software.type source.ip | groupby software.type | groupby software.name | groupby source.ip' + query: 'tags:software | groupby software.type | groupby -sankey software.type source.ip | groupby source.ip | groupby software.name' - name: SSH description: SSH (Secure Shell) connections seen by Zeek - query: 'tags:ssh | groupby ssh.client | groupby ssh.server | groupby -sankey source.ip destination.ip | groupby ssh.direction | groupby ssh.version | groupby ssh.hassh_version | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby source_geo.organization_name | groupby destination_geo.organization_name' - name: SSL description: SSL/TLS network metadata - query: 'tags:ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey source.ip ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + query: 'tags:ssl | groupby 
ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + - name: SSL - Suricata + description: SSL/TLS network metadata from Suricata + query: 'event.dataset:suricata.ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + - name: SSL - Zeek + description: SSL/TLS network metadata from Zeek + query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata - query: 'tags:stun* | groupby -sankey source.ip destination.ip | groupby destination.geo.country_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby event.dataset' + query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby stun.class | groupby -sankey stun.class stun.method | groupby stun.method | groupby stun.attribute.types' - name: Syslog description: Syslog logs - query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol' + query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby event.dataset' - name: TDS description: TDS (Tabular Data Stream) network metadata - query: 'tags:tds* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby tds.command | groupby tds.header_type | groupby tds.procedure_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby tds.query' + query: 'tags:tds* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby tds.command | groupby tds.header_type | groupby tds.procedure_name | groupby tds.query' - name: Tunnel description: Tunnels seen by Zeek - query: 'tags:tunnel | groupby -sankey source.ip destination.ip | groupby tunnel.type | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name' + query: 'tags:tunnel | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby tunnel.type | groupby event.action | groupby destination.geo.country_name' - name: Weird description: Weird network traffic seen by Zeek - query: 'event.dataset:zeek.weird | groupby -sankey weird.name destination.ip | groupby weird.name | groupby weird.additional_info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.weird | groupby weird.name | 
groupby -sankey weird.name source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: WireGuard description: WireGuard VPN network metadata - query: 'tags:wireguard | groupby -sankey source.ip destination.ip | groupby destination.geo.country_name | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:wireguard | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name' - name: x509 description: x.509 certificates seen by Zeek - query: 'tags:x509 | groupby -sankey x509.certificate.key.length x509.san_dns | groupby x509.certificate.key.length | groupby x509.san_dns | groupby x509.certificate.key.type | groupby x509.certificate.subject | groupby x509.certificate.issuer' + query: 'tags:x509 | groupby x509.certificate.key.length | groupby -sankey x509.certificate.key.length x509.san_dns | groupby x509.san_dns | groupby x509.certificate.key.type | groupby x509.certificate.subject | groupby x509.certificate.issuer' - name: ICS Overview description: Overview of ICS (Industrial Control Systems) network metadata - query: 'tags:ics | groupby event.dataset | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby source.mac | groupby destination.mac' + query: 'tags:ics | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.mac | groupby destination.mac' - name: ICS BACnet description: BACnet (Building Automation and Control Networks) network metadata - query: 'tags:bacnet* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:bacnet* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS BSAP description: BSAP (Bristol Standard Asynchronous Protocol) network metadata - query: 'tags:bsap* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:bsap* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS CIP description: CIP (Common Industrial Protocol) network metadata - query: 'tags:cip* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:cip* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS COTP description: COTP (Connection Oriented Transport Protocol) network metadata - query: 'tags:cotp* | groupby -sankey source.ip destination.ip | groupby cotp.pdu.name | groupby cotp.pdu.code | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:cotp* | groupby cotp.pdu.name | groupby -sankey cotp.pdu.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby 
destination.port | groupby cotp.pdu.code' - name: ICS DNP3 description: DNP3 (Distributed Network Protocol) network metadata - query: 'tags:dnp3* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby dnp3.function_code | groupby dnp3.object_type | groupby dnp3.fc_request | groupby dnp3.fc_reply | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:dnp3* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dnp3.function_code | groupby dnp3.object_type | groupby dnp3.fc_request | groupby dnp3.fc_reply' - name: ICS ECAT description: ECAT (Ethernet for Control Automation Technology) network metadata - query: 'tags:ecat* | groupby -sankey event.dataset source.mac destination.mac | groupby event.dataset | groupby source.mac | groupby destination.mac | groupby ecat.command | groupby ecat.register.type' + query: 'tags:ecat* | groupby event.dataset | groupby -sankey event.dataset ecat.command | groupby ecat.command | groupby -sankey ecat.command source.mac | groupby source.mac | groupby -sankey source.mac destination.mac | groupby destination.mac | groupby ecat.register.type' - name: ICS ENIP description: ENIP (Ethernet Industrial Protocol) network metadata - query: 'tags:enip* | groupby -sankey source.ip destination.ip | groupby enip.command | groupby enip.status_code | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:enip* | groupby enip.command | groupby -sankey enip.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby enip.status_code' - name: ICS Modbus description: Modbus network metadata - query: 'tags:modbus* | groupby -sankey event.dataset modbus.function | groupby event.dataset | groupby modbus.function | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:modbus* | groupby event.dataset | groupby -sankey event.dataset modbus.function | groupby modbus.function | groupby -sankey modbus.function source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS OPC UA description: OPC UA (Unified Architecture) network metadata - query: 'tags:opcua* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:opcua* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS Profinet description: Profinet (Process Field Network) network metadata - query: 'tags:profinet* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:profinet* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS S7 description: S7 (Siemens) network metadata - query: 'tags:s7* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:s7* | groupby event.dataset | groupby -sankey 
event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' + - name: NetFlow + description: NetFlow records + query: 'event.module:netflow | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.type | groupby network.transport | groupby network.direction | groupby netflow.type | groupby netflow.exporter.version | groupby observer.ip | groupby source.as.organization.name | groupby source.geo.country_name | groupby destination.as.organization.name | groupby destination.geo.country_name' - name: Firewall description: Firewall logs - query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'observer.type:firewall | groupby event.action | groupby -sankey event.action observer.ingress.interface.name | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Firewall Auth description: Firewall authentication logs query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message' - name: VLAN description: VLAN (Virtual Local Area Network) tagged logs - query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' + query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby -sankey network.vlan.id source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' - name: GeoIP - Destination Countries description: GeoIP tagged logs visualized by destination countries query: '* AND _exists_:destination.geo.country_name | groupby destination.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby event.dataset | groupby event.module' @@ -1700,6 +1980,15 @@ soc: - name: GeoIP - Source Organizations description: GeoIP tagged logs visualized by source organizations query: '* AND _exists_:source_geo.organization_name | groupby source_geo.organization_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.geo.country_name | groupby event.dataset | groupby event.module' + - name: Kismet - WiFi Devices + description: WiFi devices seen by Kismet sensors + query: 'event.module: kismet | groupby network.wireless.ssid | groupby device.manufacturer | groupby -pie device.manufacturer | groupby event.dataset' + - name: SOC Detections - Runtime Status + description: Runtime Status of Detections + query: 'event.dataset:soc.detections | groupby soc.detection_type soc.error_type | groupby soc.error_analysis | groupby soc.rule.name | groupby 
soc.error_message' + + + job: alerts: advanced: false @@ -1717,6 +2006,7 @@ soc: eventFields: default: - soc_timestamp + - event.dataset - rule.name - event.severity_label - source.ip @@ -1729,13 +2019,38 @@ soc: - rule.rev ':playbook:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - - event_data.event.module - - event_data.event.category + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port - event_data.process.executable - event_data.process.pid - - event_data.winlog.computer_name + ':sigma:': + - soc_timestamp + - event.dataset + - rule.name + - event.severity_label + - event_data.event.dataset + - rule.category + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid + ':strelka:': + - soc_timestamp + - event.dataset + - file.name + - file.size + - hash.md5 + - file.source + - file.mime_type + - log.id.fuid queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged @@ -1750,17 +2065,17 @@ soc: - acknowledged queries: - name: 'Group By Name, Module' - query: '* | groupby rule.name event.module* event.severity_label' + query: '* | groupby rule.name event.module* event.severity_label rule.uuid' - name: 'Group By Sensor, Source IP/Port, Destination IP/Port, Name' - query: '* | groupby observer.name source.ip source.port destination.ip destination.port rule.name network.community_id event.severity_label' + query: '* | groupby observer.name source.ip source.port destination.ip destination.port rule.name network.community_id event.severity_label rule.uuid' - name: 'Group By Source IP, Name' - query: '* | groupby source.ip rule.name event.severity_label' + query: '* | groupby source.ip rule.name event.severity_label rule.uuid' - name: 'Group By Source Port, Name' - query: '* | groupby source.port rule.name event.severity_label' + query: '* | groupby source.port rule.name event.severity_label rule.uuid' - name: 'Group By Destination IP, Name' - query: '* | groupby destination.ip rule.name event.severity_label' + query: '* | groupby destination.ip rule.name event.severity_label rule.uuid' - name: 'Group By Destination Port, Name' - query: '* | groupby destination.port rule.name event.severity_label' + query: '* | groupby destination.port rule.name event.severity_label rule.uuid' - name: Ungroup query: '*' grid: @@ -1866,6 +2181,7 @@ soc: - red customEnabled: false detections: + advanced: true viewEnabled: true createLink: /detection/create eventFetchLimit: 500 @@ -1874,29 +2190,49 @@ soc: mostRecentlyUsedLimit: 5 safeStringMaxLength: 100 queryBaseFilter: '_index:"*:so-detection" AND so_kind:detection' + presets: + manualSync: + customEnabled: false + labels: + - ElastAlert + - Strelka + - Suricata eventFields: default: - so_detection.title - so_detection.isEnabled - - so_detection.language - so_detection.severity + - so_detection.language + - so_detection.ruleset + - soc_timestamp queries: - name: "All Detections" - query: "_id:*" + query: "_id:* | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled" + description: Show all Detections, community and custom - name: "Custom Detections" - query: "so_detection.isCommunity:false" + query: "so_detection.isCommunity:false AND NOT so_detection.ruleset: securityonion-resources" + description: Show all custom detections - name: "All Detections - Enabled" - query: 
"so_detection.isEnabled:true" + query: "so_detection.isEnabled:true | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity" + description: Show all enalbed Detections - name: "All Detections - Disabled" - query: "so_detection.isEnabled:false" + query: "so_detection.isEnabled:false | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity" + description: Show all disabled Detections - name: "Detection Type - Suricata (NIDS)" - query: "so_detection.language:suricata" - - name: "Detection Type - Sigma - All" - query: "so_detection.language:sigma" - - name: "Detection Type - Sigma - Windows" - query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"' - - name: "Detection Type - Yara (Strelka)" - query: "so_detection.language:yara" + query: "so_detection.language:suricata | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category" + description: Show all NIDS Detections, which are run with Suricata + - name: "Detection Type - Sigma (Elastalert) - All" + query: "so_detection.language:sigma | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category | groupby so_detection.product" + description: Show all Sigma Detections, which are run with Elastalert + - name: "Detection Type - YARA (Strelka)" + query: "so_detection.language:yara | groupby so_detection.ruleset so_detection.isEnabled" + description: Show all YARA detections, which are used by Strelka + - name: "Security Onion - Grid Detections" + query: "so_detection.ruleset:securityonion-resources" + description: Show Detections for this Security Onion Grid + - name: "Detections with Overrides" + query: "_exists_:so_detection.overrides | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled" + description: Show Detections that have Overrides detection: presets: severity: diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 93ca07ac88..4d4b5f6fdc 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -8,6 +8,7 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} {% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %} +{% from 'soc/merged.map.jinja' import SOCMERGED %} include: - soc.config @@ -24,12 +25,16 @@ so-soc: - binds: - /nsm/rules:/nsm/rules:rw - /opt/so/conf/strelka:/opt/sensoroni/yara:rw + - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw - /opt/so/log/soc/:/opt/sensoroni/logs/:rw - /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro +{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %} + - /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro +{% endif %} - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro - /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro @@ -39,6 +44,9 @@ so-soc: - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw - /opt/so/conf/soc/queue:/opt/sensoroni/queue:rw - /opt/so/saltstack:/opt/so/saltstack:rw + - /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw + - /nsm/backup/detections-migration:/nsm/backup/detections-migration:ro + - /opt/so/state:/opt/so/state:rw - extra_hosts: {% for node in DOCKER_EXTRA_HOSTS %} {% for hostname, 
ip in node.items() %} @@ -66,6 +74,7 @@ so-soc: - file: socdatadir - file: soclogdir - file: socconfig + - file: socanalytics - file: socmotd - file: socbanner - file: soccustom diff --git a/salt/soc/files/bin/compile_yara.py b/salt/soc/files/bin/compile_yara.py deleted file mode 100644 index 43c8b1a09c..0000000000 --- a/salt/soc/files/bin/compile_yara.py +++ /dev/null @@ -1,14 +0,0 @@ -import os -import yara -import glob -import sys - -def compile_yara_rules(rules_dir: str) -> None: - compiled_rules_path: str = os.path.join(rules_dir, "rules.yar.compiled") - rule_files: list[str] = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) - - if rule_files: - rules: yara.Rules = yara.compile(filepaths={os.path.basename(f): f for f in rule_files}) - rules.save(compiled_rules_path) - -compile_yara_rules(sys.argv[1]) diff --git a/salt/soc/files/soc/analytics.js b/salt/soc/files/soc/analytics.js new file mode 100644 index 0000000000..6a0d72d5d4 --- /dev/null +++ b/salt/soc/files/soc/analytics.js @@ -0,0 +1,5 @@ +(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': + new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], + j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= + 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); + })(window,document,'script','dataLayer','GTM-TM46SL7T'); diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md index d6b0d3d279..005a2be0f8 100644 --- a/salt/soc/files/soc/motd.md +++ b/salt/soc/files/soc/motd.md @@ -12,6 +12,10 @@ To see all the latest features and fixes in this version of Security Onion, clic Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)! +## Premium Support + +Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations. + ## Customize This Space Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface. 
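The `analytics.js` file added above is the standard Google Tag Manager bootstrap for container `GTM-TM46SL7T`, and the `so-soc` state only bind-mounts it when the new SOC Telemetry setting is enabled and the grid is not airgapped. A minimal sketch of that gating logic, with a hypothetical helper name that is not part of this change:

```
# Hypothetical helper, not part of the codebase: mirrors the Jinja guard
# `{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}` used above in
# salt/soc/enabled.sls to decide whether analytics.js is bind-mounted into so-soc.
def analytics_mount_enabled(telemetry_enabled: bool, airgap: bool) -> bool:
    # The mount (and therefore the Google Tag Manager loader) is active only when
    # telemetry is switched on AND the grid is not an airgap deployment.
    return telemetry_enabled and not airgap

assert analytics_mount_enabled(True, False) is True    # telemetry on, internet-connected grid
assert analytics_mount_enabled(True, True) is False    # airgap overrides the telemetry setting
assert analytics_mount_enabled(False, False) is False  # telemetry disabled by the operator
```

Under this condition, airgapped grids never load the tag manager, regardless of how the telemetry setting is configured.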
diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index d227c3f01e..8314361f5a 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -17,6 +17,16 @@ transformations: dst_ip: destination.ip.keyword dst_port: destination.port winlog.event_data.User: user.name + logtype: event.code # OpenCanary + # Maps "opencanary" product to SO IDH logs + - id: opencanary_idh_add-fields + type: add_condition + conditions: + event.module: 'opencanary' + event.dataset: 'opencanary.idh' + rule_conditions: + - type: logsource + product: opencanary # Maps "antivirus" category to Windows Defender logs shipped by Elastic Agent Winlog Integration # winlog.event_data.threat_name has to be renamed prior to ingestion, it is originally winlog.event_data.Threat Name - id: antivirus_field-mappings_windows-defender @@ -79,3 +89,20 @@ transformations: - type: logsource product: windows category: driver_load + - id: linux_security_add-fields + type: add_condition + conditions: + event.module: 'system' + event.dataset: 'system.auth' + rule_conditions: + - type: logsource + product: linux + service: auth + # event.code should always be a string + - id: convert_event_code_to_string + type: convert_type + target_type: 'str' + field_name_conditions: + - type: include_fields + fields: + - event.code diff --git a/salt/soc/files/soc/so-detections-backup.py b/salt/soc/files/soc/so-detections-backup.py new file mode 100644 index 0000000000..085b1e4c7e --- /dev/null +++ b/salt/soc/files/soc/so-detections-backup.py @@ -0,0 +1,113 @@ +# Copyright 2020-2023 Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +# This script queries Elasticsearch for Custom Detections and all Overrides, +# and git commits them to disk at $OUTPUT_DIR + +import os +import subprocess +import json +import requests +from requests.auth import HTTPBasicAuth +import urllib3 +from datetime import datetime + +# Suppress SSL warnings +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +# Constants +ES_URL = "https://localhost:9200/so-detection/_search" +QUERY_DETECTIONS = '{"query": {"bool": {"must": [{"match_all": {}}, {"term": {"so_detection.ruleset": "__custom__"}}]}},"size": 10000}' +QUERY_OVERRIDES = '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}},"size": 10000}' +OUTPUT_DIR = "/nsm/backup/detections/repo" +AUTH_FILE = "/opt/so/conf/elasticsearch/curl.config" + +def get_auth_credentials(auth_file): + with open(auth_file, 'r') as file: + for line in file: + if line.startswith('user ='): + return line.split('=', 1)[1].strip().replace('"', '') + +def query_elasticsearch(query, auth): + headers = {"Content-Type": "application/json"} + response = requests.get(ES_URL, headers=headers, data=query, auth=auth, verify=False) + response.raise_for_status() + return response.json() + +def save_content(hit, base_folder, subfolder="", extension="txt"): + so_detection = hit["_source"]["so_detection"] + public_id = so_detection["publicId"] + content = so_detection["content"] + file_dir = os.path.join(base_folder, subfolder) + os.makedirs(file_dir, exist_ok=True) + file_path = os.path.join(file_dir, f"{public_id}.{extension}") + with open(file_path, "w") as f: + f.write(content) + return file_path + +def save_overrides(hit): + so_detection = hit["_source"]["so_detection"] + public_id = so_detection["publicId"] + overrides = so_detection["overrides"] + language = so_detection["language"] + folder = os.path.join(OUTPUT_DIR, language, "overrides") + os.makedirs(folder, exist_ok=True) + extension = "yaml" if language == "sigma" else "txt" + file_path = os.path.join(folder, f"{public_id}.{extension}") + with open(file_path, "w") as f: + f.write('\n'.join(json.dumps(override) for override in overrides) if isinstance(overrides, list) else overrides) + return file_path + +def ensure_git_repo(): + if not os.path.isdir(os.path.join(OUTPUT_DIR, '.git')): + subprocess.run(["git", "config", "--global", "init.defaultBranch", "main"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "init"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "remote", "add", "origin", "default"], check=True) + +def commit_changes(): + ensure_git_repo() + subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.email", "securityonion@local.invalid"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.name", "securityonion"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "add", "."], check=True) + status_result = subprocess.run(["git", "-C", OUTPUT_DIR, "status"], capture_output=True, text=True) + print(status_result.stdout) + commit_result = subprocess.run(["git", "-C", OUTPUT_DIR, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) + if commit_result.returncode == 1: + print("No changes to commit.") + elif commit_result.returncode == 0: + print("Changes committed successfully.") + else: + commit_result.check_returncode() + +def main(): + try: + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"Backing up Custom Detections and all Overrides to {OUTPUT_DIR} - {timestamp}\n") + + os.makedirs(OUTPUT_DIR, exist_ok=True) + + 
auth_credentials = get_auth_credentials(AUTH_FILE) + username, password = auth_credentials.split(':', 1) + auth = HTTPBasicAuth(username, password) + + # Query and save custom detections + detections = query_elasticsearch(QUERY_DETECTIONS, auth)["hits"]["hits"] + for hit in detections: + save_content(hit, OUTPUT_DIR, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt") + + # Query and save overrides + overrides = query_elasticsearch(QUERY_OVERRIDES, auth)["hits"]["hits"] + for hit in overrides: + save_overrides(hit) + + commit_changes() + + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"Backup Completed - {timestamp}") + except Exception as e: + print(f"An error occurred: {e}") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/salt/soc/files/soc/so-detections-backup_test.py b/salt/soc/files/soc/so-detections-backup_test.py new file mode 100644 index 0000000000..3afa118865 --- /dev/null +++ b/salt/soc/files/soc/so-detections-backup_test.py @@ -0,0 +1,159 @@ +# Copyright 2020-2023 Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +import unittest +from unittest.mock import patch, MagicMock, mock_open, call +import requests +import os +import subprocess +import json +from datetime import datetime +import importlib + +ds = importlib.import_module('so-detections-backup') + +class TestBackupScript(unittest.TestCase): + + def setUp(self): + self.output_dir = '/nsm/backup/detections/repo' + self.auth_file_path = '/nsm/backup/detections/repo' + self.mock_auth_data = 'user = "so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS"' + self.auth_credentials = 'so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS' + self.auth = requests.auth.HTTPBasicAuth('so_elastic', '@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS') + self.mock_detection_hit = { + "_source": { + "so_detection": { + "publicId": "test_id", + "content": "test_content", + "language": "suricata" + } + } + } + self.mock_override_hit = { + "_source": { + "so_detection": { + "publicId": "test_id", + "overrides": [{"key": "value"}], + "language": "sigma" + } + } + } + + def assert_file_written(self, mock_file, expected_path, expected_content): + mock_file.assert_called_once_with(expected_path, 'w') + mock_file().write.assert_called_once_with(expected_content) + + @patch('builtins.open', new_callable=mock_open, read_data='user = "so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS"') + def test_get_auth_credentials(self, mock_file): + credentials = ds.get_auth_credentials(self.auth_file_path) + self.assertEqual(credentials, self.auth_credentials) + mock_file.assert_called_once_with(self.auth_file_path, 'r') + + @patch('requests.get') + def test_query_elasticsearch(self, mock_get): + mock_response = MagicMock() + mock_response.json.return_value = {'hits': {'hits': []}} + mock_response.raise_for_status = MagicMock() + mock_get.return_value = mock_response + + response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth) + + self.assertEqual(response, {'hits': {'hits': []}}) + mock_get.assert_called_once_with( + ds.ES_URL, + headers={"Content-Type": 
"application/json"}, + data=ds.QUERY_DETECTIONS, + auth=self.auth, + verify=False + ) + + @patch('os.makedirs') + @patch('builtins.open', new_callable=mock_open) + def test_save_content(self, mock_file, mock_makedirs): + file_path = ds.save_content(self.mock_detection_hit, self.output_dir, 'subfolder', 'txt') + expected_path = f'{self.output_dir}/subfolder/test_id.txt' + self.assertEqual(file_path, expected_path) + mock_makedirs.assert_called_once_with(f'{self.output_dir}/subfolder', exist_ok=True) + self.assert_file_written(mock_file, expected_path, 'test_content') + + @patch('os.makedirs') + @patch('builtins.open', new_callable=mock_open) + def test_save_overrides(self, mock_file, mock_makedirs): + file_path = ds.save_overrides(self.mock_override_hit) + expected_path = f'{self.output_dir}/sigma/overrides/test_id.yaml' + self.assertEqual(file_path, expected_path) + mock_makedirs.assert_called_once_with(f'{self.output_dir}/sigma/overrides', exist_ok=True) + self.assert_file_written(mock_file, expected_path, json.dumps({"key": "value"})) + + @patch('subprocess.run') + def test_ensure_git_repo(self, mock_run): + mock_run.return_value = MagicMock(returncode=0) + + ds.ensure_git_repo() + + mock_run.assert_has_calls([ + call(["git", "config", "--global", "init.defaultBranch", "main"], check=True), + call(["git", "-C", self.output_dir, "init"], check=True), + call(["git", "-C", self.output_dir, "remote", "add", "origin", "default"], check=True) + ]) + + @patch('subprocess.run') + def test_commit_changes(self, mock_run): + mock_status_result = MagicMock() + mock_status_result.stdout = "On branch main\nnothing to commit, working tree clean" + mock_commit_result = MagicMock(returncode=1) + # Ensure sufficient number of MagicMock instances for each subprocess.run call + mock_run.side_effect = [mock_status_result, mock_commit_result, MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0)] + + print("Running test_commit_changes...") + ds.commit_changes() + print("Finished test_commit_changes.") + + mock_run.assert_has_calls([ + call(["git", "-C", self.output_dir, "config", "user.email", "securityonion@local.invalid"], check=True), + call(["git", "-C", self.output_dir, "config", "user.name", "securityonion"], check=True), + call(["git", "-C", self.output_dir, "add", "."], check=True), + call(["git", "-C", self.output_dir, "status"], capture_output=True, text=True), + call(["git", "-C", self.output_dir, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) + ]) + + @patch('builtins.print') + @patch('so-detections-backup.commit_changes') + @patch('so-detections-backup.save_overrides') + @patch('so-detections-backup.save_content') + @patch('so-detections-backup.query_elasticsearch') + @patch('so-detections-backup.get_auth_credentials') + @patch('os.makedirs') + def test_main(self, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print): + mock_get_auth.return_value = self.auth_credentials + mock_query.side_effect = [ + {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}]}}, + {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}]}} + ] + + with patch('datetime.datetime') as mock_datetime: + mock_datetime.now.return_value.strftime.return_value = "2024-05-23 20:49:44" + ds.main() + + 
mock_makedirs.assert_called_once_with(self.output_dir, exist_ok=True) + mock_get_auth.assert_called_once_with(ds.AUTH_FILE) + mock_query.assert_has_calls([ + call(ds.QUERY_DETECTIONS, self.auth), + call(ds.QUERY_OVERRIDES, self.auth) + ]) + mock_save_content.assert_called_once_with( + {"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}, + self.output_dir, + "sigma", + "yaml" + ) + mock_save_overrides.assert_called_once_with( + {"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}} + ) + mock_commit.assert_called_once() + mock_print.assert_called() + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index c22ed2210d..f2c88fde98 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -30,6 +30,24 @@ {# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #} {% do SOCMERGED.config.server.modules.pop('cases') %} +{# set Sigma rules based on role if defined and default if not #} +{% if GLOBALS.role in SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules[GLOBALS.role]}) %} +{% else %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %} +{% endif %} + +{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #} +{% if GLOBALS.airgap %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': true}) %} +{% else %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': false}) %} +{% endif %} + {# remove these modules if detections is disabled #} {% if not SOCMERGED.config.server.client.detectionsEnabled %} {% do SOCMERGED.config.server.modules.pop('elastalertengine') %} @@ -41,10 +59,6 @@ {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} {% endif %} -{% if pillar.manager.playbook == 0 %} -{% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} -{% endif %} - {% set standard_actions = SOCMERGED.config.pop('actions') %} {% if pillar.global.endgamehost != '' %} @@ -76,6 +90,7 @@ {% do SOCMERGED.config.server.client.update({'job': {'actions': standard_actions}}) %} {% do SOCMERGED.config.server.client.alerts.update({'actions': standard_actions}) %} {% do SOCMERGED.config.server.client.cases.update({'actions': standard_actions}) %} +{% do SOCMERGED.config.server.client.detections.update({'actions': standard_actions}) %} {# replace the _x_ with . 
for soc ui to config conversion #} {% do SOCMERGED.config.eventFields.update({':endpoint:events.api': SOCMERGED.config.eventFields.pop(':endpoint:events_x_api') }) %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index cb939f7581..1f64eb0bc7 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -2,6 +2,11 @@ soc: enabled: description: You can enable or disable SOC. advanced: True + telemetryEnabled: + title: SOC Telemetry + description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. + global: True + helpLink: telemetry.html files: soc: banner__md: @@ -34,7 +39,7 @@ soc: helpLink: soc-customization.html sigma_final_pipeline__yaml: title: Final Sigma Pipeline - description: Final Processing Pipeline for Sigma Rules (future use, not yet complete) + description: Final Processing Pipeline for Sigma Rules. syntax: yaml file: True global: True @@ -78,14 +83,53 @@ soc: advanced: True modules: elastalertengine: - sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone. (future use, not yet complete)' + additionalAlerters: + title: Additional Alerters + description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter' to find this related setting. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key. global: True - advanced: False - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false. (future use, not yet complete)' + helpLink: sigma.html + forcedType: "[]string" + multiline: True + allowRegex: + description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: sigma.html + autoEnabledSigmaRules: + default: &autoEnabledSigmaRules + description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical. These will be applied based on role if defined and default if not.' + global: True + advanced: True + helpLink: sigma.html + so-eval: *autoEnabledSigmaRules + so-import: *autoEnabledSigmaRules + denyRegex: + description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' 
+ global: True + advanced: True + helpLink: sigma.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new Sigma rules (in seconds). This applies to both Community Rule Packages and any configured Git repos.' + global: True + advanced: True + helpLink: sigma.html + integrityCheckFrequencySeconds: + description: 'How often the ElastAlert integrity checker runs (in seconds). This verifies the integrity of deployed rules.' global: True advanced: True + rulesRepos: + default: &eerulesRepos + description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update." + global: True + advanced: True + forcedType: "[]{}" + helpLink: sigma.html + airgap: *eerulesRepos + sigmaRulePackages: + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, the new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + global: True + advanced: False + helpLink: sigma.html elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. @@ -99,6 +143,7 @@ soc: description: Duration (in milliseconds) to wait for a response from the Elasticsearch host before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int casesEnabled: description: Set to true if the SOC case management module, natively integrated with Elasticsearch, should be enabled. global: True @@ -143,13 +188,63 @@ soc: description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int relayTimeoutMs: description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int strelkaengine: - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false. (future use, not yet complete)' + allowRegex: + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: yara.html + autoEnabledYaraRules: + description: 'YARA rules to automatically enable on initial import.
Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' + global: True + advanced: True + helpLink: yara.html + denyRegex: + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: yara.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new YARA rules (in seconds). This applies to both Community Rules and any configured Git repos.' + global: True + advanced: True + helpLink: yara.html + integrityCheckFrequencySeconds: + description: 'How often the Strelka integrity checker runs (in seconds). This verifies the integrity of deployed rules.' + global: True + advanced: True + rulesRepos: + default: &serulesRepos + description: "Custom Git repositories to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Strelka --> Full Update." + global: True + advanced: True + forcedType: "[]{}" + helpLink: yara.html + airgap: *serulesRepos + suricataengine: + allowRegex: + description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: suricata.html + denyRegex: + description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: suricata.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new Suricata rules (in seconds).' + global: True + advanced: True + helpLink: suricata.html + integrityCheckFrequencySeconds: + description: 'How often the Suricata integrity checker runs (in seconds). This verifies the integrity of deployed rules.' global: True advanced: True client: @@ -160,22 +255,26 @@ soc: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int webSocketTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting. global: True advanced: True + forcedType: int tipTimeoutMs: description: Duration (in milliseconds) to show the popup tips, which typically indicate a successful operation. global: True + forcedType: int cacheExpirationMs: description: Duration (in milliseconds) of cached data within the browser, including users and settings. global: True advanced: True + forcedType: int casesEnabled: description: Set to true to enable case management in SOC. global: True detectionsEnabled: - description: Set to true to enable the Detections module in SOC. (future use, not yet complete) + description: Set to true to enable the Detections module in SOC. global: True inactiveTools: description: List of external tools to remove from the SOC UI. @@ -219,6 +318,7 @@ soc: alerts: *appSettings cases: *appSettings dashboards: *appSettings + detections: *appSettings grid: maxUploadSize: description: The maximum number of bytes for an uploaded PCAP import file.
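
The elastalertengine descriptions added above reference several value formats without showing them: additionalAlerters takes one ElastAlert 2 alerter name per line, autoEnabledSigmaRules uses the $Ruleset+$Level notation, and each rulesRepos entry carries a required 'license' field plus optional 'folder' and 'community' fields. A minimal sketch of how such an override might look in the soc pillar follows; the list shapes and the repo URL key name are assumptions for illustration, not shipped defaults.

```yaml
# Hypothetical soc pillar override illustrating the formats described above.
# The 'repo' key name is an assumption; 'license', 'folder', and 'community'
# come directly from the rulesRepos description.
soc:
  config:
    server:
      modules:
        elastalertengine:
          additionalAlerters:
            - email                          # one ElastAlert 2 alerter name per line
          autoEnabledSigmaRules:
            default:
              - core+critical                # $Ruleset+$Level
              - core+high
          rulesRepos:
            default:
              - repo: https://github.com/example/sigma-rules.git   # hypothetical URL
                license: DRL                                        # required
                folder: rules/windows                               # optional
                community: false
```

Per the merged.map.jinja change earlier in this patch, SOC selects the role-specific autoEnabledSigmaRules entry (falling back to default) and the airgap or default rulesRepos variant at render time.
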
diff --git a/salt/soc/tools/sbin/so-detections-runtime-status b/salt/soc/tools/sbin/so-detections-runtime-status new file mode 100644 index 0000000000..ed3ee58008 --- /dev/null +++ b/salt/soc/tools/sbin/so-detections-runtime-status @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# Set the default output destination to stdout +output_dest="/dev/stdout" + +# If the "cron" flag is passed, change the output destination to the log file +if [ "$1" = "cron" ]; then + output_dest="/opt/so/log/soc/detections_runtime-status_sigma.log" +fi + +# Run the query and output based on the output_dest value +/sbin/so-elasticsearch-query '*:elastalert_error*/_search' -d '{"query":{"range":{"@timestamp":{"gte":"now-11m","lte":"now"}}},"size": 50}' | \ +jq --compact-output '.hits.hits[] | { + _timestamp: ._source["@timestamp"], + "rule.name": ._source.data.rule, + error_type: "runtime_status", + error_message: ._source.message, + detection_type: "sigma", + event_module: "soc", + event_dataset: "soc.detections", + error_analysis: ( + if ._source.message | contains("Unknown column [winlog.channel]") then "Target logsource never seen" + elif ._source.message | contains("parsing_exception") then "Syntax Error" + else "Unknown" + end + ) + }' >> $output_dest + diff --git a/salt/soctopus/config.sls b/salt/soctopus/config.sls deleted file mode 100644 index 35b55d2968..0000000000 --- a/salt/soctopus/config.sls +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -include: - - nginx.config - -soctopusdir: - file.directory: - - name: /opt/so/conf/soctopus/sigma-import - - user: 939 - - group: 939 - - makedirs: True - -soctopus-sync: - file.recurse: - - name: /opt/so/conf/soctopus/templates - - source: salt://soctopus/files/templates - - user: 939 - - group: 939 - - template: jinja - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopusconf: - file.managed: - - name: /opt/so/conf/soctopus/SOCtopus.conf - - source: salt://soctopus/files/SOCtopus.conf - - user: 939 - - group: 939 - - mode: 600 - - template: jinja - - show_changes: False - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopuslogdir: - file.directory: - - name: /opt/so/log/soctopus - - user: 939 - - group: 939 - -playbookrulesdir: - file.directory: - - name: /opt/so/rules/elastalert/playbook - - user: 939 - - group: 939 - - makedirs: True - -playbookrulessync: - file.recurse: - - name: /opt/so/rules/elastalert/playbook - - source: salt://soctopus/files/templates - - user: 939 - - group: 939 - - template: jinja - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopus_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://soctopus/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#soctopus_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://soctopus/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/defaults.yaml b/salt/soctopus/defaults.yaml deleted file mode 100644 index cb7f286aeb..0000000000 --- a/salt/soctopus/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -soctopus: - enabled: False diff --git a/salt/soctopus/disabled.sls b/salt/soctopus/disabled.sls deleted file mode 100644 index 9293a9d71d..0000000000 --- a/salt/soctopus/disabled.sls +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - soctopus.sostatus - -so-soctopus: - docker_container.absent: - - force: True - -so-soctopus_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-soctopus$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/enabled.sls b/salt/soctopus/enabled.sls deleted file mode 100644 index 567562fbbb..0000000000 --- a/salt/soctopus/enabled.sls +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'docker/docker.map.jinja' import DOCKER %} - -include: - - soctopus.config - - soctopus.sostatus - -so-soctopus: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }} - - hostname: soctopus - - name: so-soctopus - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }} - - binds: - - /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro - - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw - - /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw - - /opt/so/conf/navigator/layers/:/etc/playbook/:rw - - /opt/so/conf/soctopus/sigma-import/:/SOCtopus/sigma-import/:rw - {% if GLOBALS.airgap %} - - /nsm/repo/rules/sigma:/soctopus/sigma - {% endif %} - {% if DOCKER.containers['so-soctopus'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - extra_hosts: - - {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}} - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-soctopus'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - {% if DOCKER.containers['so-soctopus'].extra_env %} - - environment: - {% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - watch: - - file: /opt/so/conf/soctopus/SOCtopus.conf - - require: - - file: soctopusconf - - file: navigatordefaultlayer - -delete_so-soctopus_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-soctopus$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf deleted file mode 100644 index d9dd03f167..0000000000 --- a/salt/soctopus/files/SOCtopus.conf +++ /dev/null @@ -1,77 +0,0 @@ -{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} -{%- set THEHIVEURL = salt['pillar.get']('global:hiveurl', '') %} -{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} -{%- set PLAYBOOK_KEY = salt['pillar.get']('secrets:playbook_automation_api_key', '') %} -{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} -{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} - -[es] -es_url = https://{{ GLOBALS.manager_ip }}:9200 -es_ip = {{ GLOBALS.manager_ip }} -es_user = {{ ES_USER }} -es_pass = "{{ ES_PASS }}" -es_index_pattern = so-* -es_verifycert = no - -[cortex] -auto_analyze_alerts = no -cortex_url = https://{{THEHIVEURL}}/cortex/ -cortex_key = {{ CORTEXKEY }} -supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS - -[fir] -fir_url = YOURFIRURL -fir_token = YOURFIRTOKEN -fir_actor = 3 -fir_category = 3 -fir_confidentiality = 1 -fir_detection = 2 -fir_plan = 8 -fir_severity = 4 -fir_verifycert = no - -[grr] -grr_url = YOURGRRURL -grr_user = YOURGRRUSER -grr_pass = YOURGRRPASS - -[hive] -hive_url = https://{{THEHIVEURL}}/thehive/ -hive_key = {{ HIVEKEY }} -hive_tlp = 3 -hive_verifycert = no - -[misp] -misp_url = 
YOURMISPURL -misp_key = YOURMISPKEY -misp_verifycert = no -distrib = 0 -threat = 4 -analysis = 0 - -[rtir] -rtir_url = YOURRTIRURL -rtir_api = REST/1.0/ -rtir_user = YOURRTIRUSER -rtir_pass = YOURRTIRPASS -rtir_queue = Incidents -rtir_creator = root -rtir_verifycert = no - -[slack] -slack_url = YOURSLACKWORKSPACE -slack_webhook = YOURSLACKWEBHOOK - -[soc] -soc_url = http://{{ GLOBALS.manager }}:9822 - -[playbook] -playbook_url = http://{{ GLOBALS.manager }}:3000/playbook -playbook_ext_url = https://{{ GLOBALS.url_base }}/playbook -playbook_key = {{ PLAYBOOK_KEY }} -playbook_verifycert = no -playbook_unit_test_index = playbook-testing -playbook_rulesets = {{ salt['pillar.get']('soctopus:playbook:rulesets')|join(",") }} - -[log] -logfile = /var/log/SOCtopus/soctopus.log diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template deleted file mode 100644 index af98590473..0000000000 --- a/salt/soctopus/files/templates/es-generic.template +++ /dev/null @@ -1,5 +0,0 @@ -alert: modules.so.playbook-es.PlaybookESAlerter -elasticsearch_host: "{{ GLOBALS.manager_ip }}:9200" -play_title: "" -play_url: "https://{{ GLOBALS.manager_ip }}/playbook/issues/6000" -sigma_level: "" diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template deleted file mode 100644 index 505d4ec410..0000000000 --- a/salt/soctopus/files/templates/generic.template +++ /dev/null @@ -1,22 +0,0 @@ -alert: -- "modules.so.playbook-es.PlaybookESAlerter" - -elasticsearch_host: "{{ GLOBALS.url_base }}:9200" -play_title: "" -play_id: "" -event.module: "playbook" -event.dataset: "playbook.alert" -event.severity: -rule.category: -play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000" -kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))" -soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt" -sigma_level: "" - -index: '.ds-logs-*' -name: EQL -priority: 3 -realert: - minutes: 0 -type: any -filter: diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template deleted file mode 100644 index eb1857bb66..0000000000 --- a/salt/soctopus/files/templates/osquery.template +++ /dev/null @@ -1,13 +0,0 @@ -alert: -- "modules.so.playbook-es.PlaybookESAlerter" - -elasticsearch_host: "{{ GLOBALS.url_base }}:9200" -play_title: "" -event.module: "playbook" -event.dataset: "alert" -event.severity: -rule.category: -play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000" -kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))" -soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt" -sigma_level: "" diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls deleted file mode 100644 index c9359a68c9..0000000000 --- a/salt/soctopus/init.sls +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'soctopus/map.jinja' import SOCTOPUSMERGED %} - -include: -{% if SOCTOPUSMERGED.enabled %} - - soctopus.enabled -{% else %} - - soctopus.disabled -{% endif %} diff --git a/salt/soctopus/map.jinja b/salt/soctopus/map.jinja deleted file mode 100644 index 07df21dbb9..0000000000 --- a/salt/soctopus/map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at - https://securityonion.net/license; you may not use this file except in compliance with the - Elastic License 2.0. #} - -{% import_yaml 'soctopus/defaults.yaml' as SOCTOPUSDEFAULTS %} -{% set SOCTOPUSMERGED = salt['pillar.get']('soctopus', SOCTOPUSDEFAULTS.soctopus, merge=True) %} diff --git a/salt/soctopus/soc_soctopus.yaml b/salt/soctopus/soc_soctopus.yaml deleted file mode 100644 index 4c235ebb7a..0000000000 --- a/salt/soctopus/soc_soctopus.yaml +++ /dev/null @@ -1,10 +0,0 @@ -soctopus: - enabled: - description: You can enable or disable SOCtopus. - helpLink: playbook.html - playbook: - rulesets: - description: List of playbook rulesets. - advanced: True - helpLink: playbook.html - global: True diff --git a/salt/soctopus/sostatus.sls b/salt/soctopus/sostatus.sls deleted file mode 100644 index 8a888235e6..0000000000 --- a/salt/soctopus/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-soctopus_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-soctopus - - unless: grep -q so-soctopus /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/tools/sbin/so-soctopus-restart b/salt/soctopus/tools/sbin/so-soctopus-restart deleted file mode 100755 index 24b3aff858..0000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-restart soctopus $1 diff --git a/salt/soctopus/tools/sbin/so-soctopus-start b/salt/soctopus/tools/sbin/so-soctopus-start deleted file mode 100755 index 990ece70ee..0000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-start soctopus $1 diff --git a/salt/soctopus/tools/sbin/so-soctopus-stop b/salt/soctopus/tools/sbin/so-soctopus-stop deleted file mode 100755 index 39efa6435c..0000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-stop soctopus $1 diff --git a/salt/stig/soc_stig.yaml b/salt/stig/soc_stig.yaml index 1fb030c316..597aab8096 100644 --- a/salt/stig/soc_stig.yaml +++ b/salt/stig/soc_stig.yaml @@ -1,6 +1,6 @@ stig: enabled: - description: You can enable or disable the application of STIGS using oscap. Note that the actions performed by OSCAP are not automatically reversible. + description: You can enable or disable the application of STIGS using oscap. Note that the actions performed by OSCAP are not automatically reversible. Requires a valid Security Onion license key. forcedType: bool advanced: True run_interval: diff --git a/salt/strelka/backend/config.sls b/salt/strelka/backend/config.sls index db18a68ccf..b39e06ac83 100644 --- a/salt/strelka/backend/config.sls +++ b/salt/strelka/backend/config.sls @@ -50,16 +50,6 @@ backend_taste: - user: 939 - group: 939 -{% if STRELKAMERGED.rules.enabled %} -strelkarules: - file.recurse: - - name: /opt/so/conf/strelka/rules - - source: salt://strelka/rules - - user: 939 - - group: 939 - - clean: True -{% endif %} - {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index fc56f41971..a26905e1f5 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -17,7 +17,7 @@ strelka_backend: - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-backend:{{ GLOBALS.so_version }} - binds: - /opt/so/conf/strelka/backend/:/etc/strelka/:ro - - /opt/so/conf/strelka/rules/:/etc/yara/:ro + - /opt/so/conf/strelka/rules/compiled/:/etc/yara/:ro {% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} - {{ BIND }} @@ -43,7 +43,7 @@ strelka_backend: {% endif %} - restart_policy: on-failure - watch: - - file: strelkarules + - file: strelkasensorcompiledrules delete_so-strelka-backend_so-status.disabled: file.uncomment: diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py new file mode 100644 index 0000000000..09e3f46808 --- /dev/null +++ b/salt/strelka/compile_yara/compile_yara.py @@ -0,0 +1,99 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +import argparse +import glob +import hashlib +import json +import os +import yara +from datetime import datetime +from concurrent.futures import ThreadPoolExecutor + +def check_syntax(rule_file): + try: + # Testing if compilation throws a syntax error, don't save the result + yara.compile(filepath=rule_file) + return (True, rule_file, None) + except yara.SyntaxError as e: + # Return the error message for logging purposes + return (False, rule_file, str(e)) + +def compile_yara_rules(rules_dir): + compiled_dir = os.path.join(rules_dir, "compiled") + compiled_rules_path = "/opt/so/saltstack/local/salt/strelka/rules/compiled/rules.compiled" + rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + files_to_compile = {} + removed_count = 0 + success_count = 0 + + # Use ThreadPoolExecutor to parallelize syntax checks + with ThreadPoolExecutor() as executor: + results = executor.map(check_syntax, rule_files) + + # Collect yara files and prepare for batch compilation + ts = str(datetime.utcnow().isoformat()) + failure_ids = [] + success_ids = [] + for success, rule_file, error_message in results: + rule_id = os.path.splitext(os.path.basename(rule_file))[0] + if success: + files_to_compile[os.path.basename(rule_file)] = rule_file + success_count += 1 + success_ids.append(rule_id) + else: + failure_ids.append(rule_id) + # Extract just the UUID from the rule file name + log_entry = { + "event_module": "soc", + "event_dataset": "soc.detections", + "log.level": "error", + "error_message": error_message, + "error_analysis": "Syntax Error", + "detection_type": "YARA", + "rule_uuid": rule_id, + "error_type": "runtime_status" + } + with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file: + json.dump(log_entry, log_file) + log_file.write('\n') # Ensure new entries start on new lines + os.remove(rule_file) + removed_count += 1 + + # Compile all remaining valid rules into a single file + compiled_sha256="" + if files_to_compile: + compiled_rules = yara.compile(filepaths=files_to_compile) + compiled_rules.save(compiled_rules_path) + print(f"All remaining rules compiled and saved into {compiled_rules_path}") + # Hash file + with open(compiled_rules_path, 'rb') as hash_file: + compiled_sha256=hashlib.sha256(hash_file.read()).hexdigest() + # Remove the rules.compiled if there aren't any files to be compiled + else: + if os.path.exists(compiled_rules_path): + os.remove(compiled_rules_path) + + # Create compilation report + compilation_report = { + "timestamp": ts, + "compiled_sha256": compiled_sha256, + "failure": failure_ids, + "success": success_ids + } + + # Write total + with open('/opt/so/state/detections_yara_compilation-total.log', 'w+') as report_file: + json.dump(compilation_report, report_file) + + # Print summary of compilation results + print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Compile YARA rules from the specified directory") + parser.add_argument("rules_dir", help="Directory containing YARA rules to compile") + args = parser.parse_args() + +compile_yara_rules(args.rules_dir) diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 929bef113c..f03afa61b9 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -5,27 +5,21 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} -# 
Strelka config -strelkaconfdir: - file.directory: - - name: /opt/so/conf/strelka - - user: 939 - - group: 939 - - makedirs: True - -strelkarulesdir: - file.directory: - - name: /opt/so/conf/strelka/rules - - user: 939 - - group: 939 - - makedirs: True +{% if GLOBALS.is_manager %} +include: + - strelka.manager +{% endif %} -strelkareposdir: - file.directory: - - name: /opt/so/conf/strelka/repos +# Strelka config +strelkasensorcompiledrules: + file.recurse: + - name: /opt/so/conf/strelka/rules/compiled/ + - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 + - clean: True - makedirs: True strelkadatadir: @@ -40,29 +34,26 @@ strelkalogdir: - name: /nsm/strelka/log - user: 939 - group: 939 - - makedirs: True - -strelka_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://strelka/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 strelkagkredisdatadir: file.directory: - name: /nsm/strelka/gk-redis-data - user: 939 - group: 939 - - makedirs: True strelkacoordredisdatadir: file.directory: - name: /nsm/strelka/coord-redis-data - user: 939 - group: 939 - - makedirs: True + +strelka_sbin: + file.recurse: + - name: /usr/sbin + - source: salt://strelka/tools/sbin + - user: 939 + - group: 939 + - file_mode: 755 {% else %} diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index da259fa142..4d69bf53b3 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -563,7 +563,7 @@ strelka: options: location: '/etc/yara/' compiled: - enabled: False + enabled: True filename: "rules.compiled" store_offset: True offset_meta_key: "StrelkaHexDump" @@ -733,28 +733,6 @@ strelka: enabled: False rules: enabled: True - repos: - - https://github.com/Security-Onion-Solutions/securityonion-yara.git - excluded: - - apt_flame2_orchestrator.yar - - apt_tetris.yar - - gen_susp_js_obfuscatorio.yar - - gen_webshells.yar - - generic_anomalies.yar - - general_cloaking.yar - - thor_inverse_matches.yar - - yara_mixed_ext_vars.yar - - apt_apt27_hyperbro.yar - - apt_turla_gazer.yar - - gen_google_anomaly.yar - - gen_icon_anomalies.yar - - gen_nvidia_leaked_cert.yar - - gen_sign_anomalies.yar - - gen_susp_xor.yar - - gen_webshells_ext_vars.yar - - configured_vulns_ext_vars.yar - - expl_outlook_cve_2023_23397.yar - - gen_mal_3cx_compromise_mar23.yar filecheck: historypath: '/nsm/strelka/history/' strelkapath: '/nsm/strelka/unprocessed/' diff --git a/salt/strelka/manager.sls b/salt/strelka/manager.sls new file mode 100644 index 0000000000..6a4aea4167 --- /dev/null +++ b/salt/strelka/manager.sls @@ -0,0 +1,37 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{# if strelka.manager or strelka in allowed_states #} +{% if sls in allowed_states or sls.split('.')[0] in allowed_states %} + +# Strelka config +strelkarulesdir: + file.directory: + - name: /opt/so/conf/strelka/rules + - user: 939 + - group: 939 + - makedirs: True + +strelkacompileyara: + file.managed: + - name: /opt/so/conf/strelka/compile_yara.py + - source: salt://strelka/compile_yara/compile_yara.py + - user: 939 + - group: 939 + +strelkareposdir: + file.directory: + - name: /opt/so/conf/strelka/repos + - user: 939 + - group: 939 + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} diff --git a/salt/strelka/rules/compiled/DO.NOT.TOUCH b/salt/strelka/rules/compiled/DO.NOT.TOUCH new file mode 100644 index 0000000000..e69de29bb2 diff --git a/salt/strelka/rules/repos.txt.jinja b/salt/strelka/rules/repos.txt.jinja deleted file mode 100644 index 043a022034..0000000000 --- a/salt/strelka/rules/repos.txt.jinja +++ /dev/null @@ -1,2 +0,0 @@ -# DO NOT EDIT THIS FILE! Strelka YARA rule repos are stored here from the strelka:rules:repos pillar section -{{ STRELKAREPOS | join('\n') }} diff --git a/salt/strelka/soc_strelka.yaml b/salt/strelka/soc_strelka.yaml index e5240b9c9d..947215bd5e 100644 --- a/salt/strelka/soc_strelka.yaml +++ b/salt/strelka/soc_strelka.yaml @@ -578,18 +578,6 @@ strelka: global: False helpLink: strelka.html advanced: False - repos: - description: List of repos for so-yara-download to use to download rules. - readonly: False - global: False - helpLink: strelka.html - advanced: False - excluded: - description: List of rules to exclude so-yara-update from download and propagating to backend nodes. - readonly: False - global: False - helpLink: strelka.html - advanced: False filecheck: historypath: description: The path for previously scanned files. 
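
The strelkaengine settings described above follow the same pattern. A parallel sketch is shown below; the allowRegex value is the one used by the test profile in setup/so-setup later in this patch, while the list shapes and the repo URL key name are again assumptions for illustration only.

```yaml
# Hypothetical soc pillar override for the YARA (Strelka) rule engine settings.
soc:
  config:
    server:
      modules:
        strelkaengine:
          allowRegex: 'EquationGroup_Toolset_Apr17__ELV_.*'   # filter imported rules (see description above)
          autoEnabledYaraRules:
            - securityonion-yara             # $Ruleset format
          rulesRepos:
            default:
              - repo: https://github.com/example/yara-rules.git   # hypothetical URL
                license: MIT                                       # required
                folder: rules                                      # optional
                community: false
```

Once rules are imported and enabled, compile_yara.py (added above) batch-compiles the valid ones into /opt/so/saltstack/local/salt/strelka/rules/compiled/rules.compiled, which strelka/config.sls distributes to /opt/so/conf/strelka/rules/compiled/ and the so-strelka-backend container mounts at /etc/yara/.
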
diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 0252d3a813..d819d1cf9e 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -30,9 +30,16 @@ suricata: cluster-type: cluster_flow defrag: "yes" use-mmap: "yes" + mmap-locked: "no" threads: 1 tpacket-v3: "yes" ring-size: 5000 + block-size: 32768 + block-timeout: 10 + use-emergency-flush: "yes" + buffer-size: 32768 + disable-promisc: "no" + checksum-checks: kernel vars: address-groups: HOME_NET: diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index d351605276..3e015d1001 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -7,6 +7,7 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} +{% from 'suricata/map.jinja' import SURICATAMERGED %} include: @@ -24,6 +25,13 @@ so-suricata: - {{ XTRAENV }} {% endfor %} {% endif %} + {# we look at SURICATAMERGED.config['af-packet'][0] since we only allow one interface and therefore always the first list item #} + {% if SURICATAMERGED.config['af-packet'][0]['mmap-locked'] == "yes" and DOCKER.containers['so-suricata'].ulimits %} + - ulimits: + {% for ULIMIT in DOCKER.containers['so-suricata'].ulimits %} + - {{ ULIMIT }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 7f7b04aeff..a5012317a2 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -32,11 +32,22 @@ - interface: {{ GLOBALS.sensor.interface }} cluster-id: {{ SURICATAMERGED.config['af-packet']['cluster-id'] }} cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }} - defrag: {{ SURICATAMERGED.config['af-packet'].defrag }} - use-mmap: {{ SURICATAMERGED.config['af-packet']['use-mmap'] }} + defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}" + use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}" + mmap-locked: "{{ SURICATAMERGED.config['af-packet']['mmap-locked'] }}" threads: {{ SURICATAMERGED.config['af-packet'].threads }} - tpacket-v3: {{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }} + tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}" ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }} + block-size: {{ SURICATAMERGED.config['af-packet']['block-size'] }} + block-timeout: {{ SURICATAMERGED.config['af-packet']['block-timeout'] }} + use-emergency-flush: "{{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }}" + buffer-size: {{ SURICATAMERGED.config['af-packet']['buffer-size'] }} + disable-promisc: "{{ SURICATAMERGED.config['af-packet']['disable-promisc'] }}" +{% if SURICATAMERGED.config['af-packet']['checksum-checks'] in ['yes', 'no'] %} + checksum-checks: "{{ SURICATAMERGED.config['af-packet']['checksum-checks'] }}" +{% else %} + checksum-checks: {{ SURICATAMERGED.config['af-packet']['checksum-checks'] }} +{% endif %} {% endload %} {% do SURICATAMERGED.config.pop('af-packet') %} {% do SURICATAMERGED.config.update({'af-packet': afpacket}) %} diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 7decaa6d32..e157ff852b 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -4,13 +4,15 @@ suricata: helpLink: suricata.html thresholding: sids__yaml: - description: Threshold SIDS List + description: Threshold SIDS List. 
This setting is readonly; Use the Detections screen to modify rules. syntax: yaml file: True global: True multiline: True title: SIDS helpLink: suricata.html + readonlyUi: True + advanced: True classification: classification__config: description: Classifications config file. @@ -21,14 +23,14 @@ suricata: helpLink: suricata.html pcap: filesize: - description: Max file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval times. + description: Maximum file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval time. advanced: True - helplink: suricata.html + helpLink: suricata.html maxsize: - description: Size in GB for total usage size of PCAP on disk. - helplink: suricata.html + description: Maximum size in GB for total disk usage of all PCAP files written by Suricata. + helpLink: suricata.html compression: - description: Enable compression of Suricata PCAP. + description: Enable compression of Suricata PCAP files. advanced: True helpLink: suricata.html lz4-checksum: @@ -36,11 +38,11 @@ suricata: advanced: True helpLink: suricata.html lz4-level: - description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. + description: lz4 compression level of PCAP files. Set to 0 for no compression. Set to 16 for maximum compression. advanced: True helpLink: suricata.html filename: - description: Filename output for Suricata PCAP. + description: Filename output for Suricata PCAP files. advanced: True readonly: True helpLink: suricata.html @@ -50,13 +52,13 @@ suricata: readonly: True helpLink: suricata.html use-stream-depth: - description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. + description: Set to "no" to ignore the stream depth and capture the entire flow. Set to "yes" to truncate the flow based on the stream depth. advanced: True regex: ^(yes|no)$ regexFailureMessage: You must enter either yes or no. helpLink: suricata.html conditional: - description: Set to "all" to capture PCAP for all flows. Set to "alerts" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + description: Set to "all" to record PCAP for all flows. Set to "alerts" to only record PCAP for Suricata alerts. Set to "tag" to only record PCAP for tagged rules. regex: ^(all|alerts|tag)$ regexFailureMessage: You must enter either all, alert or tag. helpLink: suricata.html @@ -83,6 +85,11 @@ suricata: use-mmap: advanced: True readonly: True + mmap-locked: + description: Prevent swapping by locking the memory map. + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html threads: description: The amount of worker threads. helpLink: suricata.html @@ -94,6 +101,36 @@ suricata: description: Buffer size for packets per thread. forcedType: int helpLink: suricata.html + block-size: + description: This must be configured to a sufficiently high value to accommodate a significant number of packets, considering byte size and MTU constraints. Ensure it aligns with a power of 2 and is a multiple of the page size. + advanced: True + forcedType: int + helpLink: suricata.html + block-timeout: + description: If a block remains unfilled after the specified block-timeout milliseconds, it is passed to userspace. 
+ advanced: True + forcedType: int + helpLink: suricata.html + use-emergency-flush: + description: In high-traffic environments, enabling this option to 'yes' aids in recovering from packet drop occurrences. However, it may lead to some packets, possibly at max ring flush, not being inspected. + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html + buffer-size: + description: Increasing the value of the receive buffer may improve performance. + advanced: True + forcedType: int + helpLink: suricata.html + disable-promisc: + description: Promiscuous mode can be disabled by setting this to "yes". + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html + checksum-checks: + description: "Opt for the checksum verification mode suitable for the interface. During capture, it's possible that some packets may exhibit invalid checksums due to the network card handling the checksum computation. You have several options: 'kernel': Relies on indications sent by the kernel for each packet (default). 'yes': Enforces checksum validation. 'no': Disables checksum validation. 'auto': Suricata employs a statistical approach to detect checksum offloading." + advanced: True + regex: ^(kernel|yes|no|auto)$ + helpLink: suricata.html threading: set-cpu-affinity: description: Bind(yes) or unbind(no) management and worker threads to a core or range of cores. @@ -113,84 +150,40 @@ suricata: helpLink: suricata.html vars: address-groups: - HOME_NET: - description: List of hosts or networks. - regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ + HOME_NET: &suriaddressgroup + description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. + regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. helpLink: suricata.html - EXTERNAL_NET: - description: List of hosts or networks. - helpLink: suricata.html - HTTP_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - SMTP_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - SQL_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DNS_SERVERS: - description: List of hosts or networks. 
- helpLink: suricata.html - TELNET_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - AIM_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DC_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DNP3_SERVER: - description: List of hosts or networks. - helpLink: suricata.html - DNP3_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - MODBUS_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - MODBUS_SERVER: - description: List of hosts or networks. - helpLink: suricata.html - ENIP_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - ENIP_SERVER: - description: List of hosts or networks. - helpLink: suricata.html + duplicates: True + EXTERNAL_NET: *suriaddressgroup + HTTP_SERVERS: *suriaddressgroup + SMTP_SERVERS: *suriaddressgroup + SQL_SERVERS: *suriaddressgroup + DNS_SERVERS: *suriaddressgroup + TELNET_SERVERS: *suriaddressgroup + AIM_SERVERS: *suriaddressgroup + DC_SERVERS: *suriaddressgroup + DNP3_SERVER: *suriaddressgroup + DNP3_CLIENT: *suriaddressgroup + MODBUS_CLIENT: *suriaddressgroup + MODBUS_SERVER: *suriaddressgroup + ENIP_CLIENT: *suriaddressgroup + ENIP_SERVER: *suriaddressgroup port-groups: - HTTP_PORTS: - description: List of ports to look for HTTP traffic on. - helpLink: suricata.html - SHELLCODE_PORTS: - description: List of ports to look for SHELLCODE traffic on. - helpLink: suricata.html - ORACLE_PORTS: - description: List of ports to look for ORACLE traffic on. - helpLink: suricata.html - SSH_PORTS: - description: List of ports to look for SSH traffic on. - helpLink: suricata.html - DNP3_PORTS: - description: List of ports to look for DNP3 traffic on. - helpLink: suricata.html - MODBUS_PORTS: - description: List of ports to look for MODBUS traffic on. - helpLink: suricata.html - FILE_DATA_PORTS: - description: List of ports to look for FILE_DATA traffic on. - helpLink: suricata.html - FTP_PORTS: - description: List of ports to look for FTP traffic on. - helpLink: suricata.html - VXLAN_PORTS: - description: List of ports to look for VXLAN traffic on. - helpLink: suricata.html - TEREDO_PORTS: - description: List of ports to look for TEREDO traffic on. - helpLink: suricata.html + HTTP_PORTS: &suriportgroup + description: Assign a list of network port numbers to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. 
+ helpLink: suricata.html + duplicates: True + SHELLCODE_PORTS: *suriportgroup + ORACLE_PORTS: *suriportgroup + SSH_PORTS: *suriportgroup + DNP3_PORTS: *suriportgroup + MODBUS_PORTS: *suriportgroup + FILE_DATA_PORTS: *suriportgroup + FTP_PORTS: *suriportgroup + VXLAN_PORTS: *suriportgroup + TEREDO_PORTS: *suriportgroup outputs: eve-log: types: diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index d3718f0b52..3cf2f731d2 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -11,6 +11,7 @@ telegraf: quiet: 'false' scripts: eval: + - agentstatus.sh - checkfiles.sh - influxdbsize.sh - lasthighstate.sh @@ -23,6 +24,7 @@ telegraf: - zeekcaptureloss.sh - zeekloss.sh standalone: + - agentstatus.sh - checkfiles.sh - eps.sh - influxdbsize.sh @@ -38,6 +40,7 @@ telegraf: - zeekloss.sh - features.sh manager: + - agentstatus.sh - influxdbsize.sh - lasthighstate.sh - os.sh @@ -46,6 +49,7 @@ telegraf: - sostatus.sh - features.sh managersearch: + - agentstatus.sh - eps.sh - influxdbsize.sh - lasthighstate.sh diff --git a/salt/telegraf/enabled.sls b/salt/telegraf/enabled.sls index b1fa0c247c..8e3bc9fbe3 100644 --- a/salt/telegraf/enabled.sls +++ b/salt/telegraf/enabled.sls @@ -56,6 +56,7 @@ so-telegraf: - /opt/so/log/raid:/var/log/raid:ro - /opt/so/log/sostatus:/var/log/sostatus:ro - /opt/so/log/salt:/var/log/salt:ro + - /opt/so/log/agents:/var/log/agents:ro {% if DOCKER.containers['so-telegraf'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh new file mode 100644 index 0000000000..a390552fc9 --- /dev/null +++ b/salt/telegraf/scripts/agentstatus.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + + + +# if this script isn't already running +if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then + + LOGFILE=/var/log/agents/agentstatus.log + + # Check to see if the file is there yet so we don't break install verification since there is a 5 minute delay for this file to show up + if [ -f $LOGFILE ]; then + ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') + ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') + INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') + OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',') + UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',') + UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',') + OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',') + EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',') + TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',') + ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',') + ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}') + + echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE" + fi + +fi + +exit 0 diff --git a/salt/top.sls b/salt/top.sls index 16b355476b..370914d917 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -67,7 +67,6 @@ base: - idstools - suricata.manager - healthcheck - - mysql - elasticsearch - elastic-fleet-package-registry - kibana @@ -78,8 +77,6 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet '*_manager and G@saltversion:{{saltversion}}': @@ -90,6 +87,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -99,7 +97,6 @@ base: - backup.config_backup - idstools - suricata.manager - - mysql - elasticsearch - logstash - redis @@ -108,8 +105,6 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet - stig @@ -132,7 +127,6 @@ base: - idstools - suricata.manager - healthcheck - - mysql - elasticsearch - logstash - redis @@ -145,20 +139,18 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet - stig '*_searchnode and G@saltversion:{{saltversion}}': - match: compound + - firewall - ssl + - elasticsearch + - logstash - sensoroni - telegraf - nginx - - firewall - - elasticsearch - - logstash - elasticfleet.install_agent_grid - stig @@ -170,6 +162,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -179,7 +172,6 @@ base: - backup.config_backup - idstools - suricata.manager - - mysql - elasticsearch - logstash - redis @@ -188,8 +180,6 @@ base: - kibana - elastalert - utility - - soctopus - - playbook - elasticfleet - stig @@ -222,6 +212,7 @@ base: - manager - nginx - influxdb + - strelka.manager - soc - kratos - sensoroni diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index c69ce5ea14..1594eed584 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -24,7 +24,7 @@ zeek: advanced: False helpLink: zeek.html multiline: True - regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ + regex: 
^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. node: lb_procs: @@ -60,6 +60,7 @@ zeek: file: True global: True advanced: True + duplicates: True file_extraction: description: Contains a list of file or MIME types Zeek will extract from the network streams. Values must adhere to the following format - {"MIME_TYPE":"FILE_EXTENSION"} helpLink: zeek.html diff --git a/setup/so-functions b/setup/so-functions index 4aae0f5bd8..d91161203f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -559,7 +559,7 @@ check_requirements() { local num_nics=${#nic_list[@]} if [[ $is_eval ]]; then - req_mem=12 + req_mem=8 req_cores=4 req_nics=2 elif [[ $is_standalone ]]; then @@ -762,21 +762,6 @@ copy_salt_master_config() { logCmd "systemctl restart salt-master" } -create_local_directories() { - info "Creating local pillar and salt directories" - PILLARSALTDIR=${SCRIPTDIR::-5} - for i in "pillar" "salt"; do - for d in $(find $PILLARSALTDIR/$i -type d); do - suffixdir=${d//$PILLARSALTDIR/} - if [ ! -d "$local_salt_dir/$suffixdir" ]; then - logCmd "mkdir -pv $local_salt_dir$suffixdir" - fi - done - logCmd "chown -R socore:socore $local_salt_dir/$i" - done - -} - create_local_nids_rules() { title "Create a local.rules file so it doesn't get removed on updates" logCmd "mkdir -p /opt/so/saltstack/local/salt/idstools" @@ -792,7 +777,6 @@ create_manager_pillars() { create_global create_sensoroni_pillar backup_pillar - soctopus_pillar docker_pillar redis_pillar idstools_pillar @@ -1109,10 +1093,6 @@ generate_ssl() { generate_passwords(){ title "Generate Random Passwords" - MYSQLPASS=$(get_random_value) - PLAYBOOKDBPASS=$(get_random_value) - PLAYBOOKADMINPASS=$(get_random_value) - PLAYBOOKAUTOMATIONPASS=$(get_random_value) INFLUXPASS=$(get_random_value) INFLUXTOKEN=$(head -c 64 /dev/urandom | base64 --wrap=0) SENSORONIKEY=$(get_random_value) @@ -1167,11 +1147,6 @@ install_cleanup() { # that will disrupt automated tests should be placed beneath this statement. 
[ -n "$TESTING" ] && return - # If Mysql is running stop it - if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then - logVmd "/usr/sbin/so-mysql-stop" - fi - if [[ $setup_type == 'iso' ]]; then info "Removing so-setup permission entry from sudoers file" logCmd "sed -i '/so-setup/d' /etc/sudoers" @@ -1267,7 +1242,33 @@ soc_pillar() { " config:"\ " server:"\ " srvKey: '$SOCSRVKEY'"\ - "" > "$soc_pillar_file" + " modules:"\ + " elastalertengine:"\ + " allowRegex: '$ELASTALERT_ALLOW_REGEX'" > "$soc_pillar_file" + if [[ -n "$ELASTALERT_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $ELASTALERT_FAIL_ERROR_COUNT" >> "$soc_pillar_file" + fi + + printf '%s\n'\ + " strelkaengine:"\ + " allowRegex: '$STRELKA_ALLOW_REGEX'" >> "$soc_pillar_file" + if [[ -n "$STRELKA_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $STRELKA_FAIL_ERROR_COUNT" >> "$soc_pillar_file" + fi + + printf '%s\n'\ + " suricataengine:"\ + " allowRegex: '$SURICATA_ALLOW_REGEX'" >> "$soc_pillar_file" + if [[ -n "$SURICATA_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $SURICATA_FAIL_ERROR_COUNT" >> "$soc_pillar_file" + fi + + if [[ $telemetry -ne 0 ]]; then + echo " telemetryEnabled: false" >> $soc_pillar_file + fi } telegraf_pillar() { @@ -1279,17 +1280,11 @@ telegraf_pillar() { manager_pillar() { touch $adv_manager_pillar_file title "Create the manager pillar" - if [[ $is_import ]]; then - PLAYBOOK=0 - else - PLAYBOOK=1 - fi printf '%s\n'\ "manager:"\ " proxy: '$so_proxy'"\ " no_proxy: '$no_proxy_string'"\ " elastalert: 1"\ - " playbook: $PLAYBOOK"\ "" > "$manager_pillar_file" } @@ -1342,6 +1337,10 @@ create_global() { echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file echo " registry_host: '$HOSTNAME'" >> $global_pillar_file echo " endgamehost: '$ENDGAMEHOST'" >> $global_pillar_file + + if [ "$install_type" = 'EVAL' ]; then + echo " pcapengine: SURICATA" >> $global_pillar_file + fi } create_sensoroni_pillar() { @@ -1362,16 +1361,6 @@ backup_pillar() { touch $adv_backup_pillar_file } -soctopus_pillar() { - title "Create the soctopus pillar file" - touch $adv_soctopus_pillar_file - printf '%s\n'\ - "soctopus:"\ - " playbook:"\ - " rulesets:"\ - " - windows" > "$soctopus_pillar_file" -} - docker_pillar() { title "Create the docker pillar file" touch $adv_docker_pillar_file @@ -1413,7 +1402,7 @@ make_some_dirs() { mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/ports - for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do + for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls @@ -1606,7 +1595,9 @@ reinstall_init() { # Kill any salt processes (safely) for service in "${salt_services[@]}"; do # Stop the service in the background so we can exit after a certain amount of time - systemctl stop "$service" & + if check_service_status "$service"; then + systemctl stop "$service" & + fi local pid=$! 
local count=0 @@ -1949,7 +1940,6 @@ saltify() { salt_install_module_deps() { logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" - logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" } salt_patch_x509_v2() { @@ -1967,11 +1957,6 @@ secrets_pillar(){ mkdir -p $local_salt_dir/pillar printf '%s\n'\ "secrets:"\ - " mysql: $MYSQLPASS"\ - " playbook_db: $PLAYBOOKDBPASS"\ - " playbook_admin: $PLAYBOOKADMINPASS"\ - " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\ - " playbook_automation_api_key: "\ " import_pass: $IMPORTPASS"\ " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls fi diff --git a/setup/so-setup b/setup/so-setup index e2de39f50e..571fcc75d3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -245,6 +245,12 @@ if [ -n "$test_profile" ]; then WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r WEBPASSWD2=0n10nus3r + STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" + STRELKA_FAIL_ERROR_COUNT=1 + ELASTALERT_ALLOW_REGEX="Security Onion" + ELASTALERT_FAIL_ERROR_COUNT=1 + SURICATA_ALLOW_REGEX="(200033\\d|2100538|2102466)" + SURICATA_FAIL_ERROR_COUNT=1 update_sudoers_for_testing fi @@ -447,6 +453,7 @@ if ! [[ -f $install_opt_file ]]; then get_redirect # Does the user want to allow access to the UI? collect_so_allow + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_standalone ]]; then waitforstate=true @@ -468,6 +475,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_manager ]]; then info "Setting up as node type manager" @@ -488,6 +496,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_managersearch ]]; then info "Setting up as node type managersearch" @@ -508,6 +517,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_sensor ]]; then info "Setting up as node type sensor" @@ -597,6 +607,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_receiver ]]; then @@ -661,7 +672,7 @@ if ! [[ -f $install_opt_file ]]; then # Add the socore user add_socore_user_manager - create_local_directories + create_local_directories ${SCRIPTDIR::-5} setup_salt_master_dirs create_manager_pillars @@ -753,16 +764,10 @@ if ! [[ -f $install_opt_file ]]; then if [[ ! $is_airgap ]]; then title "Downloading IDS Rules" logCmd "so-rule-update" - title "Downloading YARA rules" - logCmd "su socore -c '/usr/sbin/so-yara-download'" if [[ $monints || $is_import ]]; then title "Restarting Suricata to pick up the new rules" logCmd "so-suricata-restart" fi - if [[ $monints ]]; then - title "Restarting Strelka to use new rules" - logCmd "so-strelka-restart" - fi fi title "Setting up Kibana Default Space" logCmd "so-kibana-space-defaults" @@ -775,10 +780,6 @@ if ! [[ -f $install_opt_file ]]; then error "Failed to run so-elastic-fleet-setup" fail_setup fi - if [[ ! 
$is_import ]]; then - title "Setting up Playbook" - logCmd "so-playbook-reset" - fi checkin_at_boot set_initial_firewall_access logCmd "salt-call schedule.enable -linfo --local" @@ -807,7 +808,6 @@ if ! [[ -f $install_opt_file ]]; then configure_minion "$minion_type" check_sos_appliance drop_install_options - logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/" verify_setup fi diff --git a/setup/so-variables b/setup/so-variables index 511dfc43bd..42ed8fc5c2 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -112,12 +112,6 @@ export sensoroni_pillar_file adv_sensoroni_pillar_file="$local_salt_dir/pillar/sensoroni/adv_sensoroni.sls" export adv_sensoroni_pillar_file -soctopus_pillar_file="$local_salt_dir/pillar/soctopus/soc_soctopus.sls" -export soctopus_pillar_file - -adv_soctopus_pillar_file="$local_salt_dir/pillar/soctopus/adv_soctopus.sls" -export adv_soctopus_pillar_file - docker_pillar_file="$local_salt_dir/pillar/docker/soc_docker.sls" export docker_pillar diff --git a/setup/so-verify b/setup/so-verify index b4c79a88c8..d22b80fc24 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -67,6 +67,7 @@ log_has_errors() { grep -vE "Reading first line of patchfile" | \ grep -vE "Command failed with exit code" | \ grep -vE "Running scope as unit" | \ + grep -vE "securityonion-resources/sigma/stable" | \ grep -vE "log-.*-pipeline_failed_attempts" &> "$error_log" if [[ $? -eq 0 ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index ff8c9fe8d5..d950f29210 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -14,7 +14,7 @@ whiptail_airgap() { [[ $is_manager || $is_import ]] && node_str='manager' INTERWEBS=$(whiptail --title "$whiptail_title" --menu \ - "How should this $node_str be installed?" 10 70 2 \ + "How should this $node_str be installed?\n\nFor more information, please see:\n$DOC_BASE_URL/airgap.html" 13 70 2 \ "Standard " "This $node_str has access to the Internet" \ "Airgap " "This $node_str does not have access to the Internet" 3>&1 1>&2 2>&3 ) @@ -144,6 +144,26 @@ whiptail_cancel() { exit 1 } +whiptail_accept_telemetry() { + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + + The Security Onion development team could use your help! Enabling SOC + Telemetry will help the team understand which UI features are being + used and enables informed prioritization of future development. + + Adjust this setting at any time via the SOC Configuration screen. + + Documentation: https://docs.securityonion.net/en/2.4/telemetry.html + + Enable SOC Telemetry to help improve future releases? + EOM + whiptail --title "$whiptail_title" --yesno "$message" 15 75 + telemetry=$? +} + whiptail_check_exitstatus() { case $1 in 1) @@ -288,9 +308,9 @@ whiptail_dhcp_or_static() { [ -n "$TESTING" ] && return address_type=$(whiptail --title "$whiptail_title" --menu \ - "Choose how to set up your management interface:" 20 78 4 \ - "STATIC" "Set a static IPv4 address" \ - "DHCP" "Use DHCP to configure the Management Interface" 3>&1 1>&2 2>&3 ) + "Choose how to set up your management interface. We recommend using a static IP address." 20 78 4 \ + "STATIC" "Set a static IPv4 address (recommended)" \ + "DHCP" "Use DHCP to configure the management interface" 3>&1 1>&2 2>&3 ) local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -431,6 +451,14 @@ whiptail_end_settings() { done fi + if [[ ! 
$is_airgap ]] && [[ $waitforstate ]]; then + if [[ $telemetry -eq 0 ]]; then + __append_end_msg "SOC Telemetry: enabled" + else + __append_end_msg "SOC Telemetry: disabled" + fi + fi + # ADVANCED if [[ $MANAGERADV == 'ADVANCED' ]]; then __append_end_msg "Advanced Manager Settings:" @@ -447,7 +475,7 @@ whiptail_end_settings() { read -r -d '' msg <<-EOM $end_msg - Press the Tab key to select yes or no. + Press the TAB key to select yes or no. EOM whiptail --title "The following options have been set, would you like to proceed?" --yesno "$msg" 24 75 --scrolltext @@ -564,8 +592,8 @@ whiptail_install_type() { "IMPORT" "Import PCAP or log files " \ "EVAL" "Evaluation mode (not for production) " \ "STANDALONE" "Standalone production install " \ - "DISTRIBUTED" "Distributed install submenu " \ - "DESKTOP" "Install Security Onion Desktop" \ + "DISTRIBUTED" "Distributed deployment " \ + "DESKTOP" "Security Onion Desktop" \ 3>&1 1>&2 2>&3 ) elif [[ "$OSVER" == "focal" ]]; then diff --git a/sigs/securityonion-2.4.70-20240529.iso.sig b/sigs/securityonion-2.4.70-20240529.iso.sig new file mode 100644 index 0000000000..c3825eb6e7 Binary files /dev/null and b/sigs/securityonion-2.4.70-20240529.iso.sig differ
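The salt/top.sls changes remove the mysql, soctopus, and playbook states and add strelka.manager to the manager-class roles. After applying them, the state list each minion will actually receive can be confirmed with Salt's state.show_top; the target glob below is only an example, so substitute a real minion ID or role pattern for your grid.

```
# On the manager: show which states salt/top.sls now assigns to matching minions.
sudo salt '*_manager' state.show_top

# Or run locally on any minion to see its own assignment:
sudo salt-call state.show_top
```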
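The replacement regex in salt/zeek/soc_zeek.yaml accepts both IPv4 and IPv6 addresses, each with an optional CIDR suffix, instead of the looser IPv4-only pattern it replaces. Candidate values can be sanity-checked against this class of pattern with grep -E; the sketch below is not part of the patch and uses a simplified IPv4/CIDR subset of the full expression for brevity, with illustrative test values.

```
#!/bin/bash
# Sketch: validate candidate values the same way the SOC UI does for this field.
# This uses an IPv4/CIDR-only subset of the full pattern (the pattern in
# soc_zeek.yaml also covers IPv6); the test values are illustrative.
ipv4_cidr='^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(/([0-9]|[1-2][0-9]|3[0-2]))?$'

for candidate in 10.0.0.0/8 192.168.1.10 256.1.1.1 10.0.0.0/33; do
  if echo "$candidate" | grep -Eq "$ipv4_cidr"; then
    echo "valid:   $candidate"
  else
    echo "invalid: $candidate"
  fi
done
```

The first two values should report valid and the last two invalid, since 256 is out of range for an octet and /33 exceeds the maximum 32-bit mask.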
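whiptail_accept_telemetry stores the --yesno exit status in $telemetry, which is why the later checks treat 0 as "enabled" and only write telemetryEnabled: false into the soc pillar when the answer was not Yes. A minimal standalone illustration of that convention (not part of the patch):

```
#!/bin/bash
# whiptail --yesno exits 0 for "Yes", 1 for "No", and 255 if the dialog is
# cancelled with Esc, so saving $? immediately after the call captures the
# user's choice as a simple flag.
whiptail --title "Demo" --yesno "Enable the feature?" 10 60
answer=$?

if [[ $answer -eq 0 ]]; then
  echo "answered Yes (flag=$answer)"
else
  echo "answered No or cancelled (flag=$answer)"
fi
```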