diff --git a/README.md b/README.md index 5f55d86..8d255df 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ This repository will contain orchestration files and documentation for using the individual Hub Docker containers. -## Location of hub 4.7.3 archive: +## Location of hub 4.8.0 archive: -https://github.com/blackducksoftware/hub/archive/v4.7.3.tar.gz +https://github.com/blackducksoftware/hub/archive/v4.8.0.tar.gz ## Important Upgrade Announcement diff --git a/docker-compose/README.md b/docker-compose/README.md index 39fee39..1cee7c7 100644 --- a/docker-compose/README.md +++ b/docker-compose/README.md @@ -399,4 +399,3 @@ This example will add a second Job Runner container. It is also possible to remo ``` docker-compose -p hub scale jobrunner=1 ``` - diff --git a/docker-compose/bin/hub_add_replication_user.sh b/docker-compose/bin/hub_add_replication_user.sh index ac36ec0..c416950 100755 --- a/docker-compose/bin/hub_add_replication_user.sh +++ b/docker-compose/bin/hub_add_replication_user.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/hub_create_data_dump.sh b/docker-compose/bin/hub_create_data_dump.sh index 4b538e6..775cf54 100755 --- a/docker-compose/bin/hub_create_data_dump.sh +++ b/docker-compose/bin/hub_create_data_dump.sh @@ -7,7 +7,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { @@ -68,7 +68,28 @@ docker exec -i ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds exitCode=$? [ ${exitCode} -ne 0 ] && fail "Cannot create the dump file from the container [Container Id: ${container_id}]" 8 -docker cp ${container_id}:/tmp/bds_hub.dump ${local_dest_dump_file} +# Create an absolute path to copy to, adds support for symbolic links +if [ ! -d "$local_dest_dump_file" ]; then + cd `dirname $local_dest_dump_file` + base_file=`basename $local_dest_dump_file` + symlink_count=0 + while [ -L "$base_file" ]; do + (( symlink_count++ )) + if [ "$symlink_count" -gt 100 ]; then + fail "MAXSYMLINK level reached." 1 + fi + base_file=`readlink $base_file` + cd `dirname $base_file` + base_file=`basename $base_file` + done +present_dir=`pwd -P` +local_absolute_path=$present_dir/$base_file + +else + local_absolute_path=${local_dest_dump_file} +fi + +docker cp ${container_id}:/tmp/bds_hub.dump "${local_absolute_path}" exitCode=$? 
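The new path-resolution block above walks symbolic links by hand so that `docker cp` always receives an absolute destination path. Where GNU coreutils can be assumed on the host, the same effect can be had with `readlink -f`; a minimal sketch of that alternative (the helper name and sample path are illustrative, not part of this repository):

```
#!/bin/bash
# Hypothetical equivalent of the symlink loop above, assuming GNU coreutils.
# readlink -f resolves every symlink component and prints an absolute path;
# the hand-rolled loop in the script avoids that dependency on minimal hosts.
resolve_dump_destination() {
    local dest="$1"
    if [ -d "$dest" ]; then
        ( cd "$dest" && pwd -P )      # directories: just canonicalize
    else
        readlink -f -- "$dest"        # files (possibly symlinks): full resolution
    fi
}

# Example (illustrative path): resolve_dump_destination ./backups/bds_hub.dump
```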
[ ${exitCode} -ne 0 ] && fail "Was not able to copy the dump file over [Container Id: ${container_id}]" 9 diff --git a/docker-compose/bin/hub_db_migrate.sh b/docker-compose/bin/hub_db_migrate.sh index 8706b9f..4d799d0 100755 --- a/docker-compose/bin/hub_db_migrate.sh +++ b/docker-compose/bin/hub_db_migrate.sh @@ -13,7 +13,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/hub_replication_changepassword.sh b/docker-compose/bin/hub_replication_changepassword.sh index 9cce664..2cffa4f 100755 --- a/docker-compose/bin/hub_replication_changepassword.sh +++ b/docker-compose/bin/hub_replication_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/hub_reportdb_changepassword.sh b/docker-compose/bin/hub_reportdb_changepassword.sh index 287dac8..7d90ba5 100755 --- a/docker-compose/bin/hub_reportdb_changepassword.sh +++ b/docker-compose/bin/hub_reportdb_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/system_check.sh b/docker-compose/bin/system_check.sh index d487e12..79145ec 100755 --- a/docker-compose/bin/system_check.sh +++ b/docker-compose/bin/system_check.sh @@ -27,7 +27,7 @@ set -o noglob #set -o xtrace -readonly HUB_VERSION="${HUB_VERSION:-4.7.3}" +readonly HUB_VERSION="${HUB_VERSION:-4.8.0}" readonly OUTPUT_FILE="${SYSTEM_CHECK_OUTPUT_FILE:-$(date +"system_check_%Y%m%dT%H%M%S%z.txt")}" readonly OUTPUT_FILE_TOC="$(mktemp -t "$(basename "${OUTPUT_FILE}").XXXXXXXXXX")" trap 'rm -f "${OUTPUT_FILE_TOC}"' EXIT @@ -50,6 +50,10 @@ readonly REQ_DISK_MB=250000 readonly REQ_DOCKER_VERSIONS="17.06.x 17.09.x 17.12.x 18.03.x" readonly REQ_ENTROPY=100 +readonly REQ_SYSCTL_KEEPALIVE_TIME=600 +readonly REQ_SYSCTL_KEEPALIVE_INTERVAL=30 +readonly REQ_SYSCTL_KEEPALIVE_PROBES=10 + readonly TRUE="TRUE" readonly FALSE="FALSE" readonly UNKNOWN="UNKNOWN" # Yay for tri-valued booleans! Treated as $FALSE. @@ -192,6 +196,8 @@ is_root() { # OS_NAME -- (out) operating system name # IS_LINUX -- (out) TRUE/FALSE # IS_MACOS -- (out) TRUE/FALSE. macOS is not considered to be Linux. +# IS_REDHAT -- (out) TRUE/FALSE +# IS_RHEL -- (out) TRUE/FALSE # Arguments: # None # Returns: @@ -204,12 +210,19 @@ get_os_name() { # Find the local release name. 
IS_LINUX="$TRUE" IS_MACOS="$FALSE" + IS_REDHAT="$FALSE" + IS_RHEL="$FALSE" if have_command lsb_release ; then OS_NAME="$(lsb_release -a ; echo ; echo -n uname -a:\ ; uname -a)" elif [[ -e /etc/fedora-release ]]; then OS_NAME="$(cat /etc/fedora-release)" elif [[ -e /etc/redhat-release ]]; then OS_NAME="$(cat /etc/redhat-release)" + IS_REDHAT="$TRUE" + local ENTERPRISE="$(cat /etc/redhat-release | cut -d' ' -f 3)" + if [[ "${ENTERPRISE}" == 'Enterprise' ]]; then + IS_RHEL="$TRUE" + fi elif [[ -e /etc/centos-release ]]; then OS_NAME="$(cat /etc/centos-release)" elif [[ -e /etc/SuSE-release ]]; then @@ -229,6 +242,8 @@ get_os_name() { readonly OS_NAME readonly IS_LINUX readonly IS_MACOS + readonly IS_REDHAT + readonly IS_RHEL fi check_boolean "${IS_LINUX}" @@ -250,6 +265,40 @@ is_linux() { check_boolean "${IS_LINUX}" } + +################################################################ +# Determine whether the current operating system is a Red Hat Linux +# variant. +# +# Globals: +# IS_REDHAT -- (out) TRUE/FALSE, true if the system is red hat +# Arguments: +# None +# Returns: +# true if this is a Red Hat system +################################################################ +is_redhat() { + [[ -n "${IS_REDHAT}" ]] || get_os_name + check_boolean "${IS_REDHAT}" +} + +################################################################ +# Determine whether the current operating system is a Red Hat +# Enterprise Linux variant. +# +# Globals: +# IS_RHEL -- (out) TRUE/FALSE, true if the system is red hat +# enterprise linux +# Arguments: +# None +# Returns: +# true if this is a Red Hat system +################################################################ +is_rhel() { + [[ -n "${IS_RHEL}" ]] || get_os_name + check_boolean "${IS_RHEL}" +} + ################################################################ # Expose the running operating system name. See also # http://linuxmafia.com/faq/Admin/release-files.html @@ -622,6 +671,76 @@ echo_port_status() { fi } + +################################################################ +# Check critical IPV4 syctl values on linux +# +# Globals: +# SYSCTL_KEEPALIVE_TIME -- (out) - The current keepalive time +# SYSCTL_KEEPALIVE_INTERVAL -- (out) - The current keepalive interval +# SYSCTL_KEEPALIVE_PROBES -- (out) - The current # of keepalive probes +# SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# +# +################################################################ +get_sysctl_keepalive() { + + if [[ -z "${SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS}" ]] ; then + + if ! is_linux ; then + readonly SYSCTL_KEEPALIVE_TIME="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_INTERVAL="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_PROBES="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="UNKNOWN" + return -1 + fi + + if ! have_command sysctl ; then + readonly SYSCTL_KEEPALIVE_TIME="Can't check sysctl keepalive, sysctl not found." + readonly SYSCTL_KEEPALIVE_INTERVAL="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_PROBES="Can't check sysctl keepalive on non-linux system." 
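The keepalive check being introduced here (continued below) compares the live kernel settings against the recommended values declared earlier (600 / 30 / 10). For an administrator who needs to bring a host into line, a minimal sketch, assuming root access, procps-ng `sysctl --system`, and a distribution that reads `/etc/sysctl.d/*.conf`; the file name is illustrative:

```
# Inspect the current values the check reads.
sysctl net.ipv4.tcp_keepalive_time net.ipv4.tcp_keepalive_intvl net.ipv4.tcp_keepalive_probes

# Persist the recommended values (REQ_SYSCTL_KEEPALIVE_* above) and reload.
sudo tee /etc/sysctl.d/99-blackduck-keepalive.conf > /dev/null <<'EOF'
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
EOF
sudo sysctl --system
```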
+ readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="UNKNOWN" + return -1 + fi + + echo "Checking sysctl keepalive parameters..." + readonly SYSCTL_KEEPALIVE_TIME=$(sysctl net.ipv4.tcp_keepalive_time | awk -F' = ' '{print $2}') + readonly SYSCTL_KEEPALIVE_INTERVAL=$(sysctl net.ipv4.tcp_keepalive_intvl | awk -F' = ' '{print $2}') + readonly SYSCTL_KEEPALIVE_PROBES=$(sysctl net.ipv4.tcp_keepalive_probes | awk -F' = ' '{print $2}') + + SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="TRUE" + SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="TRUE" + SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="TRUE" + + if [[ "${SYSCTL_KEEPALIVE_TIME}" -lt "${REQ_SYSCTL_KEEPALIVE_TIME}" ]] ; then + SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="FALSE" + fi + + if [[ "${SYSCTL_KEEPALIVE_INTERVAL}" -lt "${REQ_SYSCTL_KEEPALIVE_INTERVAL}" ]] ; then + SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="FALSE" + fi + + if [[ "${SYSCTL_KEEPALIVE_PROBES}" -lt "${REQ_SYSCTL_KEEPALIVE_PROBES}" ]] ; then + SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="FALSE" + fi + + readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS + + fi + + + +} + + ################################################################ # Get a list of running processes. # @@ -668,6 +787,7 @@ is_docker_present() { # # Globals: # DOCKER_VERSION -- (out) docker version. +# DOCKER_EDITION - (out) ee/ce for enterprise/consumer # DOCKER_VERSION_CHECK -- (out) PASS/FAIL docker version is supported. # REQ_DOCKER_VERSIONS -- (in) supported docker versions. # Arguments: @@ -682,19 +802,98 @@ check_docker_version() { return 1 fi + # Read the docker version strings and parse them + # The string will look something like this: + # "Docker version 18.03.1-ce, build 9ee9f40" + # 3rd field based on cutting by " " is the version string + # 2nd field based on cutting by "-" is the edition with a comma on the end + # 1st field based on cutting with "," will be the edition without the comma + # edition is "ee" for Enterprise and "ce" for community echo "Checking docker version..." readonly DOCKER_VERSION="$(docker --version)" + readonly DOCKER_EDITION="$(docker --version | cut -d' ' -f3 | cut -d- -f2 | cut -d, -f1)" local docker_base_version="$(docker --version | cut -d' ' -f3 | cut -d. -f1-2)" + + if [[ ! "${REQ_DOCKER_VERSIONS}" =~ ${docker_base_version}.x ]]; then readonly DOCKER_VERSION_CHECK="$FAIL. Running ${DOCKER_VERSION}, supported versions are: ${REQ_DOCKER_VERSIONS}" else readonly DOCKER_VERSION_CHECK="$PASS. ${DOCKER_VERSION} installed." fi + fi check_passfail "${DOCKER_VERSION_CHECK}" } + +################################################################ +# Check whether a supported version of docker is installed +# +# Globals: +# DOCKER_VERSION -- (out) docker version. +# DOCKER_EDITION - (out) ee/ce for enterprise/consumer +# DOCKER_VERSION_CHECK -- (out) PASS/FAIL docker version is supported. +# REQ_DOCKER_VERSIONS -- (in) supported docker versions. +# Arguments: +# None +# Returns: +# true if a supported version of docker is installed. 
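The comment added above walks through how the edition suffix is extracted from `docker --version` output. A standalone illustration of the same `cut` pipeline, run on the sample string quoted in that comment:

```
# Same pipeline as DOCKER_EDITION above, applied to the sample string.
version_string="Docker version 18.03.1-ce, build 9ee9f40"
echo "$version_string" | cut -d' ' -f3                              # -> 18.03.1-ce,
echo "$version_string" | cut -d' ' -f3 | cut -d- -f2                # -> ce,
echo "$version_string" | cut -d' ' -f3 | cut -d- -f2 | cut -d, -f1  # -> ce  ("ee" on Enterprise Edition)
```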
+################################################################ +is_docker_ee() { + if [[ -z "${IS_DOCKER_EE}" ]] ; then + if [[ -z "${DOCKER_EDITION}" ]]; then + check_docker_version + fi + + IS_DOCKER_EE="$FALSE" + if [[ "${DOCKER_EDITION}" == "ee" ]]; then + IS_DOCKER_EE="$TRUE" + fi + + readonly IS_DOCKER_EE + fi + + check_boolean "${IS_DOCKER_EE}" +} + +################################################################ +# Check whether the version of docker installed is supported for the OS +# version that was detected +# +# Globals: +# DOCKER_OS_COMPAT - Information on OS/Docker compatibility +# DOCKER_OS_COMPAT_CHECK -- (out) PASS/FAIL docker version is supported on the OS +# Arguments: +# None +# Returns: +# true if a supported version of docker is installed. +################################################################ +check_docker_os_compatibility() { + if [[ -z "${DOCKER_OS_COMPAT}" ]] ; then + + if [[ -z "${DOCKER_VERSION_CHECK}" ]]; then + check_docker_version + fi + + if [[ -z "${OS_NAME}" ]]; then + get_os_name + fi + + DOCKER_OS_COMPAT_CHECK="$PASS" + DOCKER_OS_COMPAT="No Compatibility problems between OS and Docker" + + if is_rhel && is_docker_ee ; then + DOCKER_OS_COMPAT_CHECK="$FAIL" + DOCKER_OS_COMPAT="Docker EE on Red Hat Enterprise Linux is not supported." + fi + + fi + + check_passfail "${DOCKER_OS_COMPAT}" +} + + ################################################################ # Check whether docker-compose is installed. # @@ -1730,12 +1929,13 @@ generate_report() { [[ "$#" -le 1 ]] || error_exit "usage: $FUNCNAME [ ]" local -r target="${1:-$OUTPUT_FILE}" - cat < /dev/null > "${OUTPUT_FILE_TOC}" + # Reserve this section number; the body will be generated later. + echo "1. Problems found" > "${OUTPUT_FILE_TOC}" local -r header="${REPORT_SEPARATOR} System check for Black Duck Software Hub version $HUB_VERSION " - local report=$(cat < "${target}" - if [[ -n "${failures}" ]]; then - # Insert the failure section at the head of the report. 
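The `is_rhel`/`is_docker_ee` pair added above drives the new OS/Docker compatibility check. The RHEL test keys off the third whitespace-separated field of `/etc/redhat-release`; a quick illustration with typical release strings (the sample strings are illustrative, not taken from this repository):

```
# Third field is "Enterprise" on RHEL but not on CentOS, which also ships /etc/redhat-release.
echo "Red Hat Enterprise Linux Server release 7.5 (Maipo)" | cut -d' ' -f3   # -> Enterprise
echo "CentOS Linux release 7.5.1804 (Core)" | cut -d' ' -f3                  # -> release
```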
- report="$(generate_report_section "Problems detected" 0) + (echo "Table of contents:"; echo; sort -n "${OUTPUT_FILE_TOC}"; echo) >> "${target}" + cat >> "${target}" <> "${target}" +END + echo "$report" >> "${target}" } @@ -2004,6 +2215,7 @@ main() { check_disk_space get_processes get_package_list + get_sysctl_keepalive check_entropy get_interface_info @@ -2015,6 +2227,7 @@ main() { check_docker_version get_docker_compose_version check_docker_startup_info + check_docker_os_compatibility get_docker_images get_docker_containers get_docker_processes diff --git a/docker-compose/docker-compose.dbmigrate.yml b/docker-compose/docker-compose.dbmigrate.yml index 23d6a9d..39d49f0 100644 --- a/docker-compose/docker-compose.dbmigrate.yml +++ b/docker-compose/docker-compose.dbmigrate.yml @@ -1,7 +1,7 @@ version: '2.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: - cert-volume:/etc/cfssl healthcheck: @@ -12,7 +12,7 @@ services: user: 'cfssl:root' logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: - log-volume:/var/lib/logstash/data healthcheck: @@ -23,7 +23,7 @@ services: user: 'logstash:root' postgres: - image: blackducksoftware/hub-postgres:4.7.3 + image: blackducksoftware/hub-postgres:4.8.0 links: - cfssl - logstash diff --git a/docker-compose/docker-compose.externaldb.yml b/docker-compose/docker-compose.externaldb.yml index 8637c72..9a26432 100644 --- a/docker-compose/docker-compose.externaldb.yml +++ b/docker-compose/docker-compose.externaldb.yml @@ -1,7 +1,7 @@ version: '2.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: ['cert-volume:/etc/cfssl'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] @@ -12,7 +12,7 @@ services: restart: always mem_limit: 512M logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: ['log-volume:/var/lib/logstash/data'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] @@ -23,7 +23,7 @@ services: restart: always mem_limit: 1024m registration: - image: blackducksoftware/hub-registration:4.7.3 + image: blackducksoftware/hub-registration:4.8.0 links: [logstash] volumes: ['config-volume:/opt/blackduck/hub/hub-registration/config'] env_file: hub-proxy.env @@ -37,8 +37,9 @@ services: restart: always mem_limit: 640M zookeeper: - image: blackducksoftware/hub-zookeeper:4.7.3 + image: blackducksoftware/hub-zookeeper:4.8.0 links: [logstash] + volumes: ['zookeeper-data-volume:/opt/blackduck/zookeeper/data', 'zookeeper-datalog-volume:/opt/blackduck/zookeeper/datalog'] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] interval: 30s @@ -48,7 +49,7 @@ services: restart: always mem_limit: 384M solr: - image: blackducksoftware/hub-solr:4.7.3 + image: blackducksoftware/hub-solr:4.8.0 links: [logstash, zookeeper] volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] healthcheck: @@ -60,7 +61,7 @@ services: restart: always mem_limit: 640M authentication: - image: blackducksoftware/hub-authentication:4.7.3 + image: blackducksoftware/hub-authentication:4.8.0 links: [cfssl, logstash, registration, zookeeper] volumes: ['authentication-volume:/opt/blackduck/hub/hub-authentication/security'] env_file: [hub-proxy.env, hub-postgres.env] @@ -75,7 +76,7 @@ services: restart: always mem_limit: 1024M webapp: - image: 
blackducksoftware/hub-webapp:4.7.3 + image: blackducksoftware/hub-webapp:4.8.0 links: [cfssl, logstash, registration, zookeeper, solr] volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] env_file: [hub-proxy.env, hub-postgres.env] @@ -90,7 +91,7 @@ services: restart: always mem_limit: 2560M scan: - image: blackducksoftware/hub-scan:4.7.3 + image: blackducksoftware/hub-scan:4.8.0 links: [cfssl, logstash, registration, zookeeper] volumes: ['scan-volume:/opt/blackduck/hub/hub-scan/security'] env_file: [hub-proxy.env, hub-postgres.env] @@ -105,7 +106,7 @@ services: restart: always mem_limit: 2560M jobrunner: - image: blackducksoftware/hub-jobrunner:4.7.3 + image: blackducksoftware/hub-jobrunner:4.8.0 links: [cfssl, logstash, registration, zookeeper, solr] env_file: [hub-proxy.env, hub-postgres.env] healthcheck: @@ -118,7 +119,7 @@ services: restart: always mem_limit: 4608M webserver: - image: blackducksoftware/hub-nginx:4.7.3 + image: blackducksoftware/hub-nginx:4.8.0 ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] @@ -133,7 +134,7 @@ services: restart: always mem_limit: 640M documentation: - image: blackducksoftware/hub-documentation:4.7.3 + image: blackducksoftware/hub-documentation:4.8.0 links: [logstash] user: tomcat:root healthcheck: @@ -146,4 +147,4 @@ services: mem_limit: 512M volumes: {authentication-volume: null, cert-volume: null, config-volume: null, log-volume: null, webserver-volume: null, webapp-volume: null, scan-volume: null, solr6-volume: null, - monitor-log-volume: null} + monitor-log-volume: null, zookeeper-data-volume: null, zookeeper-datalog-volume: null} diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index 1c820be..1c6172b 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -1,7 +1,7 @@ version: '2.1' services: postgres: - image: blackducksoftware/hub-postgres:4.7.3 + image: blackducksoftware/hub-postgres:4.8.0 ports: ['55436:5432'] links: [cfssl, logstash] volumes: ['postgres96-data-volume:/var/lib/postgresql/data'] @@ -16,7 +16,7 @@ services: authentication: links: [postgres, cfssl, logstash, registration, zookeeper] user: authentication:root - image: blackducksoftware/hub-authentication:4.7.3 + image: blackducksoftware/hub-authentication:4.8.0 volumes: ['authentication-volume:/opt/blackduck/hub/hub-authentication/security'] env_file: [hub-proxy.env] healthcheck: @@ -31,7 +31,7 @@ services: webapp: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: tomcat:root - image: blackducksoftware/hub-webapp:4.7.3 + image: blackducksoftware/hub-webapp:4.8.0 volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] env_file: [hub-proxy.env] healthcheck: @@ -46,7 +46,7 @@ services: scan: links: [postgres, cfssl, logstash, registration, zookeeper] user: tomcat:root - image: blackducksoftware/hub-scan:4.7.3 + image: blackducksoftware/hub-scan:4.8.0 volumes: ['scan-volume:/opt/blackduck/hub/hub-scan/security'] env_file: [hub-proxy.env] healthcheck: @@ -61,7 +61,7 @@ services: jobrunner: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: jobrunner:root - image: blackducksoftware/hub-jobrunner:4.7.3 + image: blackducksoftware/hub-jobrunner:4.8.0 env_file: [hub-proxy.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh] @@ -72,7 +72,7 @@ services: restart: always mem_limit: 4608M cfssl: - image: 
blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: ['cert-volume:/etc/cfssl'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] @@ -83,7 +83,7 @@ services: restart: always mem_limit: 512M logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: ['log-volume:/var/lib/logstash/data'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] @@ -94,7 +94,7 @@ services: restart: always mem_limit: 1024m registration: - image: blackducksoftware/hub-registration:4.7.3 + image: blackducksoftware/hub-registration:4.8.0 links: [logstash] volumes: ['config-volume:/opt/blackduck/hub/hub-registration/config'] env_file: hub-proxy.env @@ -108,8 +108,9 @@ services: restart: always mem_limit: 640M zookeeper: - image: blackducksoftware/hub-zookeeper:4.7.3 + image: blackducksoftware/hub-zookeeper:4.8.0 links: [logstash] + volumes: ['zookeeper-data-volume:/opt/blackduck/zookeeper/data', 'zookeeper-datalog-volume:/opt/blackduck/zookeeper/datalog'] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] interval: 30s @@ -119,7 +120,7 @@ services: restart: always mem_limit: 384M solr: - image: blackducksoftware/hub-solr:4.7.3 + image: blackducksoftware/hub-solr:4.8.0 links: [logstash, zookeeper] volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] healthcheck: @@ -131,7 +132,7 @@ services: restart: always mem_limit: 640M webserver: - image: blackducksoftware/hub-nginx:4.7.3 + image: blackducksoftware/hub-nginx:4.8.0 ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] @@ -146,7 +147,7 @@ services: restart: always mem_limit: 640M documentation: - image: blackducksoftware/hub-documentation:4.7.3 + image: blackducksoftware/hub-documentation:4.8.0 links: [logstash] user: tomcat:root healthcheck: @@ -159,4 +160,5 @@ services: mem_limit: 512M volumes: {postgres96-data-volume: null, authentication-volume: null, cert-volume: null, config-volume: null, log-volume: null, webserver-volume: null, webapp-volume: null, - scan-volume: null, solr6-volume: null, monitor-log-volume: null} + scan-volume: null, solr6-volume: null, monitor-log-volume: null, zookeeper-data-volume: null, + zookeeper-datalog-volume: null} diff --git a/docker-compose/hub-proxy.env b/docker-compose/hub-proxy.env index f1ce2e1..0e79836 100644 --- a/docker-compose/hub-proxy.env +++ b/docker-compose/hub-proxy.env @@ -27,7 +27,7 @@ BLACKDUCK_SWAGGER_PROXY_PREFIX= BLACKDUCK_SWAGGER_DISPLAYALL= # Do not change -HUB_VERSION=4.7.3 +HUB_VERSION=4.8.0 # Whether or not to include ignored components in the reports BLACKDUCK_REPORT_IGNORED_COMPONENTS=false diff --git a/docker-compose/hub-webserver.env b/docker-compose/hub-webserver.env index e2798d6..ac3ba03 100644 --- a/docker-compose/hub-webserver.env +++ b/docker-compose/hub-webserver.env @@ -20,4 +20,7 @@ IPV4_ONLY=0 # Change this to 1 if you are also using Hub Alert. # This will enable Alert routes in 'webserver' # See Hub Alert documentation for details. 
-USE_ALERT=0 \ No newline at end of file +USE_ALERT=0 + +# Change this to 1 if you are also using Hub Binary uploads +USE_BINARY_UPLOADS=0 diff --git a/docker-swarm/bin/hub_add_replication_user.sh b/docker-swarm/bin/hub_add_replication_user.sh index ac36ec0..c416950 100755 --- a/docker-swarm/bin/hub_add_replication_user.sh +++ b/docker-swarm/bin/hub_add_replication_user.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/hub_create_data_dump.sh b/docker-swarm/bin/hub_create_data_dump.sh index 4b538e6..775cf54 100755 --- a/docker-swarm/bin/hub_create_data_dump.sh +++ b/docker-swarm/bin/hub_create_data_dump.sh @@ -7,7 +7,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { @@ -68,7 +68,28 @@ docker exec -i ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds exitCode=$? [ ${exitCode} -ne 0 ] && fail "Cannot create the dump file from the container [Container Id: ${container_id}]" 8 -docker cp ${container_id}:/tmp/bds_hub.dump ${local_dest_dump_file} +# Create an absolute path to copy to, adds support for symbolic links +if [ ! -d "$local_dest_dump_file" ]; then + cd `dirname $local_dest_dump_file` + base_file=`basename $local_dest_dump_file` + symlink_count=0 + while [ -L "$base_file" ]; do + (( symlink_count++ )) + if [ "$symlink_count" -gt 100 ]; then + fail "MAXSYMLINK level reached." 1 + fi + base_file=`readlink $base_file` + cd `dirname $base_file` + base_file=`basename $base_file` + done +present_dir=`pwd -P` +local_absolute_path=$present_dir/$base_file + +else + local_absolute_path=${local_dest_dump_file} +fi + +docker cp ${container_id}:/tmp/bds_hub.dump "${local_absolute_path}" exitCode=$? 
[ ${exitCode} -ne 0 ] && fail "Was not able to copy the dump file over [Container Id: ${container_id}]" 9 diff --git a/docker-swarm/bin/hub_db_migrate.sh b/docker-swarm/bin/hub_db_migrate.sh index 8706b9f..4d799d0 100755 --- a/docker-swarm/bin/hub_db_migrate.sh +++ b/docker-swarm/bin/hub_db_migrate.sh @@ -13,7 +13,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/hub_replication_changepassword.sh b/docker-swarm/bin/hub_replication_changepassword.sh index 9cce664..2cffa4f 100755 --- a/docker-swarm/bin/hub_replication_changepassword.sh +++ b/docker-swarm/bin/hub_replication_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/hub_reportdb_changepassword.sh b/docker-swarm/bin/hub_reportdb_changepassword.sh index 287dac8..7d90ba5 100755 --- a/docker-swarm/bin/hub_reportdb_changepassword.sh +++ b/docker-swarm/bin/hub_reportdb_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/system_check.sh b/docker-swarm/bin/system_check.sh index d487e12..79145ec 100755 --- a/docker-swarm/bin/system_check.sh +++ b/docker-swarm/bin/system_check.sh @@ -27,7 +27,7 @@ set -o noglob #set -o xtrace -readonly HUB_VERSION="${HUB_VERSION:-4.7.3}" +readonly HUB_VERSION="${HUB_VERSION:-4.8.0}" readonly OUTPUT_FILE="${SYSTEM_CHECK_OUTPUT_FILE:-$(date +"system_check_%Y%m%dT%H%M%S%z.txt")}" readonly OUTPUT_FILE_TOC="$(mktemp -t "$(basename "${OUTPUT_FILE}").XXXXXXXXXX")" trap 'rm -f "${OUTPUT_FILE_TOC}"' EXIT @@ -50,6 +50,10 @@ readonly REQ_DISK_MB=250000 readonly REQ_DOCKER_VERSIONS="17.06.x 17.09.x 17.12.x 18.03.x" readonly REQ_ENTROPY=100 +readonly REQ_SYSCTL_KEEPALIVE_TIME=600 +readonly REQ_SYSCTL_KEEPALIVE_INTERVAL=30 +readonly REQ_SYSCTL_KEEPALIVE_PROBES=10 + readonly TRUE="TRUE" readonly FALSE="FALSE" readonly UNKNOWN="UNKNOWN" # Yay for tri-valued booleans! Treated as $FALSE. @@ -192,6 +196,8 @@ is_root() { # OS_NAME -- (out) operating system name # IS_LINUX -- (out) TRUE/FALSE # IS_MACOS -- (out) TRUE/FALSE. macOS is not considered to be Linux. +# IS_REDHAT -- (out) TRUE/FALSE +# IS_RHEL -- (out) TRUE/FALSE # Arguments: # None # Returns: @@ -204,12 +210,19 @@ get_os_name() { # Find the local release name. 
IS_LINUX="$TRUE" IS_MACOS="$FALSE" + IS_REDHAT="$FALSE" + IS_RHEL="$FALSE" if have_command lsb_release ; then OS_NAME="$(lsb_release -a ; echo ; echo -n uname -a:\ ; uname -a)" elif [[ -e /etc/fedora-release ]]; then OS_NAME="$(cat /etc/fedora-release)" elif [[ -e /etc/redhat-release ]]; then OS_NAME="$(cat /etc/redhat-release)" + IS_REDHAT="$TRUE" + local ENTERPRISE="$(cat /etc/redhat-release | cut -d' ' -f 3)" + if [[ "${ENTERPRISE}" == 'Enterprise' ]]; then + IS_RHEL="$TRUE" + fi elif [[ -e /etc/centos-release ]]; then OS_NAME="$(cat /etc/centos-release)" elif [[ -e /etc/SuSE-release ]]; then @@ -229,6 +242,8 @@ get_os_name() { readonly OS_NAME readonly IS_LINUX readonly IS_MACOS + readonly IS_REDHAT + readonly IS_RHEL fi check_boolean "${IS_LINUX}" @@ -250,6 +265,40 @@ is_linux() { check_boolean "${IS_LINUX}" } + +################################################################ +# Determine whether the current operating system is a Red Hat Linux +# variant. +# +# Globals: +# IS_REDHAT -- (out) TRUE/FALSE, true if the system is red hat +# Arguments: +# None +# Returns: +# true if this is a Red Hat system +################################################################ +is_redhat() { + [[ -n "${IS_REDHAT}" ]] || get_os_name + check_boolean "${IS_REDHAT}" +} + +################################################################ +# Determine whether the current operating system is a Red Hat +# Enterprise Linux variant. +# +# Globals: +# IS_RHEL -- (out) TRUE/FALSE, true if the system is red hat +# enterprise linux +# Arguments: +# None +# Returns: +# true if this is a Red Hat system +################################################################ +is_rhel() { + [[ -n "${IS_RHEL}" ]] || get_os_name + check_boolean "${IS_RHEL}" +} + ################################################################ # Expose the running operating system name. See also # http://linuxmafia.com/faq/Admin/release-files.html @@ -622,6 +671,76 @@ echo_port_status() { fi } + +################################################################ +# Check critical IPV4 syctl values on linux +# +# Globals: +# SYSCTL_KEEPALIVE_TIME -- (out) - The current keepalive time +# SYSCTL_KEEPALIVE_INTERVAL -- (out) - The current keepalive interval +# SYSCTL_KEEPALIVE_PROBES -- (out) - The current # of keepalive probes +# SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS -- (out) TRUE/FALSE +# +# +################################################################ +get_sysctl_keepalive() { + + if [[ -z "${SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS}" ]] ; then + + if ! is_linux ; then + readonly SYSCTL_KEEPALIVE_TIME="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_INTERVAL="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_PROBES="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="UNKNOWN" + return -1 + fi + + if ! have_command sysctl ; then + readonly SYSCTL_KEEPALIVE_TIME="Can't check sysctl keepalive, sysctl not found." + readonly SYSCTL_KEEPALIVE_INTERVAL="Can't check sysctl keepalive on non-linux system." + readonly SYSCTL_KEEPALIVE_PROBES="Can't check sysctl keepalive on non-linux system." 
+ readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="UNKNOWN" + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="UNKNOWN" + return -1 + fi + + echo "Checking sysctl keepalive parameters..." + readonly SYSCTL_KEEPALIVE_TIME=$(sysctl net.ipv4.tcp_keepalive_time | awk -F' = ' '{print $2}') + readonly SYSCTL_KEEPALIVE_INTERVAL=$(sysctl net.ipv4.tcp_keepalive_intvl | awk -F' = ' '{print $2}') + readonly SYSCTL_KEEPALIVE_PROBES=$(sysctl net.ipv4.tcp_keepalive_probes | awk -F' = ' '{print $2}') + + SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="TRUE" + SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="TRUE" + SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="TRUE" + + if [[ "${SYSCTL_KEEPALIVE_TIME}" -lt "${REQ_SYSCTL_KEEPALIVE_TIME}" ]] ; then + SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS="FALSE" + fi + + if [[ "${SYSCTL_KEEPALIVE_INTERVAL}" -lt "${REQ_SYSCTL_KEEPALIVE_INTERVAL}" ]] ; then + SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS="FALSE" + fi + + if [[ "${SYSCTL_KEEPALIVE_PROBES}" -lt "${REQ_SYSCTL_KEEPALIVE_PROBES}" ]] ; then + SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS="FALSE" + fi + + readonly SYSCTL_KEEPALIVE_TIME_MEETS_RECOMMENDATIONS + readonly SYSCTL_KEEPALIVE_INTERVAL_MEETS_RECOMMENDATIONS + readonly SYSCTL_KEEPALIVE_PROBES_MEETS_RECOMMENDATIONS + + fi + + + +} + + ################################################################ # Get a list of running processes. # @@ -668,6 +787,7 @@ is_docker_present() { # # Globals: # DOCKER_VERSION -- (out) docker version. +# DOCKER_EDITION - (out) ee/ce for enterprise/consumer # DOCKER_VERSION_CHECK -- (out) PASS/FAIL docker version is supported. # REQ_DOCKER_VERSIONS -- (in) supported docker versions. # Arguments: @@ -682,19 +802,98 @@ check_docker_version() { return 1 fi + # Read the docker version strings and parse them + # The string will look something like this: + # "Docker version 18.03.1-ce, build 9ee9f40" + # 3rd field based on cutting by " " is the version string + # 2nd field based on cutting by "-" is the edition with a comma on the end + # 1st field based on cutting with "," will be the edition without the comma + # edition is "ee" for Enterprise and "ce" for community echo "Checking docker version..." readonly DOCKER_VERSION="$(docker --version)" + readonly DOCKER_EDITION="$(docker --version | cut -d' ' -f3 | cut -d- -f2 | cut -d, -f1)" local docker_base_version="$(docker --version | cut -d' ' -f3 | cut -d. -f1-2)" + + if [[ ! "${REQ_DOCKER_VERSIONS}" =~ ${docker_base_version}.x ]]; then readonly DOCKER_VERSION_CHECK="$FAIL. Running ${DOCKER_VERSION}, supported versions are: ${REQ_DOCKER_VERSIONS}" else readonly DOCKER_VERSION_CHECK="$PASS. ${DOCKER_VERSION} installed." fi + fi check_passfail "${DOCKER_VERSION_CHECK}" } + +################################################################ +# Check whether a supported version of docker is installed +# +# Globals: +# DOCKER_VERSION -- (out) docker version. +# DOCKER_EDITION - (out) ee/ce for enterprise/consumer +# DOCKER_VERSION_CHECK -- (out) PASS/FAIL docker version is supported. +# REQ_DOCKER_VERSIONS -- (in) supported docker versions. +# Arguments: +# None +# Returns: +# true if a supported version of docker is installed. 
+################################################################ +is_docker_ee() { + if [[ -z "${IS_DOCKER_EE}" ]] ; then + if [[ -z "${DOCKER_EDITION}" ]]; then + check_docker_version + fi + + IS_DOCKER_EE="$FALSE" + if [[ "${DOCKER_EDITION}" == "ee" ]]; then + IS_DOCKER_EE="$TRUE" + fi + + readonly IS_DOCKER_EE + fi + + check_boolean "${IS_DOCKER_EE}" +} + +################################################################ +# Check whether the version of docker installed is supported for the OS +# version that was detected +# +# Globals: +# DOCKER_OS_COMPAT - Information on OS/Docker compatibility +# DOCKER_OS_COMPAT_CHECK -- (out) PASS/FAIL docker version is supported on the OS +# Arguments: +# None +# Returns: +# true if a supported version of docker is installed. +################################################################ +check_docker_os_compatibility() { + if [[ -z "${DOCKER_OS_COMPAT}" ]] ; then + + if [[ -z "${DOCKER_VERSION_CHECK}" ]]; then + check_docker_version + fi + + if [[ -z "${OS_NAME}" ]]; then + get_os_name + fi + + DOCKER_OS_COMPAT_CHECK="$PASS" + DOCKER_OS_COMPAT="No Compatibility problems between OS and Docker" + + if is_rhel && is_docker_ee ; then + DOCKER_OS_COMPAT_CHECK="$FAIL" + DOCKER_OS_COMPAT="Docker EE on Red Hat Enterprise Linux is not supported." + fi + + fi + + check_passfail "${DOCKER_OS_COMPAT}" +} + + ################################################################ # Check whether docker-compose is installed. # @@ -1730,12 +1929,13 @@ generate_report() { [[ "$#" -le 1 ]] || error_exit "usage: $FUNCNAME [ ]" local -r target="${1:-$OUTPUT_FILE}" - cat < /dev/null > "${OUTPUT_FILE_TOC}" + # Reserve this section number; the body will be generated later. + echo "1. Problems found" > "${OUTPUT_FILE_TOC}" local -r header="${REPORT_SEPARATOR} System check for Black Duck Software Hub version $HUB_VERSION " - local report=$(cat < "${target}" - if [[ -n "${failures}" ]]; then - # Insert the failure section at the head of the report. 
- report="$(generate_report_section "Problems detected" 0) + (echo "Table of contents:"; echo; sort -n "${OUTPUT_FILE_TOC}"; echo) >> "${target}" + cat >> "${target}" <> "${target}" +END + echo "$report" >> "${target}" } @@ -2004,6 +2215,7 @@ main() { check_disk_space get_processes get_package_list + get_sysctl_keepalive check_entropy get_interface_info @@ -2015,6 +2227,7 @@ main() { check_docker_version get_docker_compose_version check_docker_startup_info + check_docker_os_compatibility get_docker_images get_docker_containers get_docker_processes diff --git a/docker-swarm/docker-compose.dbmigrate.yml b/docker-swarm/docker-compose.dbmigrate.yml index df7fdf1..d398a9f 100644 --- a/docker-swarm/docker-compose.dbmigrate.yml +++ b/docker-swarm/docker-compose.dbmigrate.yml @@ -1,7 +1,7 @@ version: '3.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: - cert-volume:/etc/cfssl healthcheck: @@ -12,7 +12,7 @@ services: user: 'cfssl:root' logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: - log-volume:/var/lib/logstash/data healthcheck: @@ -23,7 +23,7 @@ services: user: 'logstash:root' postgres: - image: blackducksoftware/hub-postgres:4.7.3 + image: blackducksoftware/hub-postgres:4.8.0 links: - cfssl - logstash diff --git a/docker-swarm/docker-compose.externaldb.yml b/docker-swarm/docker-compose.externaldb.yml index 91e52ff..18b551f 100644 --- a/docker-swarm/docker-compose.externaldb.yml +++ b/docker-swarm/docker-compose.externaldb.yml @@ -1,7 +1,7 @@ version: '3.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: ['cert-volume:/etc/cfssl'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] @@ -16,7 +16,7 @@ services: limits: {memory: 640M} reservations: {memory: 640M} logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: ['log-volume:/var/lib/logstash/data'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] @@ -31,7 +31,7 @@ services: limits: {memory: 1024M} reservations: {memory: 1024M} registration: - image: blackducksoftware/hub-registration:4.7.3 + image: blackducksoftware/hub-registration:4.8.0 links: [logstash] volumes: ['config-volume:/opt/blackduck/hub/hub-registration/config'] env_file: hub-proxy.env @@ -49,8 +49,9 @@ services: limits: {memory: 640M} reservations: {memory: 640M} zookeeper: - image: blackducksoftware/hub-zookeeper:4.7.3 + image: blackducksoftware/hub-zookeeper:4.8.0 links: [logstash] + volumes: ['zookeeper-data-volume:/opt/blackduck/zookeeper/data', 'zookeeper-datalog-volume:/opt/blackduck/zookeeper/datalog'] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] interval: 30s @@ -64,7 +65,7 @@ services: limits: {memory: 384M} reservations: {memory: 384M} solr: - image: blackducksoftware/hub-solr:4.7.3 + image: blackducksoftware/hub-solr:4.8.0 links: [logstash, zookeeper] volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] healthcheck: @@ -80,7 +81,7 @@ services: limits: {memory: 640M} reservations: {memory: 640M} authentication: - image: blackducksoftware/hub-authentication:4.7.3 + image: blackducksoftware/hub-authentication:4.8.0 links: [cfssl, logstash, registration, zookeeper] volumes: ['authentication-volume:/opt/blackduck/hub/hub-authentication/security'] env_file: [hub-proxy.env, 
hub-postgres.env] @@ -99,7 +100,7 @@ services: limits: {cpus: '1', memory: 1024M} reservations: {memory: 1024M} webapp: - image: blackducksoftware/hub-webapp:4.7.3 + image: blackducksoftware/hub-webapp:4.8.0 links: [cfssl, logstash, registration, zookeeper, solr] volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] env_file: [hub-proxy.env, hub-postgres.env] @@ -118,7 +119,7 @@ services: limits: {cpus: '1', memory: 2560M} reservations: {cpus: '1', memory: 2560M} scan: - image: blackducksoftware/hub-scan:4.7.3 + image: blackducksoftware/hub-scan:4.8.0 links: [cfssl, logstash, registration, zookeeper] volumes: ['scan-volume:/opt/blackduck/hub/hub-scan/security'] env_file: [hub-proxy.env, hub-postgres.env] @@ -137,7 +138,7 @@ services: limits: {cpus: '1', memory: 2560M} reservations: {cpus: '1', memory: 2560M} jobrunner: - image: blackducksoftware/hub-jobrunner:4.7.3 + image: blackducksoftware/hub-jobrunner:4.8.0 links: [cfssl, logstash, registration, zookeeper, solr] env_file: [hub-proxy.env, hub-postgres.env] healthcheck: @@ -154,7 +155,7 @@ services: limits: {cpus: '1', memory: 4608M} reservations: {cpus: '1', memory: 4608M} webserver: - image: blackducksoftware/hub-nginx:4.7.3 + image: blackducksoftware/hub-nginx:4.8.0 ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] @@ -173,7 +174,7 @@ services: limits: {memory: 512M} reservations: {memory: 512M} documentation: - image: blackducksoftware/hub-documentation:4.7.3 + image: blackducksoftware/hub-documentation:4.8.0 links: [logstash] user: tomcat:root healthcheck: @@ -190,4 +191,4 @@ services: reservations: {memory: 512M} volumes: {authentication-volume: null, cert-volume: null, config-volume: null, log-volume: null, webserver-volume: null, webapp-volume: null, scan-volume: null, solr6-volume: null, - monitor-log-volume: null} + monitor-log-volume: null, zookeeper-data-volume: null, zookeeper-datalog-volume: null} diff --git a/docker-swarm/docker-compose.yml b/docker-swarm/docker-compose.yml index 2d1a466..6a384bb 100644 --- a/docker-swarm/docker-compose.yml +++ b/docker-swarm/docker-compose.yml @@ -1,7 +1,7 @@ version: '3.1' services: postgres: - image: blackducksoftware/hub-postgres:4.7.3 + image: blackducksoftware/hub-postgres:4.8.0 ports: ['55436:5432'] links: [cfssl, logstash] volumes: ['postgres96-data-volume:/var/lib/postgresql/data'] @@ -20,7 +20,7 @@ services: authentication: links: [postgres, cfssl, logstash, registration, zookeeper] user: authentication:root - image: blackducksoftware/hub-authentication:4.7.3 + image: blackducksoftware/hub-authentication:4.8.0 volumes: ['authentication-volume:/opt/blackduck/hub/hub-authentication/security'] env_file: [hub-proxy.env] healthcheck: @@ -39,7 +39,7 @@ services: webapp: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: tomcat:root - image: blackducksoftware/hub-webapp:4.7.3 + image: blackducksoftware/hub-webapp:4.8.0 volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] env_file: [hub-proxy.env] healthcheck: @@ -58,7 +58,7 @@ services: scan: links: [postgres, cfssl, logstash, registration, zookeeper] user: tomcat:root - image: blackducksoftware/hub-scan:4.7.3 + image: blackducksoftware/hub-scan:4.8.0 volumes: ['scan-volume:/opt/blackduck/hub/hub-scan/security'] env_file: [hub-proxy.env] healthcheck: @@ -77,7 +77,7 @@ services: jobrunner: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: jobrunner:root - 
image: blackducksoftware/hub-jobrunner:4.7.3 + image: blackducksoftware/hub-jobrunner:4.8.0 env_file: [hub-proxy.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh] @@ -92,7 +92,7 @@ services: limits: {cpus: '1', memory: 4608M} reservations: {cpus: '1', memory: 4608M} cfssl: - image: blackducksoftware/hub-cfssl:4.7.3 + image: blackducksoftware/hub-cfssl:4.8.0 volumes: ['cert-volume:/etc/cfssl'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] @@ -107,7 +107,7 @@ services: limits: {memory: 640M} reservations: {memory: 640M} logstash: - image: blackducksoftware/hub-logstash:4.7.3 + image: blackducksoftware/hub-logstash:4.8.0 volumes: ['log-volume:/var/lib/logstash/data'] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] @@ -122,7 +122,7 @@ services: limits: {memory: 1024M} reservations: {memory: 1024M} registration: - image: blackducksoftware/hub-registration:4.7.3 + image: blackducksoftware/hub-registration:4.8.0 links: [logstash] volumes: ['config-volume:/opt/blackduck/hub/hub-registration/config'] env_file: hub-proxy.env @@ -140,8 +140,9 @@ services: limits: {memory: 640M} reservations: {memory: 640M} zookeeper: - image: blackducksoftware/hub-zookeeper:4.7.3 + image: blackducksoftware/hub-zookeeper:4.8.0 links: [logstash] + volumes: ['zookeeper-data-volume:/opt/blackduck/zookeeper/data', 'zookeeper-datalog-volume:/opt/blackduck/zookeeper/datalog'] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] interval: 30s @@ -155,7 +156,7 @@ services: limits: {memory: 384M} reservations: {memory: 384M} solr: - image: blackducksoftware/hub-solr:4.7.3 + image: blackducksoftware/hub-solr:4.8.0 links: [logstash, zookeeper] volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] healthcheck: @@ -171,7 +172,7 @@ services: limits: {memory: 640M} reservations: {memory: 640M} webserver: - image: blackducksoftware/hub-nginx:4.7.3 + image: blackducksoftware/hub-nginx:4.8.0 ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] @@ -190,7 +191,7 @@ services: limits: {memory: 512M} reservations: {memory: 512M} documentation: - image: blackducksoftware/hub-documentation:4.7.3 + image: blackducksoftware/hub-documentation:4.8.0 links: [logstash] user: tomcat:root healthcheck: @@ -207,4 +208,5 @@ services: reservations: {memory: 512M} volumes: {postgres96-data-volume: null, authentication-volume: null, cert-volume: null, config-volume: null, log-volume: null, webserver-volume: null, webapp-volume: null, - scan-volume: null, solr6-volume: null, monitor-log-volume: null} + scan-volume: null, solr6-volume: null, monitor-log-volume: null, zookeeper-data-volume: null, + zookeeper-datalog-volume: null} diff --git a/docker-swarm/hub-proxy.env b/docker-swarm/hub-proxy.env index f1ce2e1..0e79836 100644 --- a/docker-swarm/hub-proxy.env +++ b/docker-swarm/hub-proxy.env @@ -27,7 +27,7 @@ BLACKDUCK_SWAGGER_PROXY_PREFIX= BLACKDUCK_SWAGGER_DISPLAYALL= # Do not change -HUB_VERSION=4.7.3 +HUB_VERSION=4.8.0 # Whether or not to include ignored components in the reports BLACKDUCK_REPORT_IGNORED_COMPONENTS=false diff --git a/docker-swarm/hub-webserver.env b/docker-swarm/hub-webserver.env index e2798d6..ac3ba03 100644 --- a/docker-swarm/hub-webserver.env +++ b/docker-swarm/hub-webserver.env @@ -20,4 +20,7 @@ IPV4_ONLY=0 # Change this to 1 if you are also using Hub Alert. 
# This will enable Alert routes in 'webserver' # See Hub Alert documentation for details. -USE_ALERT=0 \ No newline at end of file +USE_ALERT=0 + +# Change this to 1 if you are also using Hub Binary uploads +USE_BINARY_UPLOADS=0 diff --git a/docs/en_US/getting_started.pdf b/docs/en_US/getting_started.pdf new file mode 100644 index 0000000..2e02272 Binary files /dev/null and b/docs/en_US/getting_started.pdf differ diff --git a/docs/en_US/install_compose.pdf b/docs/en_US/install_compose.pdf new file mode 100644 index 0000000..6076251 Binary files /dev/null and b/docs/en_US/install_compose.pdf differ diff --git a/docs/en_US/install_kubernetes.pdf b/docs/en_US/install_kubernetes.pdf new file mode 100644 index 0000000..20b47f3 Binary files /dev/null and b/docs/en_US/install_kubernetes.pdf differ diff --git a/docs/en_US/install_openshift.pdf b/docs/en_US/install_openshift.pdf new file mode 100644 index 0000000..2305ac3 Binary files /dev/null and b/docs/en_US/install_openshift.pdf differ diff --git a/docs/en_US/install_swarm.pdf b/docs/en_US/install_swarm.pdf new file mode 100644 index 0000000..fa742b4 Binary files /dev/null and b/docs/en_US/install_swarm.pdf differ diff --git a/docs/en_US/release_notes.pdf b/docs/en_US/release_notes.pdf new file mode 100644 index 0000000..9dcd0b5 Binary files /dev/null and b/docs/en_US/release_notes.pdf differ diff --git a/docs/en_US/release_notes_bd_hub.pdf b/docs/en_US/release_notes_bd_hub.pdf deleted file mode 100644 index 2f29728..0000000 Binary files a/docs/en_US/release_notes_bd_hub.pdf and /dev/null differ diff --git a/docs/en_US/hub_getting_started.pdf b/docs/ja_JA/getting_started.pdf similarity index 86% rename from docs/en_US/hub_getting_started.pdf rename to docs/ja_JA/getting_started.pdf index 82a7271..2c8b59f 100644 Binary files a/docs/en_US/hub_getting_started.pdf and b/docs/ja_JA/getting_started.pdf differ diff --git a/docs/ja_JA/hub_getting_started.pdf b/docs/ja_JA/hub_getting_started.pdf index 9e40f2a..2c8b59f 100644 Binary files a/docs/ja_JA/hub_getting_started.pdf and b/docs/ja_JA/hub_getting_started.pdf differ diff --git a/docs/ja_JA/hub_install_compose.pdf b/docs/ja_JA/hub_install_compose.pdf index c1e09ff..abb58f9 100644 Binary files a/docs/ja_JA/hub_install_compose.pdf and b/docs/ja_JA/hub_install_compose.pdf differ diff --git a/docs/ja_JA/hub_install_kubernetes.pdf b/docs/ja_JA/hub_install_kubernetes.pdf index fd91dd4..8a27324 100644 Binary files a/docs/ja_JA/hub_install_kubernetes.pdf and b/docs/ja_JA/hub_install_kubernetes.pdf differ diff --git a/docs/ja_JA/hub_install_openshift.pdf b/docs/ja_JA/hub_install_openshift.pdf index 7b9007f..a1bec08 100644 Binary files a/docs/ja_JA/hub_install_openshift.pdf and b/docs/ja_JA/hub_install_openshift.pdf differ diff --git a/docs/ja_JA/hub_install_swarm.pdf b/docs/ja_JA/hub_install_swarm.pdf index 66e76ae..16a0b27 100644 Binary files a/docs/ja_JA/hub_install_swarm.pdf and b/docs/ja_JA/hub_install_swarm.pdf differ diff --git a/docs/en_US/hub_install_compose.pdf b/docs/ja_JA/install_compose.pdf similarity index 64% rename from docs/en_US/hub_install_compose.pdf rename to docs/ja_JA/install_compose.pdf index 1efe1cd..abb58f9 100644 Binary files a/docs/en_US/hub_install_compose.pdf and b/docs/ja_JA/install_compose.pdf differ diff --git a/docs/en_US/hub_install_kubernetes.pdf b/docs/ja_JA/install_kubernetes.pdf similarity index 61% rename from docs/en_US/hub_install_kubernetes.pdf rename to docs/ja_JA/install_kubernetes.pdf index 6c5fa97..8a27324 100644 Binary files 
a/docs/en_US/hub_install_kubernetes.pdf and b/docs/ja_JA/install_kubernetes.pdf differ diff --git a/docs/en_US/hub_install_openshift.pdf b/docs/ja_JA/install_openshift.pdf similarity index 63% rename from docs/en_US/hub_install_openshift.pdf rename to docs/ja_JA/install_openshift.pdf index 50350c3..a1bec08 100644 Binary files a/docs/en_US/hub_install_openshift.pdf and b/docs/ja_JA/install_openshift.pdf differ diff --git a/docs/en_US/hub_install_swarm.pdf b/docs/ja_JA/install_swarm.pdf similarity index 62% rename from docs/en_US/hub_install_swarm.pdf rename to docs/ja_JA/install_swarm.pdf index 5f5b3bb..16a0b27 100644 Binary files a/docs/en_US/hub_install_swarm.pdf and b/docs/ja_JA/install_swarm.pdf differ diff --git a/docs/ja_JA/release_notes.pdf b/docs/ja_JA/release_notes.pdf new file mode 100644 index 0000000..405fc4e Binary files /dev/null and b/docs/ja_JA/release_notes.pdf differ diff --git a/docs/ja_JA/release_notes_bd_hub.pdf b/docs/ja_JA/release_notes_bd_hub.pdf index d5c7453..405fc4e 100644 Binary files a/docs/ja_JA/release_notes_bd_hub.pdf and b/docs/ja_JA/release_notes_bd_hub.pdf differ diff --git a/kubernetes/1-cfssl.yml b/kubernetes/1-cfssl.yml index 4c0e890..f148bfa 100644 --- a/kubernetes/1-cfssl.yml +++ b/kubernetes/1-cfssl.yml @@ -22,7 +22,7 @@ items: - emptyDir: {} name: dir-cfssl containers: - - image: blackducksoftware/hub-cfssl:4.7.3 + - image: blackducksoftware/hub-cfssl:4.8.0 livenessProbe: exec: command: diff --git a/kubernetes/1-cm-hub.yml b/kubernetes/1-cm-hub.yml index 6c87685..9192bf0 100644 --- a/kubernetes/1-cm-hub.yml +++ b/kubernetes/1-cm-hub.yml @@ -59,7 +59,7 @@ items: RUN_SECRETS_DIR: /tmp/secrets - HUB_VERSION: 4.7.3 + HUB_VERSION: 4.8.0 # HOSTS THAT WON'T GO THROUGH A PROXY HUB_PROXY_NON_PROXY_HOSTS: "solr" diff --git a/kubernetes/2-postgres-db-external.yml b/kubernetes/2-postgres-db-external.yml index c055776..e20609b 100644 --- a/kubernetes/2-postgres-db-external.yml +++ b/kubernetes/2-postgres-db-external.yml @@ -25,6 +25,19 @@ items: volumes: - name: postgres-persistent-vol emptyDir: {} +# In some filesystems, red hat's postgres user (26) won't by default be able to write to directories. +# This is a declarative way to fix this (will work iff you're allowed to run containers as uid 0). 
+# initContainers: +# - name: chmod +# command: ["chown", "-R", "26:26", "/var/lib/pgsql/data/" ] +# image: alpine +# volumeMounts: +# - name: postgres-persistent-vol +# mountPath: /var/lib/pgsql/data +# subPath: blackduck +# readOnly: false +# securityContext: +# runAsUser: 0 containers: - name: postgres image: registry.access.redhat.com/rhscl/postgresql-96-rhel7:1 diff --git a/kubernetes/2-postgres-db-internal.yml b/kubernetes/2-postgres-db-internal.yml index 15db64c..4165866 100644 --- a/kubernetes/2-postgres-db-internal.yml +++ b/kubernetes/2-postgres-db-internal.yml @@ -52,7 +52,7 @@ items: - name: postgres securityContext: runAsUser: 70 - image: blackducksoftware/hub-postgres:4.7.3 + image: blackducksoftware/hub-postgres:4.8.0 livenessProbe: exec: command: diff --git a/kubernetes/3-hub.yml b/kubernetes/3-hub.yml index 45c9511..70cba9b 100644 --- a/kubernetes/3-hub.yml +++ b/kubernetes/3-hub.yml @@ -35,7 +35,7 @@ items: volumeMounts: - name: db-passwords mountPath: /tmp/secrets - image: blackducksoftware/hub-jobrunner:4.7.3 + image: blackducksoftware/hub-jobrunner:4.8.0 name: jobrunner livenessProbe: exec: @@ -99,7 +99,7 @@ items: # path: WEBSERVER_CUSTOM_KEY_FILE containers: - name: webserver - image: blackducksoftware/hub-nginx:4.7.3 + image: blackducksoftware/hub-nginx:4.8.0 envFrom: - configMapRef: name: hub-config @@ -167,7 +167,7 @@ items: limits: cpu: 1 memory: 3072M - image: blackducksoftware/hub-webapp:4.7.3 + image: blackducksoftware/hub-webapp:4.8.0 livenessProbe: exec: command: @@ -204,7 +204,7 @@ items: ports: - containerPort: 8443 protocol: TCP - - image: blackducksoftware/hub-logstash:4.7.3 + - image: blackducksoftware/hub-logstash:4.8.0 livenessProbe: exec: command: @@ -246,7 +246,7 @@ items: app: documentation spec: containers: - - image: blackducksoftware/hub-documentation:4.7.3 + - image: blackducksoftware/hub-documentation:4.8.0 livenessProbe: exec: command: @@ -300,7 +300,7 @@ items: memory: "640M" limits: memory: "640M" - image: blackducksoftware/hub-solr:4.7.3 + image: blackducksoftware/hub-solr:4.8.0 livenessProbe: exec: command: @@ -338,7 +338,7 @@ items: - emptyDir: {} name: dir-registration containers: - - image: blackducksoftware/hub-registration:4.7.3 + - image: blackducksoftware/hub-registration:4.8.0 envFrom: - configMapRef: name: hub-config @@ -387,10 +387,12 @@ items: spec: volumes: - emptyDir: {} - name: dir-zookeeper + name: dir-zookeeper-data + - emptyDir: {} + name: dir-zookeeper-datalog containers: - name: zookeeper - image: blackducksoftware/hub-zookeeper:4.7.3 + image: blackducksoftware/hub-zookeeper:4.8.0 envFrom: - configMapRef: name: hub-config @@ -415,8 +417,10 @@ items: - containerPort: 2181 protocol: TCP volumeMounts: - - mountPath: "/opt/blackduck/hub/logs" - name: dir-zookeeper + - mountPath: "/opt/blackduck/zookeeper/data" + name: dir-zookeeper-data + - mountPath: "/opt/blackduck/zookeeper/datalog" + name: dir-zookeeper-datalog - apiVersion: v1 kind: Service metadata: @@ -554,7 +558,7 @@ items: path: HUB_POSTGRES_USER_PASSWORD_FILE containers: - name: hub-scan - image: blackducksoftware/hub-scan:4.7.3 + image: blackducksoftware/hub-scan:4.8.0 livenessProbe: exec: command: @@ -635,7 +639,7 @@ items: - key: blackduck_user path: HUB_POSTGRES_USER_PASSWORD_FILE containers: - - image: blackducksoftware/hub-authentication:4.7.3 + - image: blackducksoftware/hub-authentication:4.8.0 livenessProbe: exec: command: diff --git a/kubernetes/README.md b/kubernetes/README.md index 9996f2b..4d36bec 100644 --- a/kubernetes/README.md +++ 
b/kubernetes/README.md @@ -1,36 +1,40 @@ -# Black Duck Hub On Kubernetes / Openshift. +# Black Duck Hub On Kubernetes / OpenShift -## Existing hub customers: Migrating to a new version. +## Existing Hub customers: Migrating to a new version -### First: 4.6 and earlier, postgres migration required if you have data you need to keep . -If you have a previous version of the hub (4.6 or earlier), migrate your postgres data on your storage mount, so that it +### First: For Hub 4.6 and earlier, postgres migration required if you have data you need to keep +If you have a previous version of the Hub (4.6 or earlier), migrate your postgres data on your storage mount, so that it lives underneath a directory matching the value of the subPath clause in your postgres database. -### Second: bring down the hub, and bring it back up. +### Second: Bring down the Hub, and bring it back up -- Stop all containers for the hub. You can do this by deleting the deployments, make sure you dont lose any data in the process. -- Follow the directions in this respository, replacing the volume mounts with your original mounts in your old hub. +- Stop all the Hub's containers. You can do this by deleting the deployments; make sure you don't lose any data in the process. -At this point, your hub should be happily deployed. Expose its webserver service (or deployment controller) if you havent already, and you can begin scanning. +- Follow the directions in this repository, replacing the volume mounts with your original mounts in your old Hub. + +At this point, your Hub should be happily deployed. Expose its webserver service (or deployment controller) if you haven't already, and you can begin scanning. ## Requirements -The hub is extensively tested on kubernetes 1.8 / openshift 3.6. +The Hub is extensively tested on Kubernetes 1.8 / OpenShift 3.6. Other versions are supported as well, so long as all the API constructs in these YAMLs are supported in the corresponding orchestration version. -### Installing the Hub quickly. +### Installing the Hub quickly + +All commands below assume: + +- you are using the namespace (or OpenShift project name) 'myhub'. -All below commands assume: -- you are using the namespace (or openshift project name) 'myhub'. - you have a cluster with at least 10 cores / 20GB of allocatable memory. + - you have administrative access to your cluster. ### Hub setup instructions #### If you're in a hurry, skip to the quickstart section: -The quickstart section shows how to quickly get a prototypical hub up and running. +The quickstart section shows how to quickly get a prototypical Hub up and running. #### Before you start: @@ -38,35 +42,36 @@ Clone this repository, and cd to `install/hub` to run these commands, so the fil #### Step 0: -Make a namespaces/project for your hub: +Make a namespace/project for your Hub: - For openshift:`oc new-project myhub` + - For Kubernetes:`kubectl create ns myhub` -#### Step 1: Setting up service accounts (if you need them). +#### Step 1: Setting up service accounts (if you need them) This may not be necessary for some users, feel free to skip to the *next* section if you think you don't need to setup any special service accounts (i.e. if you're running in a namespace that has administrative capabilities).
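+If you're not sure whether your account already has enough rights in the namespace, a quick permission check can save you the detour (a minimal sketch only; `myhub` is the example namespace used throughout, and which resources are worth checking depends on your cluster's policies):
+
+```
+# Hypothetical pre-flight checks: can this account create the objects the Hub needs?
+kubectl auth can-i create deployments -n myhub
+kubectl auth can-i create secrets -n myhub
+```
+
+If these return `yes`, you can usually skip straight to Step 2.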
-- First create your service account (Openshift users, use `oc`): +- First create your service account (OpenShift users, use `oc`): ``` kubectl create serviceaccount postgresapp -n myhub ``` - - For openshift: You need to create a service account for the hub, and allow that + - For OpenShift: You need to create a service account for the Hub, and allow that user to run processes as user 70. A generic version of these steps which may work for you is defined below: ``` oc adm policy add-scc-to-user anyuid system:serviceaccount:myhub:postgres ``` - - *Optional for kubernetes*: You may need to create RBAC bindings with your cluster administrator that allow pods to run as any uid. Consult with your kubernetes administrator and show them your installation workflow (as defined below) to determine if this is necessary in your cluster. + - *Optional for Kubernetes*: You may need to create RBAC bindings with your cluster administrator that allow pods to run as any UID. Consult with your Kubernetes administrator and show them your installation workflow (as defined below) to determine if this is necessary in your cluster. -#### Step 2: Create your cfssl container, and the core hub config map. +#### Step 2: Create your cfssl container, and the core Hub config map -Note we may edit the configmap later for external postgres or other settings. For now, leave it as it is by default, and run these commands (openshift users: use `oc` instead of `kubectl`). +Note: We may edit the configmap later for external postgres or other settings. For now, leave it as it is by default, and run these commands (OpenShift users: use `oc` instead of `kubectl`). ``` kubectl create -f 1-cfssl.yml -n myhub @@ -75,21 +80,21 @@ kubectl create -f 1-cm-hub.yml -n myhub #### Step 3: Choose your postgres database type, and then setup your postgres database -There are two ways to run the hub's postgres database, and we refer to them as *internal*, or *external*. - -Choose internal if you don't care about maintaining your own databse, and are able to run containers as any user in your cluster. +There are two ways to run the Hub's postgres database, and we refer to them as *internal* or *external*. -Otherwise, choose external. +Choose internal if you don't care about maintaining your own database and are able to run containers as any user in your cluster; otherwise, choose external. *Note: Obviously, you only need to do ONE of the two below steps, before moving on to step 3 ~ choose EITHER Internal OR External database setup!*. ##### Step 3 (INTERNAL database setup option) -If you are okay using an internal database, and are able to run containers as user 70, then you can (in most cases) just start the hub using the snippet of kubectl create statements below. +If you are okay using an internal database, and are able to run containers as user 70, then you can (in most cases) just start the Hub using the snippet of kubectl create statements below. -- Note: the default yaml files don't have persistent volumes. You will need to replace all emptyDir volumes with a persistentVolumeClaim (or Volume) of your choosing. 1G is enough for all volumes other than postgres. Postgres should have 100G, to ensure it will have plenty of storage even if you do thousands of scans early on. +- Note: The default yaml files don't have persistent volumes. You will need to replace all emptyDir volumes with a persistentVolumeClaim (or Volume) of your choosing. 1G is enough for all volumes other than postgres.
Postgres should have 100G, to ensure it will have plenty of storage even if you do thousands of scans early on. -- Note: before doing this, there is an initPod that runs as user 0 to set storage permissions. If you don't want to run it as user 0, and are sure your storage will be writeable by the postgres user, delete that initPod clause entirely. +- Note: Postgres is known to have problems running in a container when writing to *Gluster-based* persistent volumes. (See [here](https://bugzilla.redhat.com/show_bug.cgi?id=1512691) for details.) If you are using Gluster for your underlying file system, then you should use an *external* database. + +- Note: When installing an internal database, there is an initPod that runs as user 0 to set storage permissions. If you don't want to run it as user 0, and are sure your storage will be writeable by the postgres user, delete that initPod clause entirely. ``` kubectl create -f 2-postgres-db-internal.yml -n myhub @@ -99,15 +104,17 @@ That's it, now, skip ahead to step 4! ##### Step 3 (EXTERNAL database setup option) -Note that if you did the internal database setup step, this obviously is not needed. For a concrete example of this, check the quickstart external db example. +Note: If you set up an internal database, please skip this step. + +For a concrete example of setting up an external database, check the quickstart external db example. -- Note that by 'external' we mean, any postgres other then the official `hub-postgres` image which ships with the blackduck containers. Our official hub-postgres image bootstraps its own schema, and uses CFSSL for authentication. In this case, you will have to setup auth and the schema yourself. +- Note that by 'external' we mean any postgres other than the official `hub-postgres` image that ships with the Black Duck containers. Our official hub-postgres image bootstraps its own schema, and uses CFSSL for authentication. In this case, you will have to set up auth and the schema yourself. - For simplicity, we use an example password below (blackduck123). So, now lets do our external database setup, in two steps: -1) First lets make sure we create secrets which will match our passwords that we will set in the external database. +1) First, let's make sure we create secrets that match the passwords we will set in the external database. ``` kubectl create secret generic db-creds --from-literal=blackduck=blackduck123 --from-literal=blackduck_user=blackduck123 -n myhub @@ -117,34 +124,34 @@ kubectl create secret generic db-creds --from-literal=blackduck=blackduck123 --f 3) Finally, edit the `HUB_POSTGRES_HOST` field in the `hub-db-config` configmap to match the DNS name or IP address of your external postgres host (alternatively, use a headless service for advanced users). Use `kubectl edit cm` or `oc edit cm` to do this. -Your external database is now set up. Move on to step 4 to install the hub. +Your external database is now set up. Move on to step 4 to install the Hub. -#### Step 4: Finally, create the hub app's containers. +#### Step 4: Finally, create the Hub app's containers -You have now set up the main initial containers that the blackduck hub depends on, and set its database up; you can start the rest of the application. As mentioned earlier, for fully production deployment, you'll want to replace emptyDir's with real storage directories based on your admin's recommendation.
Then all you have to do is create the 3rd yaml file, like so, and the hub will be up and running in a few minutes: +You have now set up the main initial containers that the Black Duck Hub depends on, and set its database up; you can start the rest of the application. As mentioned earlier, for a full production deployment, you'll want to replace emptyDirs with real storage directories based on your admin's recommendation. Then all you have to do is create the 3rd yaml file, like so, and the Hub will be up and running in a few minutes: ``` #### Done setting up the external DB. kubectl create -f 3-hub.yml -n myhub ``` -If all the above pods are properly scheduled and running, you can then -expose the webserver endpoint, and start using the hub to scan projects. +If all the above pods are properly scheduled and running, you can then expose the webserver endpoint, and start using the Hub to scan projects. + +### Quick-start examples: The easiest way to get a Hub up and running in your Cloud Native environment -### Quick start examples: The easiest way to get a hub up and running on your Cloud Native environment. +The following two quick starts show how to get the Hub up 'instantly' for a prototype configuration that you can evolve. -The following two quick starts show how to get the hub up 'instantly' for a prototype configuration that you can evolve. -- These are only examples, not 'installers', and should be leveraged by administrators who know what they are doing to quickly grok the hub setup process. -- Do not assume that running these scripts is a replacement for actually understanding the hub setup/configuration process. -- Building on the points above: Make sure you make any production modifications (volumes, certificates, etc) that you need before running them. Contact blackduck support if you have questions on how to adopt these scripts to match any special hub configurations you need. +If you're just learning the Hub for the first time, these are a great way to get started quickly. So feel free to dive in and try the quick starts out to get the Hub up and running quickly in your cloud native environment! -That said: If you're just learning the hub for the first time, these are a great way to get started quickly. So feel free to dive in and try the quick starts out to get the hub up and running quickly in your cloud native environment! +- These are only examples, not 'installers', and should be leveraged by administrators who know what they are doing to quickly grok the Hub setup process. +- Do not assume that running these scripts is a replacement for actually understanding the Hub setup/configuration process. +- Make any production modifications (volumes, certificates, etc.) before running the Hub in production. Contact Black Duck support if you have questions on how to adapt these scripts to match any special Hub configurations you need. Openshift users: use `oc` instead of kubectl, and `project` instead of namespace. #### Kubernetes Internal DB 'quick start' script: -Clone this repository , and cd to `install/hub` to run these commands, so the files are local ! ``` #start quickstart-internal @@ -163,7 +170,7 @@ kubectl create -f 3-hub.yml -n myhub #### External DB 'quick start' script: -Clone this repository , and cd to `install/hub` to run these commands, so the files are local ! Also make sure you can write to tmpfs if running this script.
+Clone this repository, and cd to `install/hub` to run these commands, so the files are local. Also, make sure you can write to tmpfs if running this script. ``` #start quickstart-external @@ -202,31 +209,28 @@ kubectl create -f 3-hub.yml -n myhub #end quickstart-external ``` -### After deployment: Consider using Auto scaling. +### After deployment: Consider using Auto scaling - `kubectl create -f autoscale.yml` will ensure that you always have enough jobrunners and scan service runners to keep up with your dynamic workload. -### Fine tune your configuration +### Fine-tune your configuration -There are several ways to fine tune your configuration. Some may be essential -to your organizations use of the hub (for example, external proxys might be needed). +There are several ways to fine-tune your configuration. Some may be essential to your organization's use of the Hub (for example, external proxies might be needed). -- External databases: These are not necessary for any particular scenario, but -might be a preference. -- External proxies: For datacenters that are airgapped. -- Custom nginx certificates: So you can use trusted internal TLS certs to access the hub. +- External databases: These are not necessary for any particular scenario, but might be a preference. +- External proxies: For datacenters that are air-gapped. +- Custom nginx certificates: So you can use trusted internal TLS certs to access the Hub. - Scaling to 100s, 1000s, or more of scans: configuration. -There are several options that can be configured in the yml files for Kubernetes/Openshift as described below. We use kubernetes and openshift interchangeably for these, as the changes are agnostic to the underlying orchestration. +There are several options that can be configured in the yml files for Kubernetes/OpenShift as described below. We use Kubernetes and OpenShift interchangeably for these, as the changes are agnostic to the underlying orchestration. -*We go through them below* +*Each is discussed below.* -#### I want to run the hub with no security context constraints. +#### Running the Hub with no security context constraints -Follow the "external configured database" directions above. Use either your own -postgres, or, you can use any postgres container as exemplified. +Follow the "externally configured database" directions above. Use either your own postgres or any postgres container, as exemplified. -#### I want custom hostnames, ports, and proxys for the hub-nginx container.
+#### Custom hostnames, ports, and proxies for the hub-nginx container ##### Host Name Modification @@ -265,12 +269,10 @@ If a proxy is required for external internet access, you'll need to configure it *Note that '/run/secrets/' can be any directory, specifiable in the $RUN_SECRETS_DIR environment variable* -There are three methods for specifying a proxy password when using Docker +There are three methods for specifying a proxy password when using Docker: - add a Kubernetes secret called HUB_PROXY_PASSWORD_FILE - - mount a directory that contains a file called HUB_PROXY_PASSWORD_FILE to /run/secrets (better to use secrets here) - - specify an environment variable called 'HUB_PROXY_PASSWORD' that contains the proxy password These are the services that will require the proxy password: @@ -322,7 +324,7 @@ kubectl create secret generic db_user --from-file=./username.txt --from-file=./p #### Using a Custom web server certificate-key pair -The Hub allows users to use their own web server certificate-key pairs for establishing ssl connection. +The Hub allows users to use their own web server certificate-key pairs for establishing SSL connections. * Create a Kubernetes secret each called 'WEBSERVER_CUSTOM_CERT_FILE' and 'WEBSERVER_CUSTOM_KEY_FILE' with the custom certificate and custom key in your namespace. @@ -333,8 +335,7 @@ kubectl secret create WEBSERVER_CUSTOM_CERT_FILE --from-file= kubectl secret create WEBSERVER_CUSTOM_KEY_FILE --from-file= ``` -For the webserver service, add secrets by copying their values into 'env' -values for the pod specifications in the webserver. +For the webserver service, add secrets by copying their values into 'env' values for the pod specifications in the webserver. ##### Hub Reporting Database @@ -417,23 +418,22 @@ metadata: EOF ``` -### How To Expose kubernetes/openshift Services +### How To Expose Kubernetes/OpenShift Services -Your cluster administrator will have the final say in how you expose the hub to the outside world. +Your cluster administrator will have the final say in how you expose the Hub to the outside world. -Some common patterns are listed below. +Some common methodologies are listed below. #### Cloud load balancers vs. NodePorts -The simplest way to expose the hub for a simple POC, or for a cloud based cluster, is via -a cloud load balancer. +The simplest way to expose the Hub for a simple POC, or for a cloud-based cluster, is via a cloud load balancer. - `kubectl expose --type=Loadbalancer` will work in a large cloud like GKE or certain AWS clusters. - `kubectl expose --type=NodePort` is a good solution for small clusters: you can use your API Server's port to access the hub. If you use this option, make sure to export `HUB_WEBSERVER_HOST` and `HUB_WEBSERVER_PORT` as needed. -For example, a typical invocation to expose the hub might be: +For example, a typical invocation to expose the Hub might be: ``` kubectl expose --namespace=default deployment webserver --type=LoadBalancer --port=443 --target-port=8443 --name=nginx-gateway @@ -441,9 +441,9 @@ For example, a typical invocation to expose the hub might be: #### Openshift routers -Your administrator can help you define a route if you're using openshift. Make sure to turn on TLS -passthrough if going down this road. You will then likely access your cluster at a URL that openshift -defined for you, available in the `Routes` UI of your openshift console's webapp. +Your administrator can help you define a route if you're using OpenShift.
Make sure to turn on TLS +passthrough if going down this road. You will then likely access your cluster at a URL that OpenShift +defined for you, available in the `Routes` UI of your OpenShift console's webapp. #### Testing an exposed hub @@ -473,10 +473,10 @@ And you should be able to see a result which includes an HTTP page. ### Debugging a running deployment -The following exemplifies debugging of a deployment. If you have any doubt that your cluster +The following shows how to debug a deployment. If you have any doubt that your cluster is working properly, go through these steps and see where the divergence has occurred. -Find all the pods that are running: They all should be alive: +Find all the pods that are running; they should all be alive: ``` ubuntu@ip-10-0-22-242:~$ kubectl get pods @@ -493,7 +493,7 @@ zookeeper-3368690434-rnz3m 1/1 Running 0 26m Now jot those pods down, we will exec into them to confirm they are functioning properly. -Check the logs for the web app: They should be active over time: +Check the logs for the webapp; they should be active over time: ``` kubectl logs nginx-webapp-2564656559-6fbq8 -c webapp @@ -523,18 +523,16 @@ You should see something like this (assuming you used chrome, curl, and so on to #### Exposing endpoints -Note that finally, you should make sure that you keep exposed the NGINX and Postgres -endpoints so external clients can access them as necessary. +Finally, make sure that you keep the NGINX and Postgres endpoints exposed so external clients can access them as necessary. ### More fine tuning -We conclude with more recipes for fine tuning your hub configuration. Note that it's -advisable that you first get a simple hub up and running before adopting these tuning snippets. +We conclude with more recipes for fine-tuning your Hub configuration. Note that it's advisable to first get a simple Hub up and running before adopting these tuning snippets. -#### NGINX TLS Configuration details. +#### NGINX TLS Configuration details -Create a configmap/secret which can hold data necessary for injecting your organization's credentials into nginx. +Create a configmap/secret that can hold data necessary for injecting your organization's credentials into nginx. ``` apiVersion: v1 @@ -547,11 +545,11 @@ items: data: WEBSERVER_CUSTOM_CERT_FILE: | -----BEGIN CERTIFICATE----- - ….. (insert organizations certs here) + ... (insert certs of your organization here) -----END CERTIFICATE----- WEBSERVER_CUSTOM_KEY_FILE: | -----BEGIN PRIVATE KEY----- - …… (insert organizations SSL keys here) + ... (insert SSL keys of your organization here) -----END PRIVATE KEY----- ``` @@ -561,7 +559,7 @@ Then create that config map: kubectl create -f nginx.yml ``` -And update the nginx pod segment for nginx, like so, adding the following volume/volume-mount pair: +And update the nginx pod segment, adding the following volume/volume-mount pair: ``` volumes @@ -574,12 +572,11 @@ volumeMounts: - mountPath: /run/secrets name: dir-certs ``` -#### Loadbalancer and Proxy settings. +#### Loadbalancer and Proxy settings -Also, export HUB_PROXY_PORT and HUB_PROXY_HOST values, inside the nginx pod, as needed based on your load balancer host / port. Especially important to note if using hostnames and node ports that are (non 8443). +Also, export HUB_PROXY_PORT and HUB_PROXY_HOST values inside the nginx pod, as needed, based on your load balancer host / port. This is especially important if you are using hostnames and node ports other than 8443.
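+For example, one way to push those values into a running webserver deployment is with `kubectl set env` (a sketch only, not an official step: `webserver` is the deployment name used in these YAMLs, `myhub` is the example namespace, and the host/port values are placeholders to replace with your own):
+
+```
+# Hypothetical values; substitute your real proxy / load balancer host and port.
+kubectl set env deployment/webserver HUB_PROXY_HOST=proxy.my.company HUB_PROXY_PORT=8080 -n myhub
+```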
-A diagram of a typical set of envionrment variables that would be exported for -containers is shown in the 2-cm-hub-yml file. +A diagram of a typical set of environment variables that would be exported for containers is shown in the 2-cm-hub-yml file. ``` PUBLIC_HUB_WEBSERVER_HOST=hub.my.company diff --git a/kubernetes/bin/hub_add_replication_user.sh b/kubernetes/bin/hub_add_replication_user.sh index ac36ec0..c416950 100755 --- a/kubernetes/bin/hub_add_replication_user.sh +++ b/kubernetes/bin/hub_add_replication_user.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/kubernetes/bin/hub_create_data_dump.sh b/kubernetes/bin/hub_create_data_dump.sh new file mode 100755 index 0000000..775cf54 --- /dev/null +++ b/kubernetes/bin/hub_create_data_dump.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# Prerequisites: +# 1. The database container is running. +# 2. The database container has been properly initialized. + +set -e + +TIMEOUT=${TIMEOUT:-10} +HUB_VERSION=${HUB_VERSION:-4.8.0} +HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} + +function fail() { + message=$1 + exit_status=$2 + echo "${message}" + exit ${exit_status} +} + +function set_container_id() { + container_id=( `docker ps -q -f label=com.blackducksoftware.hub.version=${HUB_VERSION} \ + -f label=com.blackducksoftware.hub.image=${HUB_DATABASE_IMAGE_NAME}` ) + return 0 +} + +# There should be one argument: the destination path, including the name of the dump file +[ $# -ne "1" ] && fail "Usage: $0 " 1 +local_dest_dump_file="$1" + +# Check that docker is on our path +[ "$(type -p docker)" == "" ] && fail "docker not found on the search path" 2 + +# Check that we can contact the docker daemon +docker ps > /dev/null +success=$? +[ ${success} -ne 0 ] && fail "Could not contact docker daemon. Is DOCKER_HOST set correctly?" 3 + +# Find the database container ID(s); give the container a few seconds to start if necessary +sleep_count=0 +until set_container_id && [ "${#container_id[*]}" -gt 0 ] ; do + sleep_count=$(( ${sleep_count} + 1 )) + [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database container not ready after ${TIMEOUT} seconds." 4 + sleep 1 +done + +# Check that exactly one instance of the database container is up and running +[ "${#container_id[*]}" -ne 1 ] && fail "${#container_id[*]} instances of the hub database container are running." 5 + +# Make sure that postgres is ready +sleep_count=0 +until docker exec -i -u postgres ${container_id} pg_isready -q ; do + sleep_count=$(( ${sleep_count} + 1 )) + [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database server in container ${container_id} not ready after ${TIMEOUT} seconds." 6 + sleep 1 +done + +# Make sure that bds_hub exists +sleep_count=0 +until [ "$(docker exec -i -u postgres ${container_id} psql -A -t -c "select count(*) from pg_database where datname = 'bds_hub'" postgres 2> /dev/null)" -eq 1 ] ; do + sleep_count=$(( ${sleep_count} + 1 )) + [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database bds_hub in container ${container_id} not ready after ${TIMEOUT} seconds." 7 + sleep 1 +done + +# Here we go... +echo Creating a dump from the container "${container_id}" '...' +docker exec -i ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds_hub +exitCode=$?
+[ ${exitCode} -ne 0 ] && fail "Cannot create the dump file from the container [Container Id: ${container_id}]" 8 + +# Create an absolute path to copy to, adds support for symbolic links +if [ ! -d "$local_dest_dump_file" ]; then + cd `dirname $local_dest_dump_file` + base_file=`basename $local_dest_dump_file` + symlink_count=0 + while [ -L "$base_file" ]; do + (( symlink_count++ )) + if [ "$symlink_count" -gt 100 ]; then + fail "MAXSYMLINK level reached." 1 + fi + base_file=`readlink $base_file` + cd `dirname $base_file` + base_file=`basename $base_file` + done +present_dir=`pwd -P` +local_absolute_path=$present_dir/$base_file + +else + local_absolute_path=${local_dest_dump_file} +fi + +docker cp ${container_id}:/tmp/bds_hub.dump "${local_absolute_path}" +exitCode=$? +[ ${exitCode} -ne 0 ] && fail "Was not able to copy the dump file over [Container Id: ${container_id}]" 9 + +# After copy, remove the dump from the container. +docker exec -it ${container_id} rm /tmp/bds_hub.dump + +echo Success with creating the dump and copying over to "[Destination Dir: $(dirname ${local_dest_dump_file})]" from the container: "[Container Id: ${container_id}]" diff --git a/kubernetes/bin/hub_db_migrate.sh b/kubernetes/bin/hub_db_migrate.sh index 8706b9f..4d799d0 100755 --- a/kubernetes/bin/hub_db_migrate.sh +++ b/kubernetes/bin/hub_db_migrate.sh @@ -13,7 +13,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/kubernetes/bin/hub_replication_changepassword.sh b/kubernetes/bin/hub_replication_changepassword.sh index 9cce664..2cffa4f 100755 --- a/kubernetes/bin/hub_replication_changepassword.sh +++ b/kubernetes/bin/hub_replication_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/kubernetes/bin/hub_reportdb_changepassword.sh b/kubernetes/bin/hub_reportdb_changepassword.sh index 287dac8..7d90ba5 100755 --- a/kubernetes/bin/hub_reportdb_changepassword.sh +++ b/kubernetes/bin/hub_reportdb_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.7.3} +HUB_VERSION=${HUB_VERSION:-4.8.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() {