diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 4abedb2..1550014 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,6 +1,63 @@ -# Find the Dockerfile for mcr.microsoft.com/azure-functions/powershell:3.0-powershell${VARIANT}-core-tools at this URL -# https://github.com/Azure/azure-functions-docker/blob/master/host/3.0/buster/amd64/powershell +# azure-terraform image +# +# reference: +# https://github.com/microsoft/vscode-dev-containers +# https://hub.docker.com/_/microsoft-vscode-devcontainers +# https://github.com/microsoft/vscode-dev-containers/blob/master/containers/azure-terraform/.devcontainer/Dockerfile -# Update the VARIANT arg in devcontainer.json to pick a supported PowerShell version: 7, 6 -ARG VARIANT=7 -FROM mcr.microsoft.com/azure-functions/powershell:3.0-powershell${VARIANT}-core-tools +# You can pick any Debian/Ubuntu-based image. 😊 +FROM mcr.microsoft.com/vscode/devcontainers/base:ubuntu-18.04 + +COPY library-scripts/*.sh /tmp/library-scripts/ + +# [Option] Install zsh +ARG INSTALL_ZSH="true" +# [Option] Upgrade OS packages to their latest versions +ARG UPGRADE_PACKAGES="false" + +# Install needed packages and setup non-root user. Use a separate RUN statement to add your own dependencies. +ARG USERNAME=vscode +ARG USER_UID=1000 +ARG USER_GID=$USER_UID +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && bash /tmp/library-scripts/common-debian.sh "${INSTALL_ZSH}" "${USERNAME}" "${USER_UID}" "${USER_GID}" "${UPGRADE_PACKAGES}" \ + && apt-get install -y graphviz \ + && apt-get clean -y && rm -rf /var/lib/apt/lists/* + +# [Option] Install Azure CLI +ARG INSTALL_AZURE_CLI="true" +# [Option] Install Docker CLI +ARG INSTALL_DOCKER="true" +# [Option] Install Node.js +ARG INSTALL_NODE="true" +ARG NODE_VERSION="lts/*" +ENV NVM_DIR=/usr/local/share/nvm +ENV NVM_SYMLINK_CURRENT=true \ + PATH=${NVM_DIR}/current/bin:${PATH} +RUN if [ "${INSTALL_AZURE_CLI}" = "true" ]; then bash /tmp/library-scripts/azcli-debian.sh; fi \ + && if [ "${INSTALL_NODE}" = "true" ]; then bash /tmp/library-scripts/node-debian.sh "${NVM_DIR}" "${NODE_VERSION}" "${USERNAME}"; fi \ + && if [ "${INSTALL_DOCKER}" = "true" ]; then \ + bash /tmp/library-scripts/docker-debian.sh "true" "/var/run/docker-host.sock" "/var/run/docker.sock" "${USERNAME}"; \ + else \ + echo '#!/bin/bash\n"$@"' > /usr/local/share/docker-init.sh && chmod +x /usr/local/share/docker-init.sh; \ + fi \ + && rm -rf /var/lib/apt/lists/* + +# Install Terraform, tflint, Go, PowerShell, and other useful tools +# TODO: move this into main "RUN" layer above +ARG TERRAFORM_VERSION=0.12.30 +ARG TFLINT_VERSION=0.18.0 +RUN bash /tmp/library-scripts/terraform-debian.sh "${TERRAFORM_VERSION}" "${TFLINT_VERSION}" \ + && bash /tmp/library-scripts/powershell-debian.sh \ + && bash /tmp/library-scripts/kubectl-helm-debian.sh \ + && bash /tmp/library-scripts/terraform-pre-commit.sh \ + && bash /tmp/library-scripts/tflint-plugins.sh \ + && bash /tmp/library-scripts/go-debian.sh \ + && rm -rf /tmp/library-scripts + +ENTRYPOINT [ "/usr/local/share/docker-init.sh" ] +CMD [ "sleep", "infinity" ] + +# [Optional] Uncomment this section to install additional OS packages. 
+# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ +# && apt-get -y install --no-install-recommends diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a0f5362..40a933d 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,31 +1,45 @@ // For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at: -// https://github.com/microsoft/vscode-dev-containers/blob/master/containers/azure-functions-pwsh/README.md +// https://github.com/microsoft/vscode-dev-containers/blob/master/containers/azure-terraform/.devcontainer/devcontainer.json { - "name": "Azure Functions & PowerShell", + "name": "Azure Terraform", "build": { "dockerfile": "Dockerfile", "args": { - // Update the VARIANT arg to pick a supported PowerShell version: 7, 6 - "VARIANT": "7" + "TERRAFORM_VERSION": "0.12.30", + "TFLINT_VERSION": "0.22.0", + "INSTALL_AZURE_CLI": "true", + "INSTALL_DOCKER": "true", + "INSTALL_NODE": "true" } }, - "forwardPorts": [ 7071 ], - "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ], - + "mounts": [ + "source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind" + ], + "overrideCommand": false, // Set *default* container specific settings.json values on container create. "settings": { - "terminal.integrated.shell.linux": "/usr/bin/pwsh" + "terminal.integrated.shell.linux": "/bin/bash" }, - // Add the IDs of extensions you want installed when the container is created. "extensions": [ - "ms-azuretools.vscode-azurefunctions", - "ms-vscode.powershell" - ] - + "hashicorp.terraform", + "ms-azuretools.vscode-azureterraform", + "ms-vscode.azurecli", + "ms-azuretools.vscode-docker", + "aaron-bond.better-comments", + "coenraads.bracket-pair-colorizer-2", + "eamodio.gitlens", + "ms-kubernetes-tools.vscode-kubernetes-tools", + "yzhang.markdown-all-in-one", + "davidanson.vscode-markdownlint", + "ziyasal.vscode-open-in-github", + "ms-vscode.powershell", + "redhat.vscode-yaml", + ], + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. - // "postCreateCommand": "dotnet restore", - - // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root. - // "remoteUser": "vscode" + // "postCreateCommand": "terraform --version", + // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode" } diff --git a/.devcontainer/library-scripts/README.md b/.devcontainer/library-scripts/README.md new file mode 100644 index 0000000..ab8a66d --- /dev/null +++ b/.devcontainer/library-scripts/README.md @@ -0,0 +1,5 @@ +# Warning: Folder contents may be replaced + +The contents of this folder will be automatically replaced with a file of the same name in the repository's [script-library folder](https://github.com/microsoft/vscode-dev-containers/tree/master/script-library) whenever the repository is packaged. + +To retain your edits, move the file to a different location. You may also delete the files if they are not needed. 
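As a quick sanity check of the Dockerfile and devcontainer.json above, the image can also be built directly with plain `docker build`, passing the same build args that devcontainer.json supplies. This is a minimal sketch rather than anything defined in the repo: it assumes Docker is available and the command is run from the repository root, and the image tag is purely illustrative.

```bash
# Minimal sketch: build the dev container image outside VS Code to verify the
# Dockerfile and its build args (mirrors the values set in devcontainer.json).
# Assumes Docker is installed and this is run from the repository root; the tag
# "devcontainer-azure-terraform:local" is illustrative.
docker build \
  --build-arg TERRAFORM_VERSION=0.12.30 \
  --build-arg TFLINT_VERSION=0.22.0 \
  --build-arg INSTALL_AZURE_CLI=true \
  --build-arg INSTALL_DOCKER=true \
  --build-arg INSTALL_NODE=true \
  -t devcontainer-azure-terraform:local \
  .devcontainer
```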
diff --git a/.devcontainer/library-scripts/azcli-debian.sh b/.devcontainer/library-scripts/azcli-debian.sh new file mode 100644 index 0000000..cde7fd7 --- /dev/null +++ b/.devcontainer/library-scripts/azcli-debian.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/azcli.md +# +# Syntax: ./azcli-debian.sh + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + +# Install curl, apt-transport-https, lsb-release, or gpg if missing +if ! dpkg -s apt-transport-https curl ca-certificates lsb-release > /dev/null 2>&1 || ! type gpg > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends apt-transport-https curl ca-certificates lsb-release gnupg2 +fi + +# Install the Azure CLI +echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/azure-cli.list +curl -sL https://packages.microsoft.com/keys/microsoft.asc | (OUT=$(apt-key add - 2>&1) || echo $OUT) +apt-get update +apt-get install -y azure-cli +echo "Done!" \ No newline at end of file diff --git a/.devcontainer/library-scripts/common-debian.sh b/.devcontainer/library-scripts/common-debian.sh new file mode 100644 index 0000000..2c8e98d --- /dev/null +++ b/.devcontainer/library-scripts/common-debian.sh @@ -0,0 +1,341 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/common.md +# +# Syntax: ./common-debian.sh [install zsh flag] [username] [user UID] [user GID] [upgrade packages flag] [install Oh My *! flag] + +INSTALL_ZSH=${1:-"true"} +USERNAME=${2:-"automatic"} +USER_UID=${3:-"automatic"} +USER_GID=${4:-"automatic"} +UPGRADE_PACKAGES=${5:-"true"} +INSTALL_OH_MYS=${6:-"true"} + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Ensure that login shells get the correct path if the user updated the PATH using ENV. 
+rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\${PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# If in automatic mode, determine if a user already exists, if not use vscode +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in ${POSSIBLE_USERS[@]}; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=vscode + fi +elif [ "${USERNAME}" = "none" ]; then + USERNAME=root + USER_UID=0 + USER_GID=0 +fi + +# Load markers to see which steps have already run +MARKER_FILE="/usr/local/etc/vscode-dev-containers/common" +if [ -f "${MARKER_FILE}" ]; then + echo "Marker file found:" + cat "${MARKER_FILE}" + source "${MARKER_FILE}" +fi + +# Ensure apt is in non-interactive to avoid prompts +export DEBIAN_FRONTEND=noninteractive + +# Function to call apt-get if needed +apt-get-update-if-needed() +{ + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update + else + echo "Skipping apt-get update." + fi +} + +# Run install apt-utils to avoid debconf warning then verify presence of other common developer tools and dependencies +if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then + apt-get-update-if-needed + + PACKAGE_LIST="apt-utils \ + git \ + openssh-client \ + gnupg2 \ + iproute2 \ + procps \ + lsof \ + htop \ + net-tools \ + psmisc \ + curl \ + wget \ + rsync \ + ca-certificates \ + unzip \ + zip \ + nano \ + vim-tiny \ + less \ + jq \ + lsb-release \ + apt-transport-https \ + dialog \ + libc6 \ + libgcc1 \ + libkrb5-3 \ + libgssapi-krb5-2 \ + libicu[0-9][0-9] \ + liblttng-ust0 \ + libstdc++6 \ + zlib1g \ + locales \ + sudo \ + ncdu \ + man-db \ + strace" + + # Install libssl1.1 if available + if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then + PACKAGE_LIST="${PACKAGE_LIST} libssl1.1" + fi + + # Install appropriate version of libssl1.0.x if available + LIBSSL=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '') + if [ "$(echo "$LIBSSL" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then + if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then + # Debian 9 + PACKAGE_LIST="${PACKAGE_LIST} libssl1.0.2" + elif [[ ! -z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then + # Ubuntu 18.04, 16.04, earlier + PACKAGE_LIST="${PACKAGE_LIST} libssl1.0.0" + fi + fi + + echo "Packages to verify are installed: ${PACKAGE_LIST}" + apt-get -y install --no-install-recommends ${PACKAGE_LIST} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 ) + + PACKAGES_ALREADY_INSTALLED="true" +fi + +# Get to latest versions of all packages +if [ "${UPGRADE_PACKAGES}" = "true" ]; then + apt-get-update-if-needed + apt-get -y upgrade --no-install-recommends + apt-get autoremove -y +fi + +# Ensure at least the en_US.UTF-8 UTF-8 locale is available. +# Common need for both applications and things like the agnoster ZSH theme. +if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + locale-gen + LOCALE_ALREADY_SET="true" +fi + +# Create or update a non-root user to match UID/GID. 
+if id -u ${USERNAME} > /dev/null 2>&1; then + # User exists, update if needed + if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -G $USERNAME)" ]; then + groupmod --gid $USER_GID $USERNAME + usermod --gid $USER_GID $USERNAME + fi + if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then + usermod --uid $USER_UID $USERNAME + fi +else + # Create user + if [ "${USER_GID}" = "automatic" ]; then + groupadd $USERNAME + else + groupadd --gid $USER_GID $USERNAME + fi + if [ "${USER_UID}" = "automatic" ]; then + useradd -s /bin/bash --gid $USERNAME -m $USERNAME + else + useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME + fi +fi + +# Add add sudo support for non-root user +if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then + echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME + chmod 0440 /etc/sudoers.d/$USERNAME + EXISTING_NON_ROOT_USER="${USERNAME}" +fi + +# ** Shell customization section ** +if [ "${USERNAME}" = "root" ]; then + USER_RC_PATH="/root" +else + USER_RC_PATH="/home/${USERNAME}" +fi + +# .bashrc/.zshrc snippet +RC_SNIPPET="$(cat << EOF +export USER=\$(whoami) + +export PATH=\$PATH:\$HOME/.local/bin +EOF +)" + +# code shim, it fallbacks to code-insiders if code is not available +cat << 'EOF' > /usr/local/bin/code +#!/bin/sh + +get_in_path_except_current() { + which -a "$1" | grep -v "$0" | head -1 +} + +code="$(get_in_path_except_current code)" + +if [ -n "$code" ]; then + exec "$code" "$@" +elif [ "$(command -v code-insiders)" ]; then + exec code-insiders "$@" +else + echo "code or code-insiders is not installed" >&2 + exit 127 +fi +EOF +chmod +x /usr/local/bin/code + +# Codespaces themes - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme +CODESPACES_BASH="$(cat \ +<&1 + echo -e "$(cat "${TEMPLATE}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${USER_RC_FILE} + if [ "${OH_MY}" = "bash" ]; then + sed -i -e 's/OSH_THEME=.*/OSH_THEME="codespaces"/g' ${USER_RC_FILE} + mkdir -p ${OH_MY_INSTALL_DIR}/custom/themes/codespaces + echo "${CODESPACES_BASH}" > ${OH_MY_INSTALL_DIR}/custom/themes/codespaces/codespaces.theme.sh + else + sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="codespaces"/g' ${USER_RC_FILE} + mkdir -p ${OH_MY_INSTALL_DIR}/custom/themes + echo "${CODESPACES_ZSH}" > ${OH_MY_INSTALL_DIR}/custom/themes/codespaces.zsh-theme + fi + # Shrink git while still enabling updates + cd ${OH_MY_INSTALL_DIR} + git repack -a -d -f --depth=1 --window=1 + + if [ "${USERNAME}" != "root" ]; then + cp -rf ${USER_RC_FILE} ${OH_MY_INSTALL_DIR} /root + chown -R ${USERNAME}:${USERNAME} ${USER_RC_PATH} + fi +} + +if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then + echo "${RC_SNIPPET}" >> /etc/bash.bashrc + RC_SNIPPET_ALREADY_ADDED="true" +fi +install-oh-my bash bashrc.osh-template https://github.com/ohmybash/oh-my-bash + +# Optionally install and configure zsh and Oh My Zsh! +if [ "${INSTALL_ZSH}" = "true" ]; then + if ! 
type zsh > /dev/null 2>&1; then + apt-get-update-if-needed + apt-get install -y zsh + fi + if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then + echo "${RC_SNIPPET}" >> /etc/zsh/zshrc + ZSH_ALREADY_INSTALLED="true" + fi + install-oh-my zsh zshrc.zsh-template https://github.com/ohmyzsh/ohmyzsh +fi + +# Write marker file +mkdir -p "$(dirname "${MARKER_FILE}")" +echo -e "\ + PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\ + LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\ + EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\ + RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\ + ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}" + +echo "Done!" diff --git a/.devcontainer/library-scripts/docker-debian.sh b/.devcontainer/library-scripts/docker-debian.sh new file mode 100644 index 0000000..7ca6d54 --- /dev/null +++ b/.devcontainer/library-scripts/docker-debian.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/docker.md +# +# Syntax: ./docker-debian.sh [enable non-root docker socket access flag] [source socket] [target socket] [non-root user] [use moby] + +ENABLE_NONROOT_DOCKER=${1:-"true"} +SOURCE_SOCKET=${2:-"/var/run/docker-host.sock"} +TARGET_SOCKET=${3:-"/var/run/docker.sock"} +USERNAME=${4:-"automatic"} +USE_MOBY=${5:-"true"} + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in ${POSSIBLE_USERS[@]}; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + +# Function to run apt-get if needed +apt-get-update-if-needed() +{ + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update + else + echo "Skipping apt-get update." + fi +} + +# Ensure apt is in non-interactive to avoid prompts +export DEBIAN_FRONTEND=noninteractive + +# Install apt-transport-https, curl, lsb-release, gpg if missing +if ! dpkg -s apt-transport-https curl ca-certificates lsb-release > /dev/null 2>&1 || ! type gpg > /dev/null 2>&1; then + apt-get-update-if-needed + apt-get -y install --no-install-recommends apt-transport-https curl ca-certificates lsb-release gnupg2 +fi + +# Install Docker / Moby CLI if not already installed +if type docker > /dev/null 2>&1; then + echo "Docker / Moby CLI already installed." 
+else + if [ "${USE_MOBY}" = "true" ]; then + DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') + CODENAME=$(lsb_release -cs) + curl -s https://packages.microsoft.com/keys/microsoft.asc | (OUT=$(apt-key add - 2>&1) || echo $OUT) + echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-${DISTRO}-${CODENAME}-prod ${CODENAME} main" > /etc/apt/sources.list.d/microsoft.list + apt-get update + apt-get -y install --no-install-recommends moby-cli + else + curl -fsSL https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]')/gpg | (OUT=$(apt-key add - 2>&1) || echo $OUT) + echo "deb [arch=amd64] https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list + apt-get update + apt-get -y install --no-install-recommends docker-ce-cli + fi +fi + +# Install Docker Compose if not already installed +if type docker-compose > /dev/null 2>&1; then + echo "Docker Compose already installed." +else + LATEST_COMPOSE_VERSION=$(curl -sSL "https://api.github.com/repos/docker/compose/releases/latest" | grep -o -P '(?<="tag_name": ").+(?=")') + curl -sSL "https://github.com/docker/compose/releases/download/${LATEST_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + chmod +x /usr/local/bin/docker-compose +fi + +# If init file already exists, exit +if [ -f "/usr/local/share/docker-init.sh" ]; then + exit 0 +fi + +# By default, make the source and target sockets the same +if [ "${SOURCE_SOCKET}" != "${TARGET_SOCKET}" ]; then + touch "${SOURCE_SOCKET}" + ln -s "${SOURCE_SOCKET}" "${TARGET_SOCKET}" +fi + +# Add a stub if not adding non-root user access, user is root +if [ "${ENABLE_NONROOT_DOCKER}" = "false" ] || [ "${USERNAME}" = "root" ]; then + echo '/usr/bin/env bash -c "\$@"' > /usr/local/share/docker-init.sh + chmod +x /usr/local/share/docker-init.sh + exit 0 +fi + +# If enabling non-root access and specified user is found, setup socat and add script +chown -h "${USERNAME}":root "${TARGET_SOCKET}" +if ! dpkg -s socat > /dev/null 2>&1; then + apt-get-update-if-needed + apt-get -y install socat +fi +tee /usr/local/share/docker-init.sh > /dev/null \ +<< EOF +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- + +set -e + +SOCAT_PATH_BASE=/tmp/vscr-dind-socat +SOCAT_LOG=\${SOCAT_PATH_BASE}.log +SOCAT_PID=\${SOCAT_PATH_BASE}.pid + +# Wrapper function to only use sudo if not already root +sudoIf() +{ + if [ "\$(id -u)" -ne 0 ]; then + sudo "\$@" + else + "\$@" + fi +} + +# Log messages +log() +{ + echo -e "[\$(date)] \$@" | sudoIf tee -a \${SOCAT_LOG} > /dev/null +} + +echo -e "\n** \$(date) **" | sudoIf tee -a \${SOCAT_LOG} > /dev/null +log "Ensuring ${USERNAME} has access to ${SOURCE_SOCKET} via ${TARGET_SOCKET}" + +# If enabled, try to add a docker group with the right GID. If the group is root, +# fall back on using socat to forward the docker socket to another unix socket so +# that we can set permissions on it without affecting the host. 
+if [ "${ENABLE_NONROOT_DOCKER}" = "true" ] && [ "${SOURCE_SOCKET}" != "${TARGET_SOCKET}" ] && [ "${USERNAME}" != "root" ] && [ "${USERNAME}" != "0" ]; then + SOCKET_GID=\$(stat -c '%g' ${SOURCE_SOCKET}) + if [ "\${SOCKET_GID}" != "0" ]; then + log "Adding user to group with GID \${SOCKET_GID}." + if [ "\$(cat /etc/group | grep :\${SOCKET_GID}:)" = "" ]; then + sudoIf groupadd --gid \${SOCKET_GID} docker-host + fi + # Add user to group if not already in it + if [ "\$(id ${USERNAME} | grep -E 'groups=.+\${SOCKET_GID}\(')" = "" ]; then + sudoIf usermod -aG \${SOCKET_GID} ${USERNAME} + fi + else + # Enable proxy if not already running + if [ ! -f "\${SOCAT_PID}" ] || ! ps -p \$(cat \${SOCAT_PID}) > /dev/null; then + log "Enabling socket proxy." + log "Proxying ${SOURCE_SOCKET} to ${TARGET_SOCKET} for vscode" + sudoIf rm -rf ${TARGET_SOCKET} + (sudoIf socat UNIX-LISTEN:${TARGET_SOCKET},fork,mode=660,user=${USERNAME} UNIX-CONNECT:${SOURCE_SOCKET} 2>&1 | sudoIf tee -a \${SOCAT_LOG} > /dev/null & echo "\$!" | sudoIf tee \${SOCAT_PID} > /dev/null) + else + log "Socket proxy already running." + fi + fi + log "Success" +fi + +# Execute whatever commands were passed in (if any). This allows us +# to set this script to ENTRYPOINT while still executing the default CMD. +set +e +exec "\$@" +EOF +chmod +x /usr/local/share/docker-init.sh +chown ${USERNAME}:root /usr/local/share/docker-init.sh +echo "Done!" \ No newline at end of file diff --git a/.devcontainer/library-scripts/go-debian.sh b/.devcontainer/library-scripts/go-debian.sh new file mode 100644 index 0000000..e11284e --- /dev/null +++ b/.devcontainer/library-scripts/go-debian.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/go.md +# +# Syntax: ./go-debian.sh [Go version] [GOROOT] [GOPATH] [non-root user] [Add GOPATH, GOROOT to rc files flag] [Install tools flag] + +TARGET_GO_VERSION=${1:-"latest"} +TARGET_GOROOT=${2:-"/usr/local/go"} +TARGET_GOPATH=${3:-"/go"} +USERNAME=${4:-"automatic"} +UPDATE_RC=${5:-"true"} +INSTALL_GO_TOOLS=${6:-"true"} + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Ensure that login shells get the correct path if the user updated the PATH using ENV. +rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do + if id -u "${CURRENT_USER}" > /dev/null 2>&1; then + USERNAME="${CURRENT_USER}" + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! 
id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + +function updaterc() { + if [ "${UPDATE_RC}" = "true" ]; then + echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." + echo -e "$1" | tee -a /etc/bash.bashrc >> /etc/zsh/zshrc + fi +} + +export DEBIAN_FRONTEND=noninteractive + +# Install curl, tar, git, other dependencies if missing +if ! dpkg -s curl ca-certificates tar git g++ gcc libc6-dev make pkg-config > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends curl ca-certificates tar git g++ gcc libc6-dev make pkg-config +fi + +# Get latest version number if latest is specified +if [ "${TARGET_GO_VERSION}" = "latest" ] || [ "${TARGET_GO_VERSION}" = "current" ] || [ "${TARGET_GO_VERSION}" = "lts" ]; then + TARGET_GO_VERSION=$(curl -sSL "https://golang.org/VERSION?m=text" | sed -n '/^go/s///p' ) +fi + +# Install Go +GO_INSTALL_SCRIPT="$(cat < /dev/null 2>&1; then + mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}" + chown -R ${USERNAME} "${TARGET_GOROOT}" "${TARGET_GOPATH}" + su ${USERNAME} -c "${GO_INSTALL_SCRIPT}" +else + echo "Go already installed. Skipping." +fi + +# Install Go tools +GO_TOOLS_WITH_MODULES="\ + golang.org/x/tools/gopls \ + honnef.co/go/tools/... \ + golang.org/x/tools/cmd/gorename \ + golang.org/x/tools/cmd/goimports \ + golang.org/x/tools/cmd/guru \ + golang.org/x/lint/golint \ + github.com/mdempsky/gocode \ + github.com/cweill/gotests/... \ + github.com/haya14busa/goplay/cmd/goplay \ + github.com/sqs/goreturns \ + github.com/josharian/impl \ + github.com/davidrjenni/reftools/cmd/fillstruct \ + github.com/uudashr/gopkgs/v2/cmd/gopkgs \ + github.com/ramya-rao-a/go-outline \ + github.com/acroca/go-symbols \ + github.com/godoctor/godoctor \ + github.com/rogpeppe/godef \ + github.com/zmb3/gogetdoc \ + github.com/fatih/gomodifytags \ + github.com/mgechev/revive \ + github.com/tfsec/tfsec/cmd/tfsec \ + github.com/terraform-docs/terraform-docs \ + github.com/go-delve/delve/cmd/dlv" +if [ "${INSTALL_GO_TOOLS}" = "true" ]; then + echo "Installing common Go tools..." + export PATH=${TARGET_GOROOT}/bin:${PATH} + mkdir -p /tmp/gotools + cd /tmp/gotools + export GOPATH=/tmp/gotools + export GOCACHE=/tmp/gotools/cache + + # Go tools w/module support + export GO111MODULE=on + (echo "${GO_TOOLS_WITH_MODULES}" | xargs -n 1 go get -v )2>&1 + + # gocode-gomod + export GO111MODULE=auto + go get -v -d github.com/stamblerre/gocode 2>&1 + go build -o gocode-gomod github.com/stamblerre/gocode + + # golangci-lint + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "${TARGET_GOPATH}/bin" 2>&1 + + # Move Go tools into path and clean up + mv /tmp/gotools/bin/* "${TARGET_GOPATH}/bin/" + mv gocode-gomod "${TARGET_GOPATH}/bin/" + rm -rf /tmp/gotools + chown -R ${USERNAME} "${TARGET_GOPATH}" +fi + +# Add GOPATH variable and bin directory into PATH in bashrc/zshrc files (unless disabled) +updaterc "$(cat << EOF +export GOPATH="${TARGET_GOPATH}" +if [[ "\${PATH}" != *"\${GOPATH}/bin"* ]]; then export PATH="\${PATH}:\${GOPATH}/bin"; fi +export GOROOT="${TARGET_GOROOT}" +if [[ "\${PATH}" != *"\${GOROOT}/bin"* ]]; then export PATH="\${PATH}:\${GOROOT}/bin"; fi +EOF +)" + +echo "Done!" 
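The go-debian.sh script above installs the Go toolchain under `/usr/local/go`, sets up a GOPATH at `/go`, drops common Go tools (gopls, dlv, golint, tfsec, terraform-docs, and others) into `${GOPATH}/bin`, and appends GOPATH/GOROOT exports to the shared rc files. Below is a minimal post-build smoke test sketch, assuming it is run inside the built container in an interactive bash or zsh session so those rc additions are in effect.

```bash
# Minimal sketch: confirm the Go toolchain and the tools installed by go-debian.sh
# are on the PATH. Run inside the built container in an interactive shell so the
# GOPATH/GOROOT exports appended to /etc/bash.bashrc (or /etc/zsh/zshrc) apply.
go version
go env GOROOT GOPATH          # expect /usr/local/go and /go by default
command -v gopls dlv golint tfsec terraform-docs
```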
diff --git a/.devcontainer/library-scripts/kubectl-helm-debian.sh b/.devcontainer/library-scripts/kubectl-helm-debian.sh new file mode 100644 index 0000000..00b3409 --- /dev/null +++ b/.devcontainer/library-scripts/kubectl-helm-debian.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/kubectl-helm.md +# +# Syntax: ./kubectl-helm-debian.sh + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + +# Install curl if missing +if ! dpkg -s curl ca-certificates > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends curl ca-certificates +fi + +# Install the kubectl +echo "Downloading kubectl..." +curl -sSL -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl +chmod +x /usr/local/bin/kubectl +# Install Helm +echo "Installing Helm..." +curl -s https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash - +echo "Done!" diff --git a/.devcontainer/library-scripts/node-debian.sh b/.devcontainer/library-scripts/node-debian.sh new file mode 100644 index 0000000..d61046d --- /dev/null +++ b/.devcontainer/library-scripts/node-debian.sh @@ -0,0 +1,124 @@ +#!/bin/bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/node.md +# +# Syntax: ./node-debian.sh [directory to install nvm] [node version to install (use "none" to skip)] [non-root user] [Update rc files flag] + +export NVM_DIR=${1:-"/usr/local/share/nvm"} +export NODE_VERSION=${2:-"lts/*"} +USERNAME=${3:-"automatic"} +UPDATE_RC=${4:-"true"} + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Ensure that login shells get the correct path if the user updated the PATH using ENV. 
+rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\${PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in ${POSSIBLE_USERS[@]}; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + +if [ "${NODE_VERSION}" = "none" ]; then + export NODE_VERSION= +fi + +# Ensure apt is in non-interactive to avoid prompts +export DEBIAN_FRONTEND=noninteractive + +# Install curl, apt-transport-https, tar, or gpg if missing +if ! dpkg -s apt-transport-https curl ca-certificates tar > /dev/null 2>&1 || ! type gpg > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends apt-transport-https curl ca-certificates tar gnupg2 +fi + +# Install yarn +if type yarn > /dev/null 2>&1; then + echo "Yarn already installed." +else + curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | (OUT=$(apt-key add - 2>&1) || echo $OUT) + echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list + apt-get update + apt-get -y install --no-install-recommends yarn +fi + +# Install the specified node version if NVM directory already exists, then exit +if [ -d "${NVM_DIR}" ]; then + echo "NVM already installed." + if [ "${NODE_VERSION}" != "" ]; then + su ${USERNAME} -c ". $NVM_DIR/nvm.sh && nvm install ${NODE_VERSION} && nvm clear-cache" + fi + exit 0 +fi + + +# Run NVM installer as non-root if needed +mkdir -p ${NVM_DIR} +chown ${USERNAME} ${NVM_DIR} +su ${USERNAME} -c "$(cat << EOF + set -e + + # Do not update profile - we'll do this manually + export PROFILE=/dev/null + + curl -so- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash + source ${NVM_DIR}/nvm.sh + if [ "${NODE_VERSION}" != "" ]; then + nvm alias default ${NODE_VERSION} + fi + nvm clear-cache +EOF +)" 2>&1 + +if [ "${UPDATE_RC}" = "true" ]; then + echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc with NVM scripts..." +(cat < /dev/null 2>&1; then + echo "Fixing permissions of \"\$NVM_DIR\"..." + sudoIf chown -R ${USERNAME}:root \$NVM_DIR + else + echo "Warning: NVM directory is not owned by ${USERNAME} and sudo is not installed. Unable to correct permissions." + fi +fi +[ -s "\$NVM_DIR/nvm.sh" ] && . "\$NVM_DIR/nvm.sh" +[ -s "\$NVM_DIR/bash_completion" ] && . "\$NVM_DIR/bash_completion" +EOF +) | tee -a /etc/bash.bashrc >> /etc/zsh/zshrc +fi + +echo "Done!" \ No newline at end of file diff --git a/.devcontainer/library-scripts/powershell-debian.sh b/.devcontainer/library-scripts/powershell-debian.sh new file mode 100644 index 0000000..a7842ff --- /dev/null +++ b/.devcontainer/library-scripts/powershell-debian.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 
+#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/powershell.md +# +# Syntax: ./powershell-debian.sh + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + +# Install curl, apt-transport-https, lsb-release, or gpg if missing +if ! dpkg -s apt-transport-https curl ca-certificates lsb-release > /dev/null 2>&1 || ! type gpg > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends apt-transport-https curl ca-certificates lsb-release gnupg2 +fi + +# Use correct source for distro (Ubuntu/Debian) and Codename (stretch, buster, bionic, focal) +DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') +CODENAME=$(lsb_release -cs) +curl -s https://packages.microsoft.com/keys/microsoft.asc | (OUT=$(apt-key add - 2>&1) || echo $OUT) +echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-${DISTRO}-${CODENAME}-prod ${CODENAME} main" > /etc/apt/sources.list.d/microsoft.list +apt-get update -yq +apt-get install -yq powershell +echo "Done!" diff --git a/.devcontainer/library-scripts/terraform-debian.sh b/.devcontainer/library-scripts/terraform-debian.sh new file mode 100644 index 0000000..dc1f8fb --- /dev/null +++ b/.devcontainer/library-scripts/terraform-debian.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/master/script-library/docs/terraform.md +# +# Syntax: ./terraform-debian.sh [terraform version] [tflint version] + +TERRAFORM_VERSION=${1:-"latest"} +TFLINT_VERSION=${2:-"latest"} + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +if [ "${TERRAFORM_VERSION}" = "latest" ] || [ "${TERRAFORM_VERSION}" = "lts" ] || [ "${TERRAFORM_VERSION}" = "current" ]; then + TERRAFORM_VERSION=$(curl -sSL https://releases.hashicorp.com/terraform/ | grep -m1 -oE '>terraform_[0-9]+\.[0-9]+\.[0-9]+<' | sed 's/^>terraform_\(.*\)<$/\1/') +fi + +if [ "${TFLINT_VERSION}" = "latest" ] || [ "${TFLINT_VERSION}" = "lts" ] || [ "${TFLINT_VERSION}" = "current" ]; then + LATEST_RELEASE=$(curl -sSL -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/terraform-linters/tflint/releases?per_page=1&page=1") + TFLINT_VERSION=$(echo ${LATEST_RELEASE} | grep -oE 'tag_name":\s*"v[^"]+' | sed -n '/tag_name":\s*"v/s///p') +fi + +# Install curl, unzip if missing +if ! dpkg -s curl ca-certificates unzip > /dev/null 2>&1; then + export DEBIAN_FRONTEND=noninteractive + if [ ! 
-d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends curl ca-certificates unzip +fi + +# Install Terraform, tflint +echo "Downloading terraform..." +mkdir -p /tmp/tf-downloads +curl -sSL -o /tmp/tf-downloads/terraform.zip https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip +unzip /tmp/tf-downloads/terraform.zip +mv -f terraform /usr/local/bin/ + +if [ "${TFLINT_VERSION}" != "none" ]; then + echo "Downloading tflint..." + curl -sSL -o /tmp/tf-downloads/tflint.zip https://github.com/terraform-linters/tflint/releases/download/v${TFLINT_VERSION}/tflint_linux_amd64.zip + unzip /tmp/tf-downloads/tflint.zip + mv -f tflint /usr/local/bin/ +fi + +rm -rf /tmp/tf-downloads +echo "Done!" diff --git a/.devcontainer/library-scripts/terraform-pre-commit.sh b/.devcontainer/library-scripts/terraform-pre-commit.sh new file mode 100644 index 0000000..1dd53a6 --- /dev/null +++ b/.devcontainer/library-scripts/terraform-pre-commit.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Installs useful Terraform tools and pre-commit + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + +# Install curl if missing +if ! dpkg -s curl ca-certificates > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends curl ca-certificates +fi + +# vars +PRECOMMIT_VERSION=${1:-"2.9.3"} + +# pre-commit +apt install -y python3-pip +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade pre-commit==${PRECOMMIT_VERSION} diff --git a/.devcontainer/library-scripts/tflint-plugins.sh b/.devcontainer/library-scripts/tflint-plugins.sh new file mode 100644 index 0000000..501775e --- /dev/null +++ b/.devcontainer/library-scripts/tflint-plugins.sh @@ -0,0 +1,43 @@ +#! /usr/bin/env bash +# +# installs and runs tflint with tflint-ruleset-azurerm plugin +# https://github.com/terraform-linters/tflint-ruleset-azurerm + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + +# Install curl if missing +if ! dpkg -s curl ca-certificates > /dev/null 2>&1; then + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + apt-get update + fi + apt-get -y install --no-install-recommends curl ca-certificates +fi + +message="Downloading tflint and azurerm plugin" +echo "STARTED: $message..." + +# download tflint-ruleset-azurerm plugin +curl -sL "$(curl -Ls https://api.github.com/repos/terraform-linters/tflint-ruleset-azurerm/releases/latest | grep -o -E "https://.+?_linux_amd64.zip")" -o tflint-ruleset-azurerm_linux_amd64.zip && unzip tflint-ruleset-azurerm_linux_amd64.zip && rm tflint-ruleset-azurerm_linux_amd64.zip + +# move tflint-ruleset-azurerm plugin to correct path +install -D -m 777 tflint-ruleset-azurerm /home/vscode/.tflint.d/plugins/tflint-ruleset-azurerm + +echo "FINISHED: $message." 
+ +# check versions +tflint --version + +# create tflint config +# cat > .tflint.hcl << EOF +# plugin "azurerm" { +# enabled = true +# } +# EOF +# cat .tflint.hcl diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..5c61f7c --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,22 @@ +## Description + +Recent fixes and features + +## Acceptance Checklist + +- [ ] Are the source and target branches correct? +- [ ] Has there been a successful build and destroy for the latest commit? + +## Release Notes + +### Added + +- ISSUE_OR_FEATURE_DESCRIPTION. Closed #xxx + +### Changed + +- ISSUE_OR_FEATURE_DESCRIPTION. Closed #xxx + +### Fixed + +- ISSUE_OR_FEATURE_DESCRIPTION. Closed #xxx diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 65594a1..16a6442 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,5 +1,5 @@ # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions -name: Build environment +name: build # name of GitHub event that triggers workflow # https://help.github.com/en/actions/reference/events-that-trigger-workflows#watch-event-watch @@ -9,7 +9,7 @@ on: types: [started] # trigger via webhook - # https://github.com/adamrushuk/aks-nexus-velero/blob/master/TriggerCustomAction.ps1#L28 + # https://github.com/adamrushuk/devops-lab/blob/master/TriggerCustomAction.ps1#L28 repository_dispatch: types: [build] @@ -22,7 +22,7 @@ on: # https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables env: # prefix: used for some globally unique name requirements - PREFIX: rush + PREFIX: arshz # debug CI_DEBUG: false @@ -37,21 +37,18 @@ env: # prod or staging. # "" disables cert-manager annotations (use if you already have an existing TLS secret) CERT_API_ENVIRONMENT: "" - DEMO_USER_USERNAME: demo_user - # DEMO_USER_PASSWORD: ${{ secrets.DEMO_USER_PASSWORD }} - DNS_DOMAIN_NAME: nexus.thehypepipe.co.uk DNS_RG_NAME: rg-dns - DOCKER_FQDN: docker.thehypepipe.co.uk EMAIL_ADDRESS: certadmin@domain.com ENABLE_TLS_INGRESS: true FORCE_TEST_FAIL: false K8S_TLS_SECRET_NAME: tls-secret - KEY_VAULT_NAME: kv-rush-iz6y KEY_VAULT_CERT_NAME: wildcard-thehypepipe-co-uk + KEY_VAULT_NAME: kv-rush-iz6y KEY_VAULT_RESOURCE_GROUP_NAME: rg-keyvault-acmebot - LOCATION: uksouth + # NOTE: "eastus" is cheaper than "uksouth" + LOCATION: eastus + NEXUS_USER_USERNAME: demo_user ROOT_DOMAIN_NAME: thehypepipe.co.uk - # NEXUS_ADMIN_PASSWORD: ${{ secrets.NEXUS_ADMIN_PASSWORD }} # STORAGE_KEY: 'env var set by Get-StorageKey.ps1' VELERO_ENABLED: true WEAVE_SCOPE_ENABLED: false @@ -60,7 +57,7 @@ env: TF_IN_AUTOMATION: "true" TF_INPUT: "false" TF_PLAN: "tfplan" - TF_VERSION: "0.12.29" # "latest" is supported + TF_VERSION: "0.12.30" # "latest" is supported TF_WORKING_DIR: ./terraform # Env var concatenation is currently not supported at Workflow or Job scope. 
See workaround below: @@ -70,7 +67,7 @@ jobs: build-and-deploy: # always pin versions - # view installed software: https://help.github.com/en/actions/reference/software-installed-on-github-hosted-runners + # view installed software: https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software runs-on: ubuntu-18.04 # only run if owner triggered action @@ -91,32 +88,15 @@ jobs: - name: Init tasks - inc Env var concatenation (Workaround) run: | chmod -R +x ./scripts/ - echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV + echo "ARGOCD_FQDN=argocd.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV + echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV + echo "DOCKER_FQDN=docker.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_ACCOUNT=${{ env.PREFIX }}sttfstate${{ env.LOCATION }}001" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_RG=${{ env.PREFIX }}-rg-tfstate-dev-001" >> $GITHUB_ENV - echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV echo "VELERO_STORAGE_ACCOUNT=${{ env.PREFIX }}stbckuksouth001" >> $GITHUB_ENV - - # # Show event info - # - name: Show triggered event data - # run: pwsh -command "./scripts/Get-EventData.ps1" - # env: - # GITHUB_CONTEXT: ${{ toJson(github) }} - - # # Linting multiple languages - # - name: Lint Code Base - # uses: github/super-linter@v3 - # env: - # DEFAULT_BRANCH: develop - # # VALIDATE_ALL_CODEBASE: false - # VALIDATE_ANSIBLE: true - # VALIDATE_BASH: true - # VALIDATE_POWERSHELL: true - # VALIDATE_SHELL_SHFMT: true - # VALIDATE_TERRAFORM: true - # VALIDATE_TERRAFORM_TERRASCAN: true - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV # Login @@ -140,6 +120,8 @@ jobs: - name: Replace tokens in Terraform config files run: pwsh -command "./scripts/Replace-Tokens.ps1" env: + ARGOCD_ADMIN_PASSWORD: ${{ secrets.ARGOCD_ADMIN_PASSWORD }} + HELM_CHART_REPO_DEPLOY_PRIVATE_KEY: ${{ secrets.HELM_CHART_REPO_DEPLOY_PRIVATE_KEY }} IFTTT_WEBHOOK_KEY: ${{ secrets.IFTTT_WEBHOOK_KEY }} - name: Create zip file of Function App @@ -218,7 +200,7 @@ jobs: run: ./scripts/ansible.sh env: NEXUS_ADMIN_PASSWORD: ${{ secrets.NEXUS_ADMIN_PASSWORD }} - DEMO_USER_PASSWORD: ${{ secrets.DEMO_USER_PASSWORD }} + NEXUS_USER_PASSWORD: ${{ secrets.NEXUS_USER_PASSWORD }} # Docker @@ -226,8 +208,8 @@ jobs: uses: Azure/docker-login@v1 with: login-server: ${{ env.DOCKER_FQDN }} - username: ${{ env.DEMO_USER_USERNAME }} - password: ${{ secrets.DEMO_USER_PASSWORD }} + username: ${{ env.NEXUS_USER_USERNAME }} + password: ${{ secrets.NEXUS_USER_PASSWORD }} - name: Push images to Docker repo run: ./scripts/push_docker_images.sh @@ -256,7 +238,7 @@ jobs: - name: Unregister NuGet proxy repo run: pwsh -command "Unregister-PSRepository -Name nuget.org-proxy -Verbose" - # Shows at the bottom of a run: https://github.com/adamrushuk/aks-nexus-velero/runs/1035347513?check_suite_focus=true + # Shows at the bottom of a run: https://github.com/adamrushuk/devops-lab/runs/1035347513?check_suite_focus=true - name: Pester report # TODO: remove continue-on-error once bug is fixed continue-on-error: true @@ -270,6 +252,7 @@ jobs: # Notify - name: Notify slack + continue-on-error: true env: SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} @@ -291,10 
+274,14 @@ jobs: - uses: actions/checkout@v2 # Init tasks - inc Env var concatenation + # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files - name: Init tasks - inc Env var concatenation (Workaround) + # * NOTE: different syntax required for Windows agents run: | - echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV - echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" | Out-File -Append -Encoding utf8 -FilePath "$env:GITHUB_ENV" + echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" | Out-File -Append -Encoding utf8 -FilePath "$env:GITHUB_ENV" + echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" | Out-File -Append -Encoding utf8 -FilePath "$env:GITHUB_ENV" + # Login - name: Login to Azure @@ -307,7 +294,10 @@ jobs: # Velero CLI - name: Test Velero CLI - if: ${{ env.VELERO_ENABLED == true }} + # NOTE: Some functions cast values to a string to perform comparisons + # https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#functions + # ! WARNING: only single quotes work for boolean comparison + if: env.VELERO_ENABLED == 'true' run: | az aks get-credentials --resource-group "${{ env.AKS_RG_NAME }}" --name "${{ env.AKS_CLUSTER_NAME }}" --overwrite-existing --admin diff --git a/.github/workflows/destroy.yml b/.github/workflows/destroy.yml index 7b37c54..a9c5596 100644 --- a/.github/workflows/destroy.yml +++ b/.github/workflows/destroy.yml @@ -1,11 +1,11 @@ # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions -name: Destroy environment +name: destroy # name of GitHub event that triggers workflow # https://help.github.com/en/actions/reference/events-that-trigger-workflows#watch-event-watch on: # trigger via webhook - # https://github.com/adamrushuk/aks-nexus-velero/blob/master/TriggerCustomAction.ps1#L28 + # https://github.com/adamrushuk/devops-lab/blob/master/TriggerCustomAction.ps1#L28 repository_dispatch: types: [destroy] @@ -18,7 +18,7 @@ on: # https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables env: # prefix: used for some globally unique name requirements - PREFIX: rush + PREFIX: arshz # debug CI_DEBUG: true @@ -32,7 +32,6 @@ env: # other # prod or staging CERT_API_ENVIRONMENT: staging - DNS_DOMAIN_NAME: nexus.thehypepipe.co.uk DNS_RG_NAME: rg-dns EMAIL_ADDRESS: certadmin@domain.com ENABLE_TLS_INGRESS: true @@ -41,7 +40,8 @@ env: KEY_VAULT_NAME: kv-rush-iz6y KEY_VAULT_CERT_NAME: wildcard-thehypepipe-co-uk KEY_VAULT_RESOURCE_GROUP_NAME: rg-keyvault-acmebot - LOCATION: uksouth + # NOTE: "eastus" is cheaper than "uksouth" + LOCATION: eastus ROOT_DOMAIN_NAME: thehypepipe.co.uk # STORAGE_KEY: 'env var set by Get-StorageKey.ps1' VELERO_ENABLED: true @@ -49,7 +49,7 @@ env: # terraform TF_IN_AUTOMATION: "true" TF_INPUT: "false" - TF_VERSION: "0.12.29" # "latest" is supported + TF_VERSION: "0.12.30" # "latest" is supported TF_WORKING_DIR: terraform # Env var concatenation is currently not supported at Workflow or Job scope. 
See workaround below: @@ -80,12 +80,14 @@ jobs: - name: Concatenate env vars (Workaround) run: | chmod -R +x ./scripts/ - echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV + echo "ARGOCD_FQDN=argocd.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV + echo "DNS_DOMAIN_NAME=nexus.${{ env.ROOT_DOMAIN_NAME }}" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_ACCOUNT=${{ env.PREFIX }}sttfstate${{ env.LOCATION }}001" >> $GITHUB_ENV echo "TERRAFORM_STORAGE_RG=${{ env.PREFIX }}-rg-tfstate-dev-001" >> $GITHUB_ENV - echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV echo "VELERO_STORAGE_ACCOUNT=${{ env.PREFIX }}stbckuksouth001" >> $GITHUB_ENV + echo "VELERO_STORAGE_RG=${{ env.PREFIX }}-rg-velero-dev-001" >> $GITHUB_ENV # # Show event info # - name: Show triggered event data @@ -99,6 +101,7 @@ jobs: # Ensure AKS cluster is running, else timeouts will occur on k8s Terraform resource destroy tasks - name: Start AKS Cluster + continue-on-error: true run: ./scripts/start_aks_cluster.sh # Prereqs @@ -130,6 +133,7 @@ jobs: # Notify - name: Notify slack + continue-on-error: true env: SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} diff --git a/.github/workflows/start_aks_cluster.yml b/.github/workflows/start_aks_cluster.yml index 13a16eb..20f17a4 100644 --- a/.github/workflows/start_aks_cluster.yml +++ b/.github/workflows/start_aks_cluster.yml @@ -1,4 +1,4 @@ -name: Start AKS Cluster +name: start_aks_cluster on: repository_dispatch: @@ -13,7 +13,7 @@ on: # global environment variables env: # prefix: used for some globally unique name requirements - PREFIX: rush + PREFIX: arshz # debug CI_DEBUG: true @@ -32,32 +32,33 @@ jobs: if: github.actor == github.event.repository.owner.login steps: - # Checkout - - uses: actions/checkout@v2 - # specify different branch - # NOT required as I've changed the default branch to develop - # with: - # ref: develop - - # Init tasks - inc env var concatenation - - name: Init tasks - inc Env var concatenation (Workaround) - run: | - chmod -R +x ./scripts/ - echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV - echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV - - # Login - - name: Login to Azure - run: ./scripts/azure_login.sh - - # Start - # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster - - name: Start AKS Cluster - run: ./scripts/start_aks_cluster.sh - - # Notify - - name: Notify slack - env: - SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Start AKS Cluster complete" + # Checkout + - uses: actions/checkout@v2 + # specify different branch + # NOT required as I've changed the default branch to develop + # with: + # ref: develop + + # Init tasks - inc env var concatenation + - name: Init tasks - inc Env var concatenation (Workaround) + run: | + chmod -R +x ./scripts/ + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV + echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + + # Login + - name: Login to Azure + run: ./scripts/azure_login.sh + + # Start + # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster + - name: Start AKS Cluster + run: ./scripts/start_aks_cluster.sh + + # Notify + - name: Notify slack + continue-on-error: true + env: + SLACK_CHANNEL_ID: ${{ 
secrets.SLACK_CHANNEL_ID }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Start AKS Cluster complete" diff --git a/.github/workflows/stop_aks_cluster.yml b/.github/workflows/stop_aks_cluster.yml index b7259d7..152263c 100644 --- a/.github/workflows/stop_aks_cluster.yml +++ b/.github/workflows/stop_aks_cluster.yml @@ -1,4 +1,4 @@ -name: Stop AKS Cluster +name: stop_aks_cluster on: repository_dispatch: @@ -13,7 +13,7 @@ on: # global environment variables env: # prefix: used for some globally unique name requirements - PREFIX: rush + PREFIX: arshz # debug CI_DEBUG: true @@ -32,32 +32,33 @@ jobs: if: github.actor == github.event.repository.owner.login steps: - # Checkout - - uses: actions/checkout@v2 - # specify different branch - # NOT required as I've changed the default branch to develop - # with: - # ref: develop - - # Init tasks - inc env var concatenation - - name: Init tasks - inc Env var concatenation (Workaround) - run: | - chmod -R +x ./scripts/ - echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV - echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV - - # Login - - name: Login to Azure - run: ./scripts/azure_login.sh - - # Stop - # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster - - name: Stop AKS Cluster - run: ./scripts/stop_aks_cluster.sh - - # Notify - - name: Notify slack - env: - SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Stop AKS Cluster complete" + # Checkout + - uses: actions/checkout@v2 + # specify different branch + # NOT required as I've changed the default branch to develop + # with: + # ref: develop + + # Init tasks - inc env var concatenation + - name: Init tasks - inc Env var concatenation (Workaround) + run: | + chmod -R +x ./scripts/ + echo "AKS_RG_NAME=${{ env.PREFIX }}-rg-aks-dev-001" >> $GITHUB_ENV + echo "AKS_CLUSTER_NAME=${{ env.PREFIX }}-aks-001" >> $GITHUB_ENV + + # Login + - name: Login to Azure + run: ./scripts/azure_login.sh + + # Stop + # Prereqs: https://docs.microsoft.com/en-us/azure/aks/start-stop-cluster + - name: Stop AKS Cluster + run: ./scripts/stop_aks_cluster.sh + + # Notify + - name: Notify slack + continue-on-error: true + env: + SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: ./scripts/send_slack_message.sh "[aks-nexus-velero] Stop AKS Cluster complete" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..8bddb06 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 # "" gets replaced with latest repo release versions by running "pre-commit autoupdate" + hooks: + - id: check-merge-conflict + # - id: trailing-whitespace # find way to ignore markdown files (.md) + # - id: check-yaml + - id: check-added-large-files + - id: detect-private-key + + - repo: https://github.com/antonbabenko/pre-commit-terraform.git + rev: v1.45.0 # Get the latest from: https://github.com/antonbabenko/pre-commit-terraform/releases + hooks: + - id: terraform_docs + - id: terraform_fmt + - id: terraform_validate + - id: terraform_tflint + - id: terraform_tfsec + + # ! 
WIP: unhandled errors running tflint with pre-commit + # - repo: https://github.com/gruntwork-io/pre-commit + # rev: v0.1.12 # Get the latest from: https://github.com/gruntwork-io/pre-commit/releases + # hooks: + # - id: tflint + # args: + # - "terraform" + # # - "--module" + # # - "--deep" + # - "--config=.tflint.hcl" + # # # - id: terraform-validate + # # # - id: terraform-fmt diff --git a/.tflint.hcl b/.tflint.hcl new file mode 100644 index 0000000..d4e9c23 --- /dev/null +++ b/.tflint.hcl @@ -0,0 +1,3 @@ +plugin "azurerm" { + enabled = true +} diff --git a/README.md b/README.md index 7a4819c..2f26b02 100644 --- a/README.md +++ b/README.md @@ -1,96 +1,81 @@ -![Build environment](https://github.com/adamrushuk/aks-nexus-velero/workflows/Build%20environment/badge.svg) - + # aks-nexus-velero -Provisions an AKS cluster, deploys Nexus Repository OSS, configures Velero backups. +[![Build environment](https://github.com/adamrushuk/devops-lab/workflows/build/badge.svg)](https://github.com/adamrushuk/devops-lab/actions?query=workflow%3A%22build) + +This is the main repo I use to test Kubernetes / DevOps applications, products, and processes. It's essentially my +playground in Azure. +I started off with a Kubernetes cluster, Nexus Repository OSS, and Velero for backups, but there are *loads* more +being used now. + + ## Contents -- [aks-nexus-velero](#aks-nexus-velero) - - [Contents](#contents) - - [Getting Started](#getting-started) - - [Assumptions](#assumptions) - - [Azure Secrets](#azure-secrets) - - [Login to Nexus Console](#login-to-nexus-console) +- [Getting Started](#getting-started) + - [Prereqs](#prereqs) + - [Configure DNS Zone](#configure-dns-zone) + - [Configure Key Vault / LetsEncrypt TLS Certificate](#configure-key-vault--letsencrypt-tls-certificate) + - [Configure Azure Authentication](#configure-azure-authentication) + - [Create Secrets](#create-secrets) + - [Running the Build workflow](#running-the-build-workflow) + - [Running the Destroy workflow](#running-the-destroy-workflow) ## Getting Started -Before you start the `build` GitHub Action workflow, you need to create the following Secrets within -[GitHub Settings](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets): +Follow the sections below to prepare and configure your environment, ready to run your first build: + +### Prereqs -### Assumptions +DNS zones and TLS certs are typically created out-of-band (outside of the main build automation), so we'll create +these only once, and they will exist across multiple builds. - +#### Configure DNS Zone -- Configure Azure Service Principle for Terraform, and grant permission to manage AAD: -https://www.terraform.io/docs/providers/azuread/guides/service_principal_configuration.html#granting-administrator-permissions +Use the [Setting up ExternalDNS for Services on Azure tutorial](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/azure.md) + to create and configure your DNS zone, as we will be using ExternalDNS within the kubernetes cluster to +dynamically update DNS records. -These API permissions are required for your Terraform Service Principle: +#### Configure Key Vault / LetsEncrypt TLS Certificate -**Azure Active Directory Graph** -Application Permissions: +Use the [keyvault-acmebot Getting Started guide](https://github.com/shibayan/keyvault-acmebot#getting-started) to +deploy AcmeBot and configure a wildcard certificate for your domain. -1. 
Application.ReadWrite.All - Read and write all applications -1. Directory.Read.All - Read directory data +### Configure Azure Authentication -Delegated Permissions: +Before the [`build`](./.github/workflows/build.yml) GitHub Action workflow can be run, authentication needs to be +configured for Azure. -1. User.Read - Sign in and read user profile +1. [Create a Service Principal with a Client Secret](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/service_principal_client_secret#creating-the-application-and-service-principal). -### Azure Secrets +1. [Grant permissions to manage Azure Active Directory](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/service_principal_configuration#azure-active-directory-permissions). - +### Create Secrets + +Once Azure authentication has been configured, the Service Principle credential values can be [passed as environment variables](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/service_principal_client_secret#configuring-the-service-principal-in-terraform). + +[Use these instructions](https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository) to create the following secrets for your repository: - `ARM_CLIENT_ID` - `ARM_CLIENT_SECRET` - `ARM_SUBSCRIPTION_ID` - `ARM_TENANT_ID` -## Login to Nexus Console - -Follow the steps below to update AKS credentials, get the Nexus admin password, then login and update the password: - -1. Import the AKS Cluster credentials: - - ```bash - # Vars - PREFIX="rush" - AKS_CLUSTER_NAME="$PREFIX-aks-001" - AKS_RG_NAME="$PREFIX-rg-aks-dev-001" - - # AKS Cluster credentials - az aks get-credentials --resource-group $AKS_RG_NAME --name $AKS_CLUSTER_NAME --overwrite-existing --admin - - # [OPTIONAL] View AKS Dashboard - az aks browse --resource-group $AKS_RG_NAME --name $AKS_CLUSTER_NAME - ``` - -1. Get the auto-generated admin password from within the Nexus container: - - ```bash - # Get pod name - pod_name=$(kubectl get pod --namespace nexus -l app=nexus -o jsonpath="{.items[0].metadata.name}") - - # Get admin password from pod - admin_password=$(kubectl exec -n ingress -it $pod_name -- cat /nexus-data/admin.password) - echo "$admin_password" +### Running the Build workflow - # [OPTIONAL] Enter pod shell, then output admin password - kubectl exec -n ingress -it $pod_name -- /bin/bash - echo -e "\nadmin password: \n$(cat /nexus-data/admin.password)\n" - ``` +Now that Azure authentication has been configured with corresponding secrets, the build workflow is ready to be run: -1. Open the Nexus web console +1. Navigate to the [build workflow](../../actions?query=workflow%3Abuild). +1. Click the `Run workflow` drop-down button. +1. Select the desired branch. +1. Click the `Run workflow` button. - ```bash - # Set URL - nexus_host=$(kubectl get ingress -A -o jsonpath="{.items[0].spec.rules[0].host}") - nexus_base_url="https://$nexus_host" +### Running the Destroy workflow - # Sign in as admin, using auto-generated admin password from prereqs section - echo "$nexus_base_url" - ``` +There will be ongoing costs if the environment is left running, so to avoid unexpected bills the destroy workflow +should be run once testing has been completed: -1. Click `Sign in` in top right corner, then login using admin password. -1. Update admin password. -1. Enable anonymous access (to avoid using credential during repo testing). +1. 
Navigate to the [destroy workflow](../../actions?query=workflow%3Adestroy). +1. Click the `Run workflow` drop-down button. +1. Select the desired branch. +1. Click the `Run workflow` button. diff --git a/aad-pod-identity/default-values.yaml b/aad-pod-identity/default-values.yaml deleted file mode 100644 index 4fc2bc9..0000000 --- a/aad-pod-identity/default-values.yaml +++ /dev/null @@ -1,195 +0,0 @@ -# Default values for aad-pod-identity-helm. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -nameOverride: "" -fullnameOverride: "" - -image: - repository: mcr.microsoft.com/k8s/aad-pod-identity - imagePullPolicy: Always - -# https://github.com/Azure/aad-pod-identity#4-optional-match-pods-in-the-namespace -# By default, AAD Pod Identity matches pods to identities across namespaces. -# To match only pods in the namespace containing AzureIdentity set this to true. -forceNameSpaced: "false" - -# When NMI runs on a node where MIC is running, then MIC token request call is also -# intercepted by NMI. MIC can't get a valid token as to initialize and then -# assign the identity. Installing an exception for MIC would ensure all token requests -# for MIC pods directly go to IMDS and not go through the pod-identity validation -# https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.app-exception.md -installMICException: "true" - -## If using a separate service principal for aad-pod-identity instead of cluster service principal specify the following -## (The chart will perform the base64 encoding for you for values that are stored in secrets.) -adminsecret: {} -# cloud: -# subscriptionID: -# resourceGroup: -# vmType: <`standard` for normal virtual machine nodes, and `vmss` for cluster deployed with a virtual machine scale set> -# tenantID: -# clientID: -# clientSecret: - -# Operation mode for pod-identity. Default is standard mode that has MIC doing identity assignment -# Allowed values: "standard", "managed" -operationMode: "standard" - -mic: - image: mic - tag: 1.6.2 - - priorityClassName: "" - - # log level. Uses V logs (glog) - logVerbosity: 0 - - resources: - limits: - cpu: 200m - memory: 1024Mi - requests: - cpu: 100m - memory: 256Mi - - podAnnotations: {} - - ## Node labels for pod assignment - ## aad-pod-identity is currently only supported on linux - nodeSelector: - kubernetes.io/os: linux - - tolerations: [] - - affinity: {} - - leaderElection: - # Override leader election instance name (default is 'hostname') - instance: "" - # Override the namespace to create leader election objects (default is default namespace) - namespace: "" - # Override leader election name (default is aad-pod-identity-mic) - name: "" - # Override leader election duration (default is 15s) - duration: "" - - # Override http liveliness probe port (default is 8080) - probePort: "" - - # Override interval in seconds at which sync loop should periodically check for errors and reconcile (default is 3600s) - syncRetryDuration: "" - - # Override the defult value of immutable identities. 
- immutableUserMSIs: [] - # Example of MSIs (should be replaced with the real client ids) - #- "00000000-0000-0000-0000-000000000000" - #- "11111111-1111-1111-1111-111111111111" - - # https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#batch-create-delete-flag - # default value is 20 - createDeleteBatch: "" - - # https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#client-qps-flag - # default value is 5 - clientQps: "" - - # default value is 8888 - # prometheus port for metrics - prometheusPort: "" - - # cloud configuration used to authenticate with Azure - cloudConfig: "/etc/kubernetes/azure.json" - - # The maximum retry of UpdateUserMSI call. MIC updates all the identities in a batch. If a single identity contains an error - # or is invalid, then the entire operation fails. Configuring this flag will make MIC retry by removing the erroneous identities - # returned in the error - # Default value is 2. - updateUserMSIMaxRetry: "" - - # The duration to wait before retrying UpdateUserMSI (batch assigning/un-assigning identity from VM/VMSS) in case of errors - # Default value is 1s - updateUserMSIRetryInterval: "" - -nmi: - image: nmi - tag: 1.6.2 - - priorityClassName: "" - - resources: - limits: - cpu: 200m - memory: 512Mi - requests: - cpu: 100m - memory: 256Mi - - podAnnotations: {} - - ## Node labels for pod assignment - ## aad-pod-identity is currently only supported on linux - nodeSelector: - kubernetes.io/os: linux - - tolerations: [] - - affinity: {} - - # Override iptables update interval in seconds (default is 60) - ipTableUpdateTimeIntervalInSeconds: "" - - # Override mic namespace to short circuit MIC token requests (default is default namespace) - micNamespace: "" - - # Override http liveliness probe port (default is 8080) - probePort: "8085" - - # Override number of retries in NMI to find assigned identity in CREATED state (default is 16) - retryAttemptsForCreated: "" - - # Override number of retries in NMI to find assigned identity in ASSIGNED state (default is 4) - retryAttemptsForAssigned: "" - - # Override retry interval to find assigned identities in seconds (default is 5) - findIdentityRetryIntervalInSeconds: "" - - # Enable scale features - https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#enable-scale-features-flag - # Accepted values are true/false. Default is false. - enableScaleFeatures: "" - - # default value is 9090 - # prometheus port for metrics - prometheusPort: "" - - # https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#block-instance-metadata-flag - # default is false - blockInstanceMetadata: "" - - # https://github.com/Azure/aad-pod-identity/blob/master/docs/readmes/README.featureflags.md#metadata-header-required-flag - # default is false - metadataHeaderRequired: "" - -rbac: - enabled: true - # NMI requires permissions to get secrets when service principal (type: 1) is used in AzureIdentity. - # If using only MSI (type: 0) in AzureIdentity, secret get permission can be disabled by setting this to false. 
- allowAccessToSecrets: true - -# Create azure identities and bindings -azureIdentities: [] - # - name: "azure-identity" - # # if not defined, then the azure identity will be deployed in the same namespace as the chart - # namespace: "" - # # type 0: MSI, type 1: Service Principal - # type: 0 - # # /subscriptions/subscription-id/resourcegroups/resource-group/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity-name - # resourceID: "" - # clientID: "" - # binding: - # name: "azure-identity-binding" - # # The selector will also need to be included in labels for app deployment - # selector: "demo" - -# If true, install necessary custom resources. -installCRDs: false diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index 8f61b85..6a516a4 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -7,8 +7,8 @@ api_base_uri: "" api_url: "{{ api_base_uri }}/service/rest" api_user: "admin" autogenerated_admin_password: "{{ lookup('env', 'AUTOGENERATED_ADMIN_PASSWORD') }}" -demo_user_username: "{{ lookup('env', 'DEMO_USER_USERNAME') }}" -demo_user_password: "{{ lookup('env', 'DEMO_USER_PASSWORD') }}" +nexus_user_username: "{{ lookup('env', 'NEXUS_USER_USERNAME') }}" +nexus_user_password: "{{ lookup('env', 'NEXUS_USER_PASSWORD') }}" enable_debug_output: "{{ lookup('env', 'CI_DEBUG') }}" nexus_namespace: "nexus" diff --git a/ansible/roles/pypi_repo/files/hello/setup.py b/ansible/roles/pypi_repo/files/hello/setup.py index 7656d3c..4b2f519 100644 --- a/ansible/roles/pypi_repo/files/hello/setup.py +++ b/ansible/roles/pypi_repo/files/hello/setup.py @@ -14,7 +14,7 @@ long_description_content_type='text/markdown', author='Adam Rush', author_email='adam@example.com', - url='https://github.com/adamrushuk/aks-nexus-velero/tree/develop/ansible/roles/pypi_repo/files/hello/', + url='https://github.com/adamrushuk/devops-lab/tree/develop/ansible/roles/pypi_repo/files/hello/', license='MIT', packages=['helloworld'], zip_safe=False diff --git a/ansible/roles/user/defaults/main.yml b/ansible/roles/user/defaults/main.yml index 743b3c6..ef6b666 100644 --- a/ansible/roles/user/defaults/main.yml +++ b/ansible/roles/user/defaults/main.yml @@ -1,10 +1,10 @@ user_request_body: - userId: "{{ demo_user_username }}" + userId: "{{ nexus_user_username }}" firstName: "demo" lastName: "user" - emailAddress: "{{ demo_user_username }}@domain.com" + emailAddress: "{{ nexus_user_username }}@domain.com" status: "active" - password: "{{ demo_user_password }}" + password: "{{ nexus_user_password }}" roles: [ "nx-admin" ] diff --git a/function_app/TimerTrigger1/readme.md b/function_app/TimerTrigger1/readme.md index fa4456c..049c48d 100644 --- a/function_app/TimerTrigger1/readme.md +++ b/function_app/TimerTrigger1/readme.md @@ -25,7 +25,7 @@ Ensure you have a `local.settings.json` file with the following settings (replac Full instructions here: https://docs.microsoft.com/en-us/azure/azure-functions/functions-develop-vs-code?tabs=csharp#run-functions-locally -Ensure the [Azure Functions Core Tools are installed](https://docs.microsoft.com/en-us/azure/azure-functions/functions-run-local?tabs=windows%2Ccsharp%2Cbash#install-the-azure-functions-core-tools), +Ensure the [Azure Functions Core Tools are installed](https://docs.microsoft.com/en-us/azure/azure-functions/functions-run-local?tabs=windows%2Ccsharp%2Cbash#install-the-azure-functions-core-tools), ```powershell # install prereqs diff --git a/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 
b/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 index fddc1f1..6c256bd 100644 --- a/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 +++ b/nexus/repositories/nuget/PSvCloud/Public/Edge/Get-CIEdgeSecurityCheck.ps1 @@ -7,7 +7,7 @@ function Get-CIEdgeSecurityCheck { Retrieves basic vShield edge security information including: - FW enabled (True/False) - FW default action (Allow/Drop) - - Any insecure FW rules + - Any insecure FW rules .PARAMETER Name Specifies the name of the vShield Edge you want to retrieve. @@ -46,7 +46,7 @@ function Get-CIEdgeSecurityCheck { [Parameter(Mandatory = $true, ParameterSetName = "ByName")] [ValidateNotNullOrEmpty()] [String[]]$Name, - + [Parameter(Mandatory = $true, ValueFromPipeline = $true, ParameterSetName = "Standard")] [ValidateNotNullOrEmpty()] $CIEdge @@ -64,9 +64,9 @@ function Get-CIEdgeSecurityCheck { if ($PsCmdlet.ParameterSetName -eq "ByName") { $CIEdge = Get-CIEdge -Name $Name - + } - + # We need this foreach to handle multiple edges returned via 'name' parameter foreach ($Edge in $CIEdge) { # Check Firewall default action @@ -86,10 +86,10 @@ function Get-CIEdgeSecurityCheck { } # Check for insecure firewall setups - $AllowedEnabledRules = $Edge.XML.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule | + $AllowedEnabledRules = $Edge.XML.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule | Where-Object {$_.IsEnabled -eq $true -and $_.Policy -eq "allow"} - # Initialise array ready for PSCustomObject(s) of firewall rules + # Initialise array ready for PSCustomObject(s) of firewall rules $InSecureFirewallRules = @() foreach ($Rule in $AllowedEnabledRules) { @@ -98,42 +98,42 @@ function Get-CIEdgeSecurityCheck { $OffendingRuleCounter = $null switch ($Rule) { - {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "external" -and $Rule.DestinationPortRange -eq "Any"} { + {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "external" -and $Rule.DestinationPortRange -eq "Any"} { $OffendingRuleCounter = $true $RuleId = $Rule.Id - $RuleDescription = $Rule.Description + $RuleDescription = $Rule.Description $RuleViolation = "External to External on any port" break } - {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "any" -and $Rule.DestinationPortRange -eq "Any"} { + {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "any" -and $Rule.DestinationPortRange -eq "Any"} { $OffendingRuleCounter = $true $RuleId = $Rule.Id - $RuleDescription = $Rule.Description + $RuleDescription = $Rule.Description $RuleViolation = "External to Any on any port" break } - {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "internal" -and $Rule.DestinationPortRange -eq "Any"} { + {$Rule.SourceIp -eq "external" -and $Rule.DestinationIp -eq "internal" -and $Rule.DestinationPortRange -eq "Any"} { $OffendingRuleCounter = $true $RuleId = $Rule.Id - $RuleDescription = $Rule.Description + $RuleDescription = $Rule.Description $RuleViolation = "External to Internal on any port" break } - {$Rule.SourceIp -eq "any" -and $Rule.DestinationIp -eq "any" -and $Rule.DestinationPortRange -eq "Any"} { + {$Rule.SourceIp -eq "any" -and $Rule.DestinationIp -eq "any" -and $Rule.DestinationPortRange -eq "Any"} { $OffendingRuleCounter = $true $RuleId = $Rule.Id - $RuleDescription = $Rule.Description + $RuleDescription = $Rule.Description $RuleViolation = "Any to Any on any port" break } } - + # Build the offending rule PSCustomObject if 
($OffendingRuleCounter) { $InSecureFirewallRules += [PSCustomObject]@{ RuleId = $RuleId RuleDescription = $RuleDescription - RuleViolation = $RuleViolation + RuleViolation = $RuleViolation ExtensionData = $Rule } } diff --git a/nginx/default_nginx_values.yaml b/nginx/default_nginx_values.yaml index 04b6d94..8c60a24 100644 --- a/nginx/default_nginx_values.yaml +++ b/nginx/default_nginx_values.yaml @@ -1,13 +1,20 @@ -# source: https://github.com/kubernetes/ingress-nginx/blob/ingress-nginx-3.11.0/charts/ingress-nginx/values.yaml +# source: https://github.com/kubernetes/ingress-nginx/blob/helm-chart-3.20.1/charts/ingress-nginx/values.yaml ## nginx configuration ## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md ## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + controller: + name: controller image: repository: k8s.gcr.io/ingress-nginx/controller - tag: "v0.41.2" - digest: sha256:1f4f402b9c14f3ae92b11ada1dfe9893a88f0faeb0b2f4b903e2c67a0c3bf0de + tag: "v0.43.0" + digest: sha256:9bba603b99bf25f6d117cf1235b6598c16033ad027b143c90fa5b3cc583c5713 pullPolicy: IfNotPresent # www-data -> uid 101 runAsUser: 101 @@ -115,6 +122,10 @@ controller: ## Annotations to be added to the udp config configmap annotations: {} + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + ## Additional command line arguments to pass to nginx-ingress-controller ## E.g. to specify the default SSL certificate you can use ## extraArgs: @@ -398,6 +409,8 @@ controller: enabled: false annotations: {} + # loadBalancerIP: "" + ## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. loadBalancerSourceRanges: [] @@ -576,16 +589,13 @@ controller: ## revisionHistoryLimit: 10 -# Maxmind license key to download GeoLite2 Databases -# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases -maxmindLicenseKey: "" - ## Default 404 backend ## defaultBackend: ## enabled: false + name: defaultbackend image: repository: k8s.gcr.io/defaultbackend-amd64 tag: "1.5" diff --git a/scripts/Start-Test.ps1 b/scripts/Start-Test.ps1 index cf4961a..6b7db6a 100755 --- a/scripts/Start-Test.ps1 +++ b/scripts/Start-Test.ps1 @@ -18,7 +18,7 @@ $taskMessage = "Installing Pester " Write-Verbose "STARTED: $taskMessage..." try { Set-PSRepository -Name "PSGallery" -InstallationPolicy "Trusted" - Install-Module -Name "Pester" -Scope "CurrentUser" -Repository "PSGallery" -Force -RequiredVersion 4.10.1 + Install-Module -Name "Pester" -Scope "CurrentUser" -Repository "PSGallery" -MinimumVersion 5.1.0 -Verbose Write-Verbose "FINISHED: $taskMessage." } diff --git a/scripts/tflint.sh b/scripts/tflint.sh index 1378ea8..26bfcf6 100644 --- a/scripts/tflint.sh +++ b/scripts/tflint.sh @@ -6,6 +6,9 @@ set -euo pipefail trap "echo 'error: Script failed: see failed command above'" ERR +# vars +DISABLED_RULES=("azurerm_log_analytics_workspace_invalid_retention_in_days") + message="Downloading tflint and azurerm plugin" echo "STARTED: $message..." 
@@ -32,4 +35,5 @@ EOF cat .tflint.hcl # run tflint -TFLINT_LOG=debug ./tflint "$TF_WORKING_DIR" +# expand array for disabled rules +TFLINT_LOG=debug ./tflint "$TF_WORKING_DIR" --disable-rule="${DISABLED_RULES[*]}" diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000..7001f0c --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,9 @@ +# Test README + +**PRE-COMMIT-TERRAFORM DOCS** content will be automatically created below: + +--- + + +*auto populated information + diff --git a/terraform/aks.tf b/terraform/aks.tf index b6bdf8c..43d5548 100644 --- a/terraform/aks.tf +++ b/terraform/aks.tf @@ -54,7 +54,7 @@ resource "azurerm_log_analytics_solution" "aks" { # https://registry.terraform.io/modules/adamrushuk/aks/azurerm/latest module "aks" { source = "adamrushuk/aks/azurerm" - version = "0.4.1" + version = "0.4.2" kubernetes_version = var.kubernetes_version location = azurerm_resource_group.aks.location @@ -67,12 +67,14 @@ module "aks" { # override defaults default_node_pool = { - name = var.agent_pool_profile_name - count = var.agent_pool_node_count + name = var.agent_pool_profile_name + count = var.agent_pool_node_count + # availability_zones = null vm_size = var.agent_pool_profile_vm_size enable_auto_scaling = var.agent_pool_enable_auto_scaling - min_count = var.agent_pool_node_min_count max_count = var.agent_pool_node_max_count + max_pods = 90 + min_count = var.agent_pool_node_min_count os_disk_size_gb = var.agent_pool_profile_disk_size_gb } diff --git a/terraform/dns.tf b/terraform/dns.tf index 09a0f59..d220320 100644 --- a/terraform/dns.tf +++ b/terraform/dns.tf @@ -82,6 +82,7 @@ resource "helm_release" "external_dns" { repository = "https://charts.bitnami.com/bitnami" version = var.external_dns_chart_version timeout = 600 + atomic = true # values = [file("helm/NOT_USED.yaml")] set { diff --git a/terraform/files/akvs-certificate-sync.yaml b/terraform/files/akvs-certificate-sync.yaml index 33b5682..0e93d81 100644 --- a/terraform/files/akvs-certificate-sync.yaml +++ b/terraform/files/akvs-certificate-sync.yaml @@ -3,15 +3,15 @@ apiVersion: spv.no/v1alpha1 kind: AzureKeyVaultSecret metadata: name: certificate-sync - namespace: nexus + namespace: nexus # cert dest namespace spec: vault: - name: __KEY_VAULT_NAME__ # name of key vault + name: __KEY_VAULT_NAME__ # name of key vault object: name: __KEY_VAULT_CERT_NAME__ # key vault certificate name type: certificate output: secret: name: __K8S_TLS_SECRET_NAME__ # kubernetes secret name - type: kubernetes.io/tls # kubernetes secret type + type: kubernetes.io/tls # kubernetes secret type chainOrder: ensureserverfirst diff --git a/terraform/files/argocd-akvs-certificate-sync.yaml b/terraform/files/argocd-akvs-certificate-sync.yaml new file mode 100644 index 0000000..9d1f2d8 --- /dev/null +++ b/terraform/files/argocd-akvs-certificate-sync.yaml @@ -0,0 +1,17 @@ +# https://akv2k8s.io/tutorials/sync/2-certificate/ +apiVersion: spv.no/v1alpha1 +kind: AzureKeyVaultSecret +metadata: + name: argocd-certificate-sync + namespace: argocd # cert dest namespace +spec: + vault: + name: __KEY_VAULT_NAME__ # name of key vault + object: + name: __KEY_VAULT_CERT_NAME__ # key vault certificate name + type: certificate + output: + secret: + name: argocd-ingress-tls # kubernetes secret name + type: kubernetes.io/tls # kubernetes secret type + chainOrder: ensureserverfirst diff --git a/terraform/files/argocd-gitlab.yaml b/terraform/files/argocd-gitlab.yaml new file mode 100644 index 0000000..72206fe --- /dev/null +++ 
b/terraform/files/argocd-gitlab.yaml @@ -0,0 +1,28 @@ +# https://argoproj.github.io/argo-cd/user-guide/helm/ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + # ! MUST match the existing helm release name: https://argoproj.github.io/argo-cd/user-guide/helm/#helm-release-name + name: ar-gitlab + # namespace of argo cd deployment + namespace: argocd +spec: + project: default + source: + repoURL: git@github.com:adamrushuk/charts-private.git + targetRevision: main + path: gitlab-https + helm: + # target helm version + version: v3 + # values file path is relative from the source.path folder + valueFiles: + - ar-values-dev.yaml + destination: + server: https://kubernetes.default.svc + namespace: gitlab + syncPolicy: {} + # # sync options which modifies sync behavior + # syncOptions: + # # namespace Auto-Creation ensures that namespace specified as the application destination exists in the destination cluster + # - CreateNamespace=true diff --git a/terraform/files/argocd-jenkins.yaml b/terraform/files/argocd-jenkins.yaml new file mode 100644 index 0000000..3dfc029 --- /dev/null +++ b/terraform/files/argocd-jenkins.yaml @@ -0,0 +1,24 @@ +--- +# https://argoproj.github.io/argo-cd/user-guide/helm/ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + # ! MUST match the existing helm release name + name: ar-jenkins + namespace: argocd +spec: + project: default + source: + repoURL: git@github.com:adamrushuk/charts-private.git + targetRevision: main + path: jenkins + helm: + # target helm version + version: v3 + # releaseName: ar-jenkins + valueFiles: + - ar-values-dev.yaml + destination: + server: https://kubernetes.default.svc + namespace: jenkins + syncPolicy: {} diff --git a/terraform/files/argocd-values.yaml b/terraform/files/argocd-values.yaml new file mode 100644 index 0000000..91d952c --- /dev/null +++ b/terraform/files/argocd-values.yaml @@ -0,0 +1,17 @@ +installCRDs: false + +server: + # this is required to disable SSL redirection, as ingress handles this + extraArgs: + - --insecure + service: + type: ClusterIP + + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/hsts: "false" + # (examples in docs are wrong) DO NOT use passthrough if ingress has tls settings + # nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + # nginx.ingress.kubernetes.io/ssl-passthrough: "true" diff --git a/terraform/files/azureIdentities.yaml.tpl b/terraform/files/azureIdentities.yaml.tpl index a37dcce..345dfa9 100644 --- a/terraform/files/azureIdentities.yaml.tpl +++ b/terraform/files/azureIdentities.yaml.tpl @@ -1,5 +1,5 @@ azureIdentities: - - name: "velero" + "velero": # if not defined, then the azure identity will be deployed in the same namespace as the chart namespace: "" # type 0: MSI, type 1: Service Principal diff --git a/terraform/files/function_app.zip b/terraform/files/function_app.zip new file mode 100644 index 0000000..c128107 --- /dev/null +++ b/terraform/files/function_app.zip @@ -0,0 +1 @@ +# PLACEHOLDER so validate doesnt fail on missing file diff --git a/terraform/files/gitlab-akvs-certificate-sync.yaml b/terraform/files/gitlab-akvs-certificate-sync.yaml new file mode 100644 index 0000000..0dbfcc0 --- /dev/null +++ b/terraform/files/gitlab-akvs-certificate-sync.yaml @@ -0,0 +1,17 @@ +# https://akv2k8s.io/tutorials/sync/2-certificate/ +apiVersion: spv.no/v1alpha1 +kind: AzureKeyVaultSecret +metadata: + name: gitlab-certificate-sync + namespace: gitlab # cert dest namespace +spec: + vault: + name: __KEY_VAULT_NAME__ # name of 
key vault + object: + name: __KEY_VAULT_CERT_NAME__ # key vault certificate name + type: certificate + output: + secret: + name: gitlab-tls # kubernetes secret name + type: kubernetes.io/tls # kubernetes secret type + chainOrder: ensureserverfirst diff --git a/terraform/files/scripts/argocd_config.sh b/terraform/files/scripts/argocd_config.sh new file mode 100644 index 0000000..aa79fb2 --- /dev/null +++ b/terraform/files/scripts/argocd_config.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# +# Argo CD configuration + +# Ensure strict mode and predictable pipeline failure +set -euo pipefail +trap "echo 'error: Script failed: see failed command above'" ERR + +# Vars +ARGOCD_PATH="./argocd" +REPO_SSH_PRIVATE_KEY_PATH="./id_ed25519_argocd" +export ARGOCD_OPTS="--grpc-web" +ARGOCD_HEALTH_CHECK_URL="https://$ARGOCD_FQDN/healthz" + +# Install +VERSION="v1.8.2" +curl -sSL -o "$ARGOCD_PATH" "https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-linux-amd64" +chmod +x "$ARGOCD_PATH" + +# Wait for URL to be responsive +echo "Checking ArgoCD is ready on [$ARGOCD_HEALTH_CHECK_URL]..." +while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' $ARGOCD_HEALTH_CHECK_URL)" != "200" ]]; do + echo "Still waiting for ArgoCD to be ready on [$ARGOCD_HEALTH_CHECK_URL]..." + sleep 10 +done + +# Show version +echo "Showing Argo CD version info for [$ARGOCD_FQDN]..." +"$ARGOCD_PATH" version --server "$ARGOCD_FQDN" + +# Get default admin password +# default password is server pod name, eg: "argocd-server-89c6cd7d4-xxxxx" +echo "Getting default admin password..." +DEFAULT_ARGO_ADMIN_PASSWORD=$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2) + +# Login +echo "Logging in to Argo CD with default password..." +if "$ARGOCD_PATH" login "$ARGOCD_FQDN" --username admin --password "$DEFAULT_ARGO_ADMIN_PASSWORD"; then + # Update default admin password + echo "Updating default admin password..." + "$ARGOCD_PATH" account update-password --account admin --current-password "$DEFAULT_ARGO_ADMIN_PASSWORD" --new-password "$ARGOCD_ADMIN_PASSWORD" +else + echo "WARNING: Failed to log into Argo CD using default password..." + echo "Attempting login with new admin password..." + "$ARGOCD_PATH" login "$ARGOCD_FQDN" --username admin --password "$ARGOCD_ADMIN_PASSWORD" +fi + +# Show info +echo "Showing Argo CD cluster info..." +"$ARGOCD_PATH" account list +"$ARGOCD_PATH" cluster list +"$ARGOCD_PATH" app list + +# Add SSH repo connection +# Save repo private key +echo -e "$HELM_CHART_REPO_DEPLOY_PRIVATE_KEY" > "$REPO_SSH_PRIVATE_KEY_PATH" +chmod 600 "$REPO_SSH_PRIVATE_KEY_PATH" +echo "Showing public key fingerprint..." 
+ssh-keygen -lf "$REPO_SSH_PRIVATE_KEY_PATH" + +# Add a Git repository via SSH using a private key for authentication +# [OPTIONAL] use "--insecure-ignore-host-key" during testing with self-signed certs +"$ARGOCD_PATH" repo add "$REPO_URL" --ssh-private-key-path "$REPO_SSH_PRIVATE_KEY_PATH" diff --git a/terraform/function_app.tf b/terraform/function_app.tf index 9d870f1..4f2c484 100644 --- a/terraform/function_app.tf +++ b/terraform/function_app.tf @@ -36,8 +36,8 @@ data "azurerm_storage_account_sas" "func_app" { # expiry = formatdate("YYYY-MM-DD", timeadd(timestamp(), var.func_app_sas_expires_in_hours)) # hardcoded values to stop timestamp() affecting EVERY Terraform Plan - start = "2020-10-25" - expiry = "2022-01-01" + start = "2020-10-25" + expiry = "2022-01-01" resource_types { object = true diff --git a/terraform/helm/aad_pod_identity_values.yaml b/terraform/helm/aad_pod_identity_values.yaml index 2f5c077..31f16b2 100644 --- a/terraform/helm/aad_pod_identity_values.yaml +++ b/terraform/helm/aad_pod_identity_values.yaml @@ -1,4 +1,5 @@ -# source: https://github.com/Azure/aad-pod-identity/blob/v1.7.0/charts/aad-pod-identity/values.yaml +# source: https://github.com/Azure/aad-pod-identity/blob/v1.7.1/charts/aad-pod-identity/values.yaml + # Default values for aad-pod-identity-helm. # This is a YAML-formatted file. # Declare variables to be passed into your templates. @@ -17,7 +18,13 @@ image: # https://github.com/Azure/aad-pod-identity#4-optional-match-pods-in-the-namespace # By default, AAD Pod Identity matches pods to identities across namespaces. # To match only pods in the namespace containing AzureIdentity set this to true. -forceNameSpaced: "false" +# DEPRECATED - use 'forceNamespaced' instead. +forceNameSpaced: "" + +# https://github.com/Azure/aad-pod-identity#4-optional-match-pods-in-the-namespace +# By default, AAD Pod Identity matches pods to identities across namespaces. +# To match only pods in the namespace containing AzureIdentity set this to true. +forceNamespaced: "false" # When NMI runs on a node where MIC is running, then MIC token request call is also # intercepted by NMI. MIC can't get a valid token as to initialize and then @@ -44,7 +51,7 @@ operationMode: "standard" mic: image: mic - tag: v1.7.0 + tag: v1.7.1 priorityClassName: "" @@ -71,6 +78,22 @@ mic: affinity: {} + # Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/component: mic + + # Limit the number of concurrent disruptions that your application experiences, + # allowing for higher availability while permitting the cluster administrator to manage the clusters nodes. + # ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + podDisruptionBudget: {} + # minAvailable: 1 + leaderElection: # Override leader election instance name (default is 'hostname') instance: "" @@ -124,7 +147,7 @@ mic: nmi: image: nmi - tag: v1.7.0 + tag: v1.7.1 priorityClassName: "" @@ -189,6 +212,10 @@ nmi: # default is false allowNetworkPluginKubenet: false + # Path to kubelet default config. 
+ # default is /etc/default/kubelet + kubeletConfig: "/etc/default/kubelet" + rbac: enabled: true # NMI requires permissions to get secrets when service principal (type: 1) is used in AzureIdentity. @@ -196,15 +223,25 @@ rbac: allowAccessToSecrets: true # Create azure identities and bindings -azureIdentities: [] - # - name: "azure-identity" +# This is a map with the AzureIdentityName being the key and the rest of the blob as value in accordance +# to helm best practices: https://helm.sh/docs/chart_best_practices/values/#consider-how-users-will-use-your-values +azureIdentities: + # "azure-identity": # # if not defined, then the azure identity will be deployed in the same namespace as the chart # namespace: "" - # # type 0: MSI, type 1: Service Principal + # # type 0: User-assigned identity, type 1: Service Principal, type 2: Service principal with certificate # type: 0 # # /subscriptions/subscription-id/resourcegroups/resource-group/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity-name + # # Required for type 0 # resourceID: "" + # # Required for type 0, 1 and 2 # clientID: "" + # # Required for type 1 and 2 + # tenantID: "" + # # Required for type 1 and 2 + # clientPassword: "{\"name\":\"\",\"namespace\":\"\"}" + # # Optional for type 1 and 2 (multi-tenant) + # auxiliaryTenantIDs: [] # binding: # name: "azure-identity-binding" # # The selector will also need to be included in labels for app deployment diff --git a/terraform/helm/kured_values.yaml b/terraform/helm/kured_values.yaml new file mode 100644 index 0000000..7c96e24 --- /dev/null +++ b/terraform/helm/kured_values.yaml @@ -0,0 +1,31 @@ +configuration: + # alertFilterRegexp: "" # alert names to ignore when checking for active alerts + # blockingPodSelector: [] # label selector identifying pods whose presence should prevent reboots + endTime: "17:00" # only reboot before this time of day (default "23:59") + # lockAnnotation: "" # annotation in which to record locking node (default "weave.works/kured-node-lock") + # lockTtl: 0 # force clean annotation after this ammount of time (default 0, disabled) + # messageTemplateDrain: "" # slack message template when notifying about a node being drained (default "Draining node %s") + # messageTemplateReboot: "" # slack message template when notifying about a node being rebooted (default "Rebooted node %s") + # period: "" # reboot check period (default 1h0m0s) + # prometheusUrl: "" # Prometheus instance to probe for active alerts + rebootDays: [mo,tu,we,th,fr] # only reboot on these days (default [su,mo,tu,we,th,fr,sa]) + # rebootSentinel: "" # path to file whose existence signals need to reboot (default "/var/run/reboot-required") + # slackChannel: "" # slack channel for reboot notfications + # slackHookUrl: "" # slack hook URL for reboot notfications + # slackUsername: "" # slack username for reboot notfications (default "kured") + startTime: "09:00" # only reboot after this time of day (default "0:00") + timeZone: "Europe/London" # time-zone to use (valid zones from "time" golang package) + +nodeSelector: + kubernetes.io/os: linux + +extraArgs: {} + +extraEnvVars: +# - name: slackHookUrl +# valueFrom: +# secretKeyRef: +# name: secret_name +# key: secret_key +# - name: regularEnvVariable +# value: 123 diff --git a/terraform/helm/nginx_values.yaml b/terraform/helm/nginx_values.yaml index 4d7248f..6407621 100644 --- a/terraform/helm/nginx_values.yaml +++ b/terraform/helm/nginx_values.yaml @@ -13,20 +13,6 @@ controller: redirect-to-https: "false" ssl-redirect: "false" - ## 
Annotations to be added to the controller config configuration configmap - ## - configAnnotations: {} - - # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers - proxySetHeaders: {} - - # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers - addHeaders: {} - - # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network - # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply - reportNodeInternalIp: false - ## Allows customization of the source of the IP address or FQDN to report ## in the ingress status field. By default, it reads the information provided ## by the service. If disable, the status field reports the IP address of the @@ -41,7 +27,6 @@ controller: ## pathOverride: "" - ## Additional command line arguments to pass to nginx-ingress-controller ## E.g. to specify the default SSL certificate you can use ## extraArgs: diff --git a/terraform/helm/velero_values.yaml b/terraform/helm/velero_values.yaml index 5f7a0e5..1a25bd6 100644 --- a/terraform/helm/velero_values.yaml +++ b/terraform/helm/velero_values.yaml @@ -1,4 +1,4 @@ -# source: https://github.com/vmware-tanzu/helm-charts/blob/velero-2.13.6/charts/velero/values.yaml +# source: https://github.com/vmware-tanzu/helm-charts/blob/velero-2.14.1/charts/velero/values.yaml ## ## Configuration settings that directly affect the Velero deployment YAML. @@ -32,6 +32,10 @@ podLabels: {} # Resource requests/limits to specify for the Velero deployment. Optional. resources: {} +# Configure the dnsPolicy of the Velero deployment +# See: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: ClusterFirst + # Init containers to add to the Velero deployment's pod spec. At least one plugin provider image is required. # https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/releases initContainers: @@ -70,6 +74,7 @@ extraVolumeMounts: [] # Settings for Velero's prometheus metrics. Enabled by default. metrics: enabled: true + scrapeInterval: 30s # Pod annotations for Prometheus podAnnotations: @@ -77,9 +82,18 @@ metrics: prometheus.io/port: "8085" prometheus.io/path: "/metrics" + serviceMonitor: + enabled: false + additionalLabels: {} + # Install CRDs as a templates. Enabled by default. installCRDs: true +# Enable/disable all helm hooks annotations +# You should disable this if using a deploy tool that doesn't support helm hooks, +# such as ArgoCD +enableHelmHooks: true + ## ## End of deployment-related settings. ## @@ -202,15 +216,30 @@ credentials: # credentials. Set to false if, for example, using kube2iam or # kiam to provide IAM credentials for the Velero pod. useSecret: true + # Name of the secret to create if `useSecret` is true and `existingSecret` is empty + name: # Name of a pre-existing secret (if any) in the Velero namespace # that should be used to get IAM account credentials. Optional. existingSecret: velero-credentials - # Data to be stored in the Velero secret, if `useSecret` is - # true and `existingSecret` is empty. This should be the contents - # of your IAM credentials file. 
+ # Data to be stored in the Velero secret, if `useSecret` is true and `existingSecret` is empty. + # As of the current Velero release, Velero only uses one secret key/value at a time. + # The key must be named `cloud`, and the value corresponds to the entire content of your IAM credentials file. + # Note that the format will be different for different providers, please check their documentation. + # Here is a list of documentation for plugins maintained by the Velero team: + # [AWS] https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/README.md + # [GCP] https://github.com/vmware-tanzu/velero-plugin-for-gcp/blob/main/README.md + # [Azure] https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/blob/main/README.md secretContents: {} + # cloud: | + # [default] + # aws_access_key_id= + # aws_secret_access_key= # additional key/value pairs to be used as environment variables such as "DIGITALOCEAN_TOKEN: ". Values will be stored in the secret. extraEnvVars: {} + # Name of a pre-existing secret (if any) in the Velero namespace + # that will be used to load environment variables into velero and restic. + # Secret should be in format - https://kubernetes.io/docs/concepts/configuration/secret/#use-case-as-container-environment-variables + extraSecretRef: "" # Whether to create backupstoragelocation crd, if false => do not create a default backup location backupsEnabled: true @@ -236,6 +265,10 @@ restic: # Extra volumeMounts for the Restic daemonset. Optional. extraVolumeMounts: [] + # Configure the dnsPolicy of the Restic daemonset + # See: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: ClusterFirst + # SecurityContext to use for the Velero deployment. Optional. # Set fsGroup for `AWS IAM Roles for Service Accounts` # see more informations at: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html @@ -248,6 +281,8 @@ restic: # mybackup: # labels: # myenv: foo +# annotations: +# myenv: foo # schedule: "0 0 * * *" # template: # ttl: "240h" diff --git a/terraform/helm_aad_pod_identity.tf b/terraform/helm_aad_pod_identity.tf index b4a41e5..9018b63 100644 --- a/terraform/helm_aad_pod_identity.tf +++ b/terraform/helm_aad_pod_identity.tf @@ -44,21 +44,23 @@ resource "helm_release" "aad_pod_identity" { repository = "https://raw.githubusercontent.com/Azure/aad-pod-identity/master/charts" version = var.aad_pod_identity_chart_version timeout = 600 + atomic = true values = [ file("helm/aad_pod_identity_values.yaml"), data.template_file.azureIdentities.rendered ] + # should only be required for helm v2 set { name = "installCRDs" - value = "true" + value = "false" } # allow Kubenet: https://azure.github.io/aad-pod-identity/docs/configure/aad_pod_identity_on_kubenet/ set { name = "nmi.allowNetworkPluginKubenet" - value = "true" + value = "false" } # https://github.com/Azure/aad-pod-identity/wiki/Debugging#increasing-the-verbosity-of-the-logs diff --git a/terraform/helm_akv2k8s.tf b/terraform/helm_akv2k8s.tf index 16b3718..7e61508 100644 --- a/terraform/helm_akv2k8s.tf +++ b/terraform/helm_akv2k8s.tf @@ -43,8 +43,7 @@ resource "local_file" "kubeconfig" { resource "null_resource" "akv2k8s_crds" { triggers = { # always_run = "${timestamp()}" - akv2k8s_yaml_contents = filemd5(var.akv2k8s_yaml_path) - cert_sync_yaml_contents = filemd5(var.cert_sync_yaml_path) + akv2k8s_yaml_contents = filemd5(var.akv2k8s_yaml_path) } provisioner "local-exec" { @@ -53,7 +52,6 @@ resource "null_resource" "akv2k8s_crds" { export 
KUBECONFIG=${var.aks_config_path} # https://helm.sh/docs/chart_best_practices/custom_resource_definitions/ kubectl apply -f ${var.akv2k8s_yaml_path} - kubectl apply -f ${var.cert_sync_yaml_path} EOT } @@ -103,9 +101,31 @@ resource "helm_release" "akv2k8s" { repository = "http://charts.spvapi.no" version = var.akv2k8s_chart_version timeout = 600 + atomic = true set { name = "controller.logLevel" value = "debug" } } + +# https://www.terraform.io/docs/provisioners/local-exec.html +resource "null_resource" "akv2k8s_cert_sync" { + triggers = { + # always_run = "${timestamp()}" + cert_sync_yaml_contents = filemd5(var.cert_sync_yaml_path) + } + + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = < 2.2 = 2.X.Y tls = "~> 2.1" } # 0.12.X - required_version = "~> 0.12.29" # https://github.com/hashicorp/terraform/releases + required_version = "~> 0.12.30" # https://github.com/hashicorp/terraform/releases } # must include blank features block # https://github.com/terraform-providers/terraform-provider-azurerm/releases provider "azurerm" { - version = "2.37.0" + version = "2.43.0" features {} } @@ -46,7 +46,6 @@ provider "kubernetes" { provider "helm" { kubernetes { - load_config_file = false host = module.aks.full_object.kube_admin_config[0].host client_certificate = base64decode(module.aks.full_object.kube_admin_config[0].client_certificate) client_key = base64decode(module.aks.full_object.kube_admin_config[0].client_key) diff --git a/terraform/variables.tf b/terraform/variables.tf index 3db9ff1..4c8bedc 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -16,26 +16,30 @@ variable "kubernetes_version" { # https://kubernetes.github.io/ingress-nginx/deploy/#using-helm # https://github.com/kubernetes/ingress-nginx/releases # https://github.com/kubernetes/ingress-nginx/blob/ingress-nginx-3.11.0/charts/ingress-nginx/Chart.yaml#L3 +# +# helm repo update # helm search repo ingress-nginx/ingress-nginx +# * also update terraform/helm/nginx_values.yaml variable "nginx_chart_version" { - default = "3.11.0" + default = "3.20.1" } # https://hub.helm.sh/charts/jetstack/cert-manager # helm search repo jetstack/cert-manager variable "cert_manager_chart_version" { - default = "v1.0.4" + default = "v1.1.0" } # https://github.com/vmware-tanzu/helm-charts/releases # helm search repo vmware-tanzu/velero +# * also update terraform/helm/velero_values.yaml variable "velero_chart_version" { - default = "2.13.7" + default = "2.14.5" } # https://hub.docker.com/r/sonatype/nexus3/tags variable "nexus_image_tag" { - default = "3.28.1" + default = "3.29.2" } # https://github.com/adamrushuk/charts/releases @@ -44,6 +48,7 @@ variable "nexus_chart_version" { default = "0.2.8" } +# https://github.com/SparebankenVest/azure-key-vault-to-kubernetes # https://github.com/SparebankenVest/public-helm-charts/releases # https://github.com/SparebankenVest/helm-charts/tree/gh-pages/akv2k8s # https://github.com/SparebankenVest/public-helm-charts/blob/master/stable/akv2k8s/Chart.yaml#L5 @@ -55,14 +60,38 @@ variable "akv2k8s_chart_version" { # https://github.com/Azure/aad-pod-identity/blob/master/charts/aad-pod-identity/Chart.yaml#L4 # helm search repo aad-pod-identity/aad-pod-identity variable "aad_pod_identity_chart_version" { - default = "2.0.3" + default = "3.0.0" } # https://bitnami.com/stack/external-dns/helm # https://github.com/bitnami/charts/blob/master/bitnami/external-dns/Chart.yaml#L21 # helm search repo bitnami/external-dns variable "external_dns_chart_version" { - default = "4.0.0" + default = 
"4.5.3" +} + +# https://github.com/weaveworks/kured/tree/master/charts/kured +# helm search repo kured/kured +variable "kured_chart_version" { + default = "2.3.1" +} + +# https://github.com/weaveworks/kured#kubernetes--os-compatibility +variable "kured_image_tag" { + default = "1.4.4" +} + + +# argo cd +# https://github.com/argoproj/argo-helm/blob/master/charts/argo-cd/Chart.yaml#L5 +# helm search repo argo/argo-cd +variable "argocd_chart_version" { + default = "2.11.0" +} + +# https://hub.docker.com/r/argoproj/argocd/tags +variable "argocd_image_tag" { + default = "v1.8.2" } #endregion Versions @@ -135,7 +164,7 @@ variable "sla_sku" { variable "aks_container_insights_enabled" { description = "Should Container Insights monitoring be enabled" - default = true + default = false } variable "aks_config_path" { @@ -166,8 +195,25 @@ variable "agent_pool_profile_name" { } variable "agent_pool_profile_vm_size" { + # https://azureprice.net/?region=ukwest¤cy=GBP + # Standard_D2s_v3 - £0.086455 per hour + # 2 x CPU, 8GB RAM, 4 x Data Disks # https://docs.microsoft.com/en-us/azure/virtual-machines/dv3-dsv3-series#dsv3-series - default = "Standard_D2s_v3" + + # Standard_DS2_v2 - £0.130429 per hour + # 2 x CPU, 7GB RAM, 8 x Data Disks + # https://docs.microsoft.com/en-us/azure/virtual-machines/dv2-dsv2-series?toc=/azure/virtual-machines/linux/toc.json&bc=/azure/virtual-machines/linux/breadcrumb/toc.json#dsv2-series + + # ! Standard_B4ms can cause performance issues + # Standard_B4ms - £0.140863 per hour + # 4 x CPU, 16GB RAM, 8 x Data Disks + + # Standard_D4s_v3 - £0.172911 per hour + # 4 x CPU, 16GB RAM, 8 x Data Disks + + # Standard_F8s_v2 - £0.301104 per hour + # 8 x CPU, 16GB RAM, 16 x Data Disks + default = "Standard_D4s_v3" } variable "agent_pool_profile_os_type" { @@ -281,3 +327,35 @@ variable "akv2k8s_exception_yaml_path" { variable "cert_sync_yaml_path" { default = "files/akvs-certificate-sync.yaml" } + + + +# argo cd +variable "argocd_admin_password" { + default = "__ARGOCD_ADMIN_PASSWORD__" +} + +variable "argocd_cert_sync_yaml_path" { + default = "files/argocd-akvs-certificate-sync.yaml" +} + +variable "argocd_fqdn" { + default = "__ARGOCD_FQDN__" +} + +variable "helm_chart_repo_deploy_private_key" { + default = <<-EOT +__HELM_CHART_REPO_DEPLOY_PRIVATE_KEY__ +EOT +} + + + +# gitlab +variable "gitlab_cert_sync_yaml_path" { + default = "files/gitlab-akvs-certificate-sync.yaml" +} + +variable "gitlab_argocd_app_path" { + default = "files/argocd-gitlab.yaml" +} diff --git a/terraform/velero.tf b/terraform/velero.tf index 6bb1ee3..5678f06 100644 --- a/terraform/velero.tf +++ b/terraform/velero.tf @@ -88,13 +88,13 @@ EOT resource "helm_release" "velero" { count = var.velero_enabled ? 1 : 0 - atomic = true chart = "velero" name = "velero" namespace = kubernetes_namespace.velero[0].metadata[0].name repository = "https://vmware-tanzu.github.io/helm-charts" version = var.velero_chart_version timeout = 600 + atomic = true values = ["${file("helm/velero_values.yaml")}"] diff --git a/terraform/velero_mi_auth.tf b/terraform/velero_mi_auth.tf index 4fd7b70..1e2e610 100644 --- a/terraform/velero_mi_auth.tf +++ b/terraform/velero_mi_auth.tf @@ -3,15 +3,15 @@ resource "azurerm_user_assigned_identity" "velero" { count = var.velero_enabled ? 
1 : 0 resource_group_name = module.aks.node_resource_group location = var.location - name = "mi-velero" + name = "mi-velero" } # assign velero MI contributor rights to velero storage RG resource "azurerm_role_assignment" "velero_mi_velero_storage_rg" { - count = var.velero_enabled ? 1 : 0 - principal_id = azurerm_user_assigned_identity.velero[0].principal_id - role_definition_name = "Contributor" - scope = azurerm_resource_group.velero[0].id + count = var.velero_enabled ? 1 : 0 + principal_id = azurerm_user_assigned_identity.velero[0].principal_id + role_definition_name = "Contributor" + scope = azurerm_resource_group.velero[0].id } # assign velero MI contributor rights to velero storage RG diff --git a/test/integration.tests.ps1 b/test/integration.tests.ps1 index 54f32c3..0ec2729 100644 --- a/test/integration.tests.ps1 +++ b/test/integration.tests.ps1 @@ -1,6 +1,10 @@ # Pester integration tests for provisioned infrastructure # Assumes az cli has already been logged in +# Documentation: +# - https://pester.dev/docs/migrations/v3-to-v4#update-to-the-new-assertions-syntax +# - https://pester.dev/docs/usage/assertions + # Pester tests Describe "Integration Tests" { @@ -10,20 +14,20 @@ Describe "Integration Tests" { # [CI Param Option] Trigger failed test on purpose if ($env:FORCE_TEST_FAIL -eq "true") { It "FORCE_TEST_FAIL used on Resource Group [$env:AKS_RG_NAME]" { - "false" | Should be "true" + "false" | Should -Be "true" } } It "Resource Group [$env:TERRAFORM_STORAGE_RG] should exist" { - az group exists --name $env:TERRAFORM_STORAGE_RG | Should be "true" + az group exists --name $env:TERRAFORM_STORAGE_RG | Should -Be "true" } It "Storage Account [$env:TERRAFORM_STORAGE_ACCOUNT] should exist" { - az storage account show --name $env:TERRAFORM_STORAGE_ACCOUNT --query "provisioningState" -o tsv | Should be "Succeeded" + az storage account show --name $env:TERRAFORM_STORAGE_ACCOUNT --query "provisioningState" -o tsv | Should -Be "Succeeded" } It "Storage Blob [terraform.tfstate] in Container [terraform] should exist" { - az storage blob exists --account-name $env:TERRAFORM_STORAGE_ACCOUNT --container-name "terraform" --name "terraform.tfstate" --query "exists" -o tsv | Should be "true" + az storage blob exists --account-name $env:TERRAFORM_STORAGE_ACCOUNT --container-name "terraform" --name "terraform.tfstate" --query "exists" -o tsv | Should -Be "true" } } @@ -31,11 +35,11 @@ Describe "Integration Tests" { Context 'When Terraform has provisioned: [AZURE KUBERNETES SERVICE]' { It "Resource Group [$env:AKS_RG_NAME] should exist" { - az group exists --name $env:AKS_RG_NAME | Should be "true" + az group exists --name $env:AKS_RG_NAME | Should -Be "true" } It "Azure Kubernetes Service [$env:AKS_CLUSTER_NAME] should exist" { - az aks show --name $env:AKS_CLUSTER_NAME --resource-group $env:AKS_RG_NAME --query "provisioningState" -o tsv | Should be "Succeeded" + az aks show --name $env:AKS_CLUSTER_NAME --resource-group $env:AKS_RG_NAME --query "provisioningState" -o tsv | Should -Be "Succeeded" } } @@ -63,12 +67,12 @@ Describe "Integration Tests" { # Root domain It "A request to [$testUrl] should return an allowed Status Code: [$($allowedStatusCodes -join ', ')]" { # $responseStatusCode = curl -k -s -o /dev/null -w "%{http_code}" $testUrl - $response.StatusCode | Should BeIn $allowedStatusCodes + $response.StatusCode | Should -BeIn $allowedStatusCodes } It "A request to [$testUrl] should include [$expectedContent] in the returned content" { - # (curl -k -s $testUrl) -match $expectedContent | Should 
Be $true - $response.Content -match $expectedContent | Should Be $true + # (curl -k -s $testUrl) -match $expectedContent | Should -Be $true + $response.Content -match $expectedContent | Should -Be $true } } @@ -93,7 +97,7 @@ Describe "Integration Tests" { # Tests It "Should have an SSL cert for [$hostname] issued by: [$expectedIssuerName]" { - $certResult.Issuer -like "*$expectedIssuerName*" | Should Be $true + $certResult.Issuer -like "*$expectedIssuerName*" | Should -Be $true } # Do extra supported tests if on Windows OS @@ -106,15 +110,15 @@ Describe "Integration Tests" { if ($env:CI_DEBUG -eq "true") { $sslResult | Format-Custom | Out-String | Write-Verbose } It "Should have Signature Algorithm of [sha256RSA]" { - $sslResult.SignatureAlgorithm | Should Be "sha256RSA" + $sslResult.SignatureAlgorithm | Should -Be "sha256RSA" } It "Should support TLS1.2" { - $sslResult.TLS12 | Should Be $True + $sslResult.TLS12 | Should -Be $True } It "Should not expire within [$warningThreshold] days" { - ($sslResult.Certificate.NotAfter -gt (Get-Date).AddDays($warningThreshold)) | Should Be $True + ($sslResult.Certificate.NotAfter -gt (Get-Date).AddDays($warningThreshold)) | Should -Be $True } } } diff --git a/velero/velero.md b/velero/velero.md index f077bf6..df8e821 100644 --- a/velero/velero.md +++ b/velero/velero.md @@ -59,7 +59,7 @@ az storage account create ` --https-only true ` --kind BlobStorage ` --access-tier Hot - + # Create Blob Container az storage container create -n $blobContainerName --public-access off --account-name $storageAccountName ```
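
The README changes in this patch describe starting the `build` and `destroy` workflows by clicking *Run workflow* in the GitHub UI, while `start_aks_cluster` / `stop_aks_cluster` listen for `repository_dispatch` events. As a rough illustration only, the same workflows could be driven from a terminal with the GitHub CLI; the `destroy.yml` file name and the `event_type` values are assumptions (the `types:` filters are not visible in the hunks above), so check the workflow files before relying on this:

```bash
# Sketch: trigger the workflows touched by this changeset via the GitHub CLI ("gh").
# Assumes gh is installed and authenticated against adamrushuk/devops-lab, and that
# the destroy workflow lives in destroy.yml (its file name is not shown in this diff).

# build / destroy expose "Run workflow" in the UI, so they accept workflow_dispatch:
gh workflow run build.yml --ref develop
gh workflow run destroy.yml --ref develop

# start_aks_cluster / stop_aks_cluster are triggered by repository_dispatch.
# The event_type values below are assumptions - confirm them against the
# "types:" filter in each workflow file before use.
gh api repos/adamrushuk/devops-lab/dispatches -f event_type=start_aks_cluster
gh api repos/adamrushuk/devops-lab/dispatches -f event_type=stop_aks_cluster
```

Either route is still subject to the `if: github.actor == github.event.repository.owner.login` guard shown in the start/stop workflows above, so dispatches sent from another account will result in skipped jobs.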