Skip to content

Commit

Permalink
Use Shipyard's deploy target
Browse files Browse the repository at this point in the history
Use Shipyard's target for deployment and provide it with extra flags to
affect the deployment.
  • Loading branch information
mkolesnik authored and dfarrell07 committed Apr 28, 2020
1 parent 771c651 commit 9f39a35
Show file tree
Hide file tree
Showing 8 changed files with 91 additions and 136 deletions.
16 changes: 12 additions & 4 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,12 @@ git:
jobs:
include:
- env: CMD="make ci"
- env: CMD="make e2e lighthouse=true"
- env: CMD="make e2e lighthouse=true globalnet=true" DEPLOY=true
- env: CMD="make e2e"
DEPLOY_ARGS="--deploytool_broker_args '--service-discovery'"
RELEASE=true
- env: CMD="make e2e"
CLUSTERS_ARGS="--globalnet true"
DEPLOY_ARGS="${CLUSTERS_ARGS} --deploytool_broker_args '--service-discovery'"

install:
- sudo apt-get install moreutils # make ts available
Expand All @@ -24,15 +28,19 @@ services:
script:
- set -o pipefail;
$CMD 2>&1 | ts '[%H:%M:%.S]' -s
after_success:
- echo "Testing cleaning up clusters";
set -o pipefail;
make cleanup 2>&1 | ts '[%H:%M:%.S]' -s
deploy:
- provider: script
script: bash scripts/release
on:
branch: master
condition: $DEPLOY = true
condition: $RELEASE = true
- provider: script
script: bash scripts/release $TRAVIS_TAG
skip_cleanup: true
on:
tags: true
condition: $DEPLOY = true
condition: $RELEASE = true
3 changes: 2 additions & 1 deletion Dockerfile.dapper
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
FROM quay.io/submariner/shipyard-dapper-base

ARG DAPPER_HOST_ARCH
ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH} DAPPER_ENV=REPO DAPPER_ENV=TAG \
ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH} \
DAPPER_ENV="REPO TAG CLUSTERS_ARGS DEPLOY_ARGS" \
DAPPER_SOURCE=/go/src/github.com/submariner-io/submariner-operator DAPPER_DOCKER_SOCKET=true \
OPERATOR_SDK_VERSION=0.12.0 GOROOT=/usr/lib/golang \
KUBEFED_VERSION=0.1.0-rc3
Expand Down
11 changes: 7 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
build_debug ?= false
lighthouse ?= false
status ?= onetime

ifneq (,$(DAPPER_HOST_ARCH))

Expand All @@ -9,12 +8,16 @@ ifneq (,$(DAPPER_HOST_ARCH))
include $(SHIPYARD_DIR)/Makefile.inc

TARGETS := $(shell ls -p scripts | grep -v -e /)
CLUSTERS_ARGS = --cluster_settings scripts/kind-e2e/cluster_settings
CLUSTER_SETTINGS_FLAG = --cluster_settings $(DAPPER_SOURCE)/scripts/kind-e2e/cluster_settings
CLUSTERS_ARGS += $(CLUSTER_SETTINGS_FLAG)
DEPLOY_ARGS += $(CLUSTER_SETTINGS_FLAG) --cable_driver strongswan

clusters: build-all

e2e: clusters
scripts/kind-e2e/e2e.sh --lighthouse $(lighthouse) --globalnet $(globalnet)
deploy: clusters preload_images

e2e: deploy
scripts/kind-e2e/e2e.sh

$(TARGETS): vendor/modules.txt
./scripts/$@ --build_debug $(build_debug)
Expand Down
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,9 @@ make e2e cleanup
## Setup development environment
You will need docker installed in your system, and at least 8GB of RAM. Run:

```
make e2e
```

```
make deploy
```


# Reference
Expand Down
6 changes: 3 additions & 3 deletions scripts/kind-e2e/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ Optional useful tools for troubleshooting:

To run the tests simply execute the following:
```bash
make build-all package e2e
make e2e
```

To test with a specific k8s version, an additional **version** parameter can be passed to **make e2e** command:
Expand All @@ -52,8 +52,7 @@ kubectl config list-contexts
You should be able to see 3 contexts. From this stage you can interact with the clusters
as with any normal k8s cluster.


**NOTE**: Each time **make e2e** command is executed, the local code will be built, pushed to kind clusters
**NOTE**: Each time **make e2e** command is executed, the local code will be built, pushed to kind clusters
as docker images, submariner will be redeployed on the clusters from pushed images and E2E tests will be executed.
This mode allows the developers to test their local code fast on a very close to real world scenario setup.

Expand All @@ -65,6 +64,7 @@ make deploy
```

#### Cleanup

At any time you can run a cleanup command that will remove all the kind clusters.

```bash
Expand Down
127 changes: 14 additions & 113 deletions scripts/kind-e2e/e2e.sh
Original file line number Diff line number Diff line change
@@ -1,111 +1,25 @@
#!/usr/bin/env bash

## Process command line flags ##

source /usr/share/shflags/shflags
DEFINE_string 'globalnet' 'false' "Deploy with operlapping CIDRs (set to 'true' to enable)"
DEFINE_string 'lighthouse' 'false' "Deploy with lighthouse"
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"

globalnet="${FLAGS_globalnet}"
lighthouse="${FLAGS_lighthouse}"
echo "Running with: globalnet=${globalnet}, lighthouse=${lighthouse}"

set -o pipefail
set -em
set -em -o pipefail

source ${SCRIPTS_DIR}/lib/debug_functions
source ${SCRIPTS_DIR}/lib/version
source ${SCRIPTS_DIR}/lib/utils
source ${SCRIPTS_DIR}/lib/deploy_funcs
source ${SCRIPTS_DIR}/lib/deploy_operator
source ${DAPPER_SOURCE}/scripts/kind-e2e/cluster_settings

### Functions ###

function setup_broker() {
if kubectl get crd clusters.submariner.io > /dev/null 2>&1; then
echo Submariner CRDs already exist, skipping broker creation...
return
fi

echo Installing broker on ${cluster}.
local sd gn
[[ $lighthouse = true ]] && sd=--service-discovery
[[ $globalnet = true ]] && gn=--globalnet
${DAPPER_SOURCE}/bin/subctl --kubeconfig ${PRJ_ROOT}/output/kubeconfigs/kind-config-merged --kubecontext ${cluster} deploy-broker ${sd} ${gn}|& cat
}

function broker_vars() {
SUBMARINER_BROKER_URL=$(kubectl -n default get endpoints kubernetes -o jsonpath="{.subsets[0].addresses[0].ip}:{.subsets[0].ports[?(@.name=='https')].port}")
SUBMARINER_BROKER_CA=$(kubectl -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data['ca\.crt']}")
SUBMARINER_BROKER_TOKEN=$(kubectl -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data.token}"|base64 --decode)
}

function kind_import_images() {
import_image quay.io/submariner/submariner
import_image quay.io/submariner/submariner-route-agent
import_image quay.io/submariner/submariner-operator
import_image quay.io/submariner/lighthouse-agent
[[ $globalnet != "true" ]] || import_image quay.io/submariner/submariner-globalnet

}
### Variables ###

function create_subm_vars() {
# FIXME A better name might be submariner-engine, but just kinda-matching submariner-<random hash> name used by Helm/upstream tests
deployment_name=submariner
operator_deployment_name=submariner-operator
engine_deployment_name=submariner-engine
routeagent_deployment_name=submariner-routeagent
broker_deployment_name=submariner-k8s-broker
globalnet_deployment_name=submariner-globalnet

declare_cidrs
natEnabled=false

subm_engine_image_repo="localhost:5000"
subm_engine_image_tag=local

# FIXME: Actually act on this size request in controller
subm_engine_size=3
subm_colorcodes=blue
subm_debug=false
subm_broker=k8s
subm_cabledriver=strongswan
ce_ipsec_debug=false
ce_ipsec_ikeport=500
ce_ipsec_nattport=4500

subm_ns=submariner-operator
subm_broker_ns=submariner-k8s-broker
}
[[ ! ${DEPLOY_ARGS} =~ "--globalnet" ]] || globalnet=true
[[ ! ${DEPLOY_ARGS} =~ "--service-discovery" ]] || lighthouse=true

function deploy_subm() {
# Add SubM gateway labels
add_subm_gateway_label
# Verify SubM gateway labels
verify_subm_gateway_label

${DAPPER_SOURCE}/bin/subctl join --operator-image "${subm_engine_image_repo}/submariner-operator:local" \
--kubeconfig ${PRJ_ROOT}/output/kubeconfigs/kind-config-merged \
--kubecontext ${cluster} \
--clusterid ${cluster} \
--repository "${subm_engine_image_repo}" \
--version ${subm_engine_image_tag} \
--nattport ${ce_ipsec_nattport} \
--ikeport ${ce_ipsec_ikeport} \
--colorcodes ${subm_colorcodes} \
--cable-driver ${subm_cabledriver} \
--disable-nat \
broker-info.subm |& cat
}
### Functions ###

function connectivity_tests() {
local netshoot_pod nginx_svc_ip
netshoot_pod=$(kubectl get pods -l app=netshoot | awk 'FNR == 2 {print $1}')
nginx_svc_ip=$(with_context cluster3 get_svc_ip nginx-demo)

with_retries 5 test_connection "$netshoot_pod" "$nginx_svc_ip"
if [[ $lighthouse = true ]]; then
resolved_ip=$((kubectl exec "${netshoot_pod}" -- ping -c 1 -W 1 nginx-demo 2>/dev/null || :) \
| grep PING | awk '{print $3}' | tr -d '()')
Expand All @@ -129,55 +43,42 @@ function test_with_e2e_tests {
### Main ###
PRJ_ROOT=$(git rev-parse --show-toplevel)
SUBMARINER_BROKER_NS=submariner-k8s-broker
# FIXME: This can change and break re-running deployments
SUBMARINER_PSK=$((LC_CTYPE=C tr -dc 'a-zA-Z0-9' < /dev/urandom || :) | head -c 64)
declare_kubeconfig
kubectl config view --flatten > ${PRJ_ROOT}/output/kubeconfigs/kind-config-merged
kind_import_images
with_context cluster1 setup_broker
with_context cluster1 broker_vars
# Import functions for testing with Operator
# NB: These are also used to verify non-Operator deployments, thereby asserting the two are mostly equivalent
. ${DAPPER_SOURCE}/scripts/kind-e2e/lib_operator_verify_subm.sh
create_subm_vars
with_context cluster1 verify_subm_broker_secrets
with_context cluster1 broker_vars
if [[ $globalnet = "true" ]]; then
run_sequential "2 3" deploy_subm
else
run_parallel "2 3" deploy_subm
fi
with_context cluster1 verify_subm_broker_secrets
run_parallel "2 3" verify_subm_deployed
echo "Running subctl a second time to verify if running subctl a second time works fine"
with_context cluster3 deploy_subm
with_context cluster3 subctl_install_subm
with_context cluster2 deploy_resource "${RESOURCES_DIR}/netshoot.yaml"
with_context cluster3 deploy_resource "${RESOURCES_DIR}/nginx-demo.yaml"
with_context cluster2 connectivity_tests
# dataplane E2E need to be modified for globalnet
if [[ $globalnet = false ]]; then
if [[ $globalnet != true ]]; then
# run dataplane E2e tests between the two clusters
${DAPPER_SOURCE}/bin/subctl verify-connectivity ${PRJ_ROOT}/output/kubeconfigs/kind-config-cluster2 \
${PRJ_ROOT}/output/kubeconfigs/kind-config-cluster3 \
${DAPPER_SOURCE}/bin/subctl verify-connectivity ${DAPPER_OUTPUT}/kubeconfigs/kind-config-cluster2 \
${DAPPER_OUTPUT}/kubeconfigs/kind-config-cluster3 \
--verbose
fi
cat << EOM
Your 3 virtual clusters are deployed and working properly with your local submariner source code, and can be accessed with:
Your 3 virtual clusters are deployed and working properly with your local source code, and can be accessed with:
export KUBECONFIG=\$(echo \$(git rev-parse --show-toplevel)/output/kubeconfigs/kind-config-cluster{1..3} | sed 's/ /:/g')
$ kubectl config use-context cluster1 # or cluster2, cluster3..
To clean everything up, just run: make cleanup
EOM
44 changes: 37 additions & 7 deletions scripts/kind-e2e/lib_operator_verify_subm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,41 @@ function verify_subm_gateway_label() {
kubectl get node $cluster-worker -o jsonpath='{.metadata.labels}' | grep submariner.io/gateway:true
}

function broker_vars() {
SUBMARINER_BROKER_URL=$(kubectl -n default get endpoints kubernetes -o jsonpath="{.subsets[0].addresses[0].ip}:{.subsets[0].ports[?(@.name=='https')].port}")
SUBMARINER_BROKER_CA=$(kubectl -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data['ca\.crt']}")
SUBMARINER_BROKER_TOKEN=$(kubectl -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data.token}"|base64 --decode)
}

function create_subm_vars() {
# FIXME A better name might be submariner-engine, but just kinda-matching submariner-<random hash> name used by Helm/upstream tests
deployment_name=submariner
operator_deployment_name=submariner-operator
engine_deployment_name=submariner-engine
routeagent_deployment_name=submariner-routeagent
broker_deployment_name=submariner-k8s-broker
globalnet_deployment_name=submariner-globalnet

declare_cidrs
natEnabled=false

subm_engine_image_repo="localhost:5000"
subm_engine_image_tag=local

# FIXME: Actually act on this size request in controller
subm_engine_size=3
subm_colorcodes=blue
subm_debug=false
subm_broker=k8s
subm_cabledriver=strongswan
ce_ipsec_debug=false
ce_ipsec_ikeport=500
ce_ipsec_nattport=4500

subm_ns=submariner-operator
SUBMARINER_BROKER_NS=submariner-k8s-broker
}

function verify_subm_operator() {
# Verify SubM namespace (ignore SubM Broker ns)
kubectl get ns $subm_ns
Expand All @@ -31,6 +66,7 @@ function verify_subm_operator() {
}

function verify_subm_deployed() {

# Verify shared CRDs
verify_endpoints_crd
verify_clusters_crd
Expand Down Expand Up @@ -144,8 +180,6 @@ function verify_subm_cr() {
validate_equals '.spec.ceIPSecDebug' $ce_ipsec_debug
validate_equals '.spec.ceIPSecIKEPort' $ce_ipsec_ikeport
validate_equals '.spec.ceIPSecNATTPort' $ce_ipsec_nattport
# FIXME: Sometimes this changes between runs, causes failures
validate_equals '.spec.ceIPSecPSK' $SUBMARINER_PSK || true
validate_equals '.spec.repository' $subm_engine_image_repo
validate_equals '.spec.version' $subm_engine_image_tag
validate_equals '.spec.broker' $subm_broker
Expand Down Expand Up @@ -219,8 +253,6 @@ function verify_subm_engine_pod() {
validate_pod_container_env 'BROKER_K8S_APISERVER' $SUBMARINER_BROKER_URL
validate_pod_container_env 'BROKER_K8S_REMOTENAMESPACE' $SUBMARINER_BROKER_NS
validate_pod_container_env 'BROKER_K8S_CA' $SUBMARINER_BROKER_CA
# FIXME: This changes between some deployment runs and causes failures
validate_pod_container_env 'CE_IPSEC_PSK' $SUBMARINER_PSK || true
validate_pod_container_env 'CE_IPSEC_DEBUG' $ce_ipsec_debug
validate_pod_container_env 'CE_IPSEC_IKEPORT' $ce_ipsec_ikeport
validate_pod_container_env 'CE_IPSEC_NATTPORT' $ce_ipsec_nattport
Expand Down Expand Up @@ -352,8 +384,6 @@ function verify_subm_engine_container() {
grep "SUBMARINER_CLUSTERCIDR=${cluster_CIDRs[$cluster]}" $env_file
grep "SUBMARINER_COLORCODES=$subm_colorcode" $env_file
grep "SUBMARINER_NATENABLED=$natEnabled" $env_file
# FIXME: This fails on redeploys
#grep "CE_IPSEC_PSK=$SUBMARINER_PSK" $env_file
grep "HOME=/root" $env_file

if kubectl exec $subm_engine_pod_name --namespace=$subm_ns -- command -v command; then
Expand Down Expand Up @@ -441,7 +471,7 @@ function verify_secrets() {
}

function verify_subm_broker_secrets() {
verify_secrets $subm_broker_ns $broker_deployment_name-client $SUBMARINER_BROKER_CA
verify_secrets $SUBMARINER_BROKER_NS $broker_deployment_name-client $SUBMARINER_BROKER_CA
}

function verify_subm_engine_secrets() {
Expand Down
13 changes: 13 additions & 0 deletions scripts/preload_images
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

source ${SCRIPTS_DIR}/lib/debug_functions
source ${SCRIPTS_DIR}/lib/version
source ${SCRIPTS_DIR}/lib/deploy_funcs

import_image quay.io/submariner/submariner
import_image quay.io/submariner/submariner-route-agent
import_image quay.io/submariner/submariner-operator
import_image quay.io/submariner/lighthouse-agent
import_image quay.io/submariner/submariner-globalnet

0 comments on commit 9f39a35

Please sign in to comment.