iteration 2 #81
name: pr
on:
  pull_request:
    types: [opened, synchronize]
  workflow_dispatch:
env:
  CLUSTER_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
  AWS_REGION: us-west-2
  GKE_REGION: us-central1
  GKE_NETWORK_NAME: default
  GKE_SUBNETWORK_NAME: default
  NF_NETWORK_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
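# Both the cluster and NetFoundry network names are suffixed with the workflow
# run ID, so concurrent runs of this workflow do not collide on cloud or
# NetFoundry resources.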
jobs:
  build_deploy:
    runs-on: ubuntu-24.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: netfoundry/ziti-k8s-agent:${{ github.run_id }}
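      # The image tag is the workflow run ID; the Deploy Webhook step in the
      # regression_test job below references the same tag via ZITI_AGENT_IMAGE,
      # so the tests exercise the agent image built from this pull request.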
  create-eks:
    runs-on: ubuntu-24.04
    permissions:
      id-token: write
      contents: read
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: install aws eksctl
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get the cli for aws eks
          curl --silent --show-error --fail --location \
            https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz \
            | tar xz -C /tmp
          sudo install /tmp/eksctl /usr/local/bin/
      - name: create-eks-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          cat <<YAML >eks-cluster.yaml
          apiVersion: eksctl.io/v1alpha5
          kind: ClusterConfig
          metadata:
            name: $CLUSTER_NAME
            region: $AWS_REGION
            version: "1.30"
          managedNodeGroups:
            - name: ng-1
              instanceType: t3.medium
              iam:
                withAddonPolicies:
                  ebs: true
                  fsx: true
                  efs: true
              desiredCapacity: 2
              privateNetworking: true
              labels:
                nodegroup-type: workloads
              tags:
                nodegroup-role: worker
          vpc:
            cidr: 10.10.0.0/16
            publicAccessCIDRs: []
            # the cluster endpoint allows both public and private access so that
            # the GitHub runner can reach the API server
            clusterEndpoints:
              publicAccess: true
              privateAccess: true
          YAML
          # create the cluster
          eksctl create cluster -f ./eks-cluster.yaml
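          # Not run here, but if the create hangs or fails, these commands (eksctl is
          # already on the PATH from the install step) are a quick way to check whether
          # the cluster and its nodegroup reached an ACTIVE state:
          #   eksctl get cluster --name "$CLUSTER_NAME" --region "$AWS_REGION"
          #   eksctl get nodegroup --cluster "$CLUSTER_NAME" --region "$AWS_REGION"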
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
          if [[ -z "$AWS_ACCOUNT_ID" ]]; then
            echo "ERROR: AWS_ACCOUNT_ID is empty" >&2
            exit 1
          fi
          CONTEXT_NAME="arn:aws:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}"
          echo "context=$CONTEXT_NAME" | tee -a $GITHUB_OUTPUT
  create-gke:
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      id-token: write
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
          export_environment_variables: true # sets GCP_PROJECT
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: create-gke-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # create the cluster
          gcloud container --project $GCP_PROJECT clusters create-auto $CLUSTER_NAME \
            --region $GKE_REGION \
            --release-channel "regular" \
            --network "projects/$GCP_PROJECT/global/networks/default" \
            --subnetwork "projects/$GCP_PROJECT/regions/$GKE_REGION/subnetworks/default" \
            --cluster-ipv4-cidr "/17" \
            --services-ipv4-cidr "/22"
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          CONTEXT_NAME="gke_${GCP_PROJECT}_${GKE_REGION}_${CLUSTER_NAME}"
          echo "context=$CONTEXT_NAME" | tee -a $GITHUB_OUTPUT
  regression_test:
    needs: [build_deploy, create-eks, create-gke]
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      id-token: write
    env:
      AWS_CLUSTER: ${{ needs.create-eks.outputs.cluster_context }}
      GKE_CLUSTER: ${{ needs.create-gke.outputs.cluster_context }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: install-kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: latest
      - name: install-aws-cli
        uses: unfor19/install-aws-cli-action@v1
        with:
          version: 2
          verbose: false
          arch: amd64
      - name: install postman, ziti-edge-tunnel, and ziti
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get postman
          curl -o- --silent --fail --location https://dl-cli.pstmn.io/install/linux64.sh | bash
          # get ziti-edge-tunnel
          curl --silent --fail --location https://get.openziti.io/tun/scripts/install-ubuntu.bash | bash
          sudo systemctl start ziti-edge-tunnel.service
          for i in {1..30}; do
            if sudo systemctl is-active --quiet ziti-edge-tunnel.service; then
              break
            fi
            sleep 1
          done
          sudo systemctl is-active --quiet ziti-edge-tunnel.service
          sudo ziti-edge-tunnel set_log_level --loglevel DEBUG
          # get ziti
          curl --silent --show-error --fail --location https://get.openziti.io/install.bash | sudo bash -s openziti
          ziti version
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: configure-eks-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          aws eks update-kubeconfig --name ${CLUSTER_NAME} --region ${AWS_REGION}
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
          export_environment_variables: true # sets GCP_PROJECT
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: configure-gke-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          gcloud container clusters get-credentials "$CLUSTER_NAME" --region "$GKE_REGION" --project "$GCP_PROJECT"
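          # Both kubeconfig contexts should now exist under the names exported by the
          # create-eks and create-gke jobs; when debugging locally, something like
          #   kubectl config get-contexts
          # is a quick way to confirm the EKS ARN context and the gke_* context are
          # present before the steps below pass them to --context.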
      - name: create-nf-network-services
        id: nf_auth
        shell: bash
        run: |
          set -o pipefail
          set +o xtrace # mask the oauth access token
          OAUTH_RESP="$(
            curl --silent --fail --location --request POST \
              https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token \
              --header 'Content-Type: application/x-www-form-urlencoded' \
              --user "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}:${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}" \
              --data-urlencode 'grant_type=client_credentials'
          )"
          if [[ -z "$OAUTH_RESP" ]]; then
            echo "ERROR: OAUTH_RESP is empty" >&2
            exit 1
          fi
          ACCESS_TOKEN="$(echo "$OAUTH_RESP" | jq -r .access_token)"
if [[ -z "$ACCESS_TOKEN" ]]; then | |
echo "ERROR: ACCESS_TOKEN is empty" >&2 | |
exit 1 | |
fi | |
echo "::add-mask::$ACCESS_TOKEN" | |
set -o xtrace | |
# compose a Postman global variable file for creating the NF network for this workflow run ID | |
cat <<JSON | tee nf-network-services-create.postman_global.json | |
{ | |
"id": "$(uuidgen)", | |
"name": "nf-network-services-create", | |
"_postman_variable_scope": "global", | |
"values": [ | |
{ | |
"key": "api", | |
"value": "https://gateway.production.netfoundry.io/core/v3", | |
"enabled": true | |
}, | |
{ | |
"key": "token", | |
"value": "https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token", | |
"enabled": true | |
}, | |
{ | |
"key": "jwt_token", | |
"value": "", | |
"enabled": true | |
}, | |
{ | |
"key": "jwt_type", | |
"value": "Bearer", | |
"enabled": true | |
}, | |
{ | |
"key": "client_id", | |
"value": "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "client_secret", | |
"value": "${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "networkName", | |
"value": "$NF_NETWORK_NAME", | |
"type": "any", | |
"enabled": true | |
}, | |
{ | |
"key": "networkId", | |
"value": "", | |
"type": "any", | |
"enabled": true | |
}, | |
{ | |
"key": "networkStatus", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "api_token", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "controller-api-endpoint", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "edgeRouterId", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "mopEdgeRouterId", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "mopEdgeRouterStatus", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "clientIdentityId", | |
"value": "", | |
"type": "any", | |
"enabled": true | |
}, | |
{ | |
"key": "adminIdentityId", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "clientIdentityJwt", | |
"value": "", | |
"type": "any", | |
"enabled": true | |
}, | |
{ | |
"key": "adminIdentityJwt", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "hostConfigId1", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "interceptConfigId1", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "hostConfigId2", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "interceptConfigId2", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "hostConfigId3", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "interceptConfigId3", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "hostConfigId4", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
}, | |
{ | |
"key": "interceptConfigId4", | |
"value": "", | |
"type": "default", | |
"enabled": true | |
} | |
] | |
} | |
JSON | |
          # validate the Postman global variable file
          if [[ ! -s nf-network-services-create.postman_global.json ]]; then
            echo "ERROR: nf-network-services-create.postman_global.json is empty" >&2
            exit 1
          elif ! jq -e . < nf-network-services-create.postman_global.json >/dev/null; then
            echo "ERROR: nf-network-services-create.postman_global.json is not valid json" >&2
            exit 1
          fi
          postman collection run \
            test/nf-network-services-create.postman_collection.json \
            -g nf-network-services-create.postman_global.json \
            -k
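          # Judging by the variable names above, the collection is expected to create
          # the NetFoundry network named for this run plus an edge router, adminUser
          # and testUser identities, and the host/intercept config pairs used by the
          # bookinfo services; the rest of this step only assumes the network and the
          # two identities exist.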
          NF_NETWORK_LIST="$(curl --silent --fail --location --request GET \
            https://gateway.production.netfoundry.io/core/v3/networks \
            --header 'Content-Type: application/json' \
            --header "Authorization: Bearer $ACCESS_TOKEN")"
          if [[ -z "$NF_NETWORK_LIST" ]]; then
            echo "ERROR: NF_NETWORK_LIST is empty" >&2
            exit 1
          fi
          NF_NETWORK_ID="$(echo "$NF_NETWORK_LIST" | \
            jq -r --arg nf_network_name "$NF_NETWORK_NAME" \
            '._embedded.networkList[] | select(.name==$nf_network_name).id')"
          if [[ -z "$NF_NETWORK_ID" ]]; then
            echo "ERROR: NF_NETWORK_ID is empty" >&2
            exit 1
          fi
          echo NF_NETWORK_ID="$NF_NETWORK_ID" | tee -a $GITHUB_ENV
          set +o xtrace # mask the oauth access token
          ZITI_SESSION_OBJ="$(
            curl --silent --fail --location --request POST \
              https://gateway.production.netfoundry.io/core/v3/networks/"$NF_NETWORK_ID"/exchange \
              --header 'Content-Type: application/json' \
              --header "Authorization: Bearer $ACCESS_TOKEN" \
              --data '{"type": "session"}'
          )"
          if [[ -z "$ZITI_SESSION_OBJ" ]]; then
            echo "ERROR: ZITI_SESSION_OBJ is empty" >&2
            exit 1
          fi
          ZITI_SESSION_TOKEN=$(echo "$ZITI_SESSION_OBJ" | jq -r .value)
          echo "::add-mask::$ZITI_SESSION_TOKEN"
          set -o xtrace
          if [[ -z "$ZITI_SESSION_TOKEN" ]]; then
            echo "ERROR: ZITI_SESSION_TOKEN is empty" >&2
            exit 1
          fi
          # call the Ziti controller with the session token to get the list of identities
          NF_IDENTITY_LIST="$(
            curl --insecure --silent --fail --location --request GET \
              "$(echo "$ZITI_SESSION_OBJ" | jq -r .networkControllerUrl)"/identities \
              --header 'Content-Type: application/json' \
              --header "zt-session: $ZITI_SESSION_TOKEN"
          )"
          if [[ -z "$NF_IDENTITY_LIST" ]]; then
            echo "ERROR: NF_IDENTITY_LIST is empty" >&2
            exit 1
          fi
          # enroll adminUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="adminUser").enrollment.ott.jwt' | tee adminUser.jwt
          if [[ ! -s adminUser.jwt ]]; then
            echo "ERROR: adminUser.jwt is empty" >&2
            exit 1
          fi
          ziti edge enroll -j adminUser.jwt -o adminUser.json
          if [[ ! -s adminUser.json ]]; then
            echo "ERROR: adminUser.json is empty" >&2
            exit 1
          elif ! jq -e . < adminUser.json >/dev/null; then
            echo "ERROR: adminUser.json is not valid json" >&2
            exit 1
          fi
          echo "IDENTITY_FILE=adminUser.json" | tee -a $GITHUB_ENV
          # enroll testUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="testUser").enrollment.ott.jwt' | tee testUser.jwt
          if [[ ! -s testUser.jwt ]]; then
            echo "ERROR: testUser.jwt is empty" >&2
            exit 1
          fi
          sudo ziti-edge-tunnel add --jwt "$(< ./testUser.jwt)" --identity testUser
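          # At this point the local tunneler should pick up the testUser identity and
          # begin intercepting the Ziti service addresses used by the test cases below;
          # if that does not happen, `journalctl -lu ziti-edge-tunnel.service` (also
          # collected by the print-logs step on failure) is the place to look.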
      - name: Deploy Webhook to Clusters
        shell: bash
        env:
          ZITI_AGENT_IMAGE: netfoundry/ziti-k8s-agent:${{ github.run_id }}
          ZITI_AGENT_NAMESPACE: ziti # must be new because of the way this test is written
        run: |
          set -o pipefail
          set -o xtrace
          # IDENTITY_FILE exported in prior step create-nf-network-services
          SIDECAR_SELECTORS=namespace \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-namespace-selector.yaml
          SIDECAR_SELECTORS=pod \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-pod-selector.yaml
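          # SIDECAR_SELECTORS appears to control whether the generated admission
          # webhook matches workloads by a namespace label or by a pod label: the EKS
          # cluster gets the namespace-selector manifest and later labels namespace
          # test1 with openziti/tunnel-enabled=true, while the GKE cluster gets the
          # pod-selector manifest and labels the pod templates in test2 instead.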
          # Install cert-manager via helm for both clusters
          CERT_MANAGER_VERSION=v1.16.2
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add jetstack https://charts.jetstack.io
          helm repo update
          helm upgrade --install cert-manager jetstack/cert-manager \
            --namespace cert-manager \
            --create-namespace \
            --set crds.enabled=true \
            --version "$CERT_MANAGER_VERSION" \
            --kube-context "$AWS_CLUSTER" \
            --timeout 5m
          helm upgrade --install cert-manager jetstack/cert-manager \
            --namespace cert-manager \
            --create-namespace \
            --set crds.enabled=true \
            --set global.leaderElection.namespace="cert-manager" \
            --version "$CERT_MANAGER_VERSION" \
            --kube-context "$GKE_CLUSTER" \
            --timeout 5m
          # create new namespace for ziti agent
          kubectl create namespace "$ZITI_AGENT_NAMESPACE" --context "$AWS_CLUSTER"
          kubectl create namespace "$ZITI_AGENT_NAMESPACE" --context "$GKE_CLUSTER"
          # Wait for cert-manager components and verify webhook is properly configured
          for CLUSTER in "$AWS_CLUSTER" "$GKE_CLUSTER"; do
            : Waiting for cert-manager components in cluster $CLUSTER
            kubectl wait --for=condition=Available deployment -l app=cert-manager -n cert-manager --timeout=300s --context "$CLUSTER"
            kubectl wait --for=condition=Available deployment -l app=webhook -n cert-manager --timeout=300s --context "$CLUSTER"
            kubectl wait --for=condition=Available deployment -l app=cainjector -n cert-manager --timeout=300s --context "$CLUSTER"
            : Checking apiserver readiness
            kubectl wait --for=condition=Available apiservice v1.cert-manager.io --timeout=300s --context $CLUSTER
            kubectl wait --for=condition=Available apiservice v1.acme.cert-manager.io --timeout=300s --context $CLUSTER
            : Verifying webhook is properly configured
            kubectl get validatingwebhookconfigurations,mutatingwebhookconfigurations -n cert-manager --context "$CLUSTER"
          done
          # Apply agent manifests with retries
          for CLUSTER_MANIFEST in \
            "${AWS_CLUSTER}=ziti-k8s-agent-namespace-selector.yaml" \
            "${GKE_CLUSTER}=ziti-k8s-agent-pod-selector.yaml"
          do
            CLUSTER="${CLUSTER_MANIFEST%%=*}"
            MANIFEST="${CLUSTER_MANIFEST#*=}"
            : Applying Ziti configurations to cluster $CLUSTER
            ATTEMPTS=2
            DELAY=30
            while (( ATTEMPTS-- )); do
              if kubectl apply -f "$MANIFEST" --context "$CLUSTER"; then
                break
              else
                sleep $DELAY
              fi
            done
            # one final apply: a no-op if a retry above succeeded, and the attempt
            # that fails this step if none of the retries did
            kubectl apply -f "$MANIFEST" --context "$CLUSTER"
          done
          : Waiting for ziti-admission-webhook pods to be ready in AWS cluster...
          kubectl wait --for=condition=Available deployment -l app=ziti-admission-webhook -n $ZITI_AGENT_NAMESPACE --timeout=120s --context $AWS_CLUSTER
          : Waiting for ziti-admission-webhook pods to be ready in GKE cluster...
          kubectl wait --for=condition=Available deployment -l app=ziti-admission-webhook -n $ZITI_AGENT_NAMESPACE --timeout=120s --context $GKE_CLUSTER
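          # Assuming the agent manifest registers a MutatingWebhookConfiguration for
          # sidecar injection (which the label-triggered injection below implies), a
          # useful manual check when injection does not happen is:
          #   kubectl get mutatingwebhookconfigurations --context "$AWS_CLUSTER"
          #   kubectl get mutatingwebhookconfigurations --context "$GKE_CLUSTER"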
      - name: deploy-bookinfo-app
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # test the GKE cluster with pod selectors
          kubectl create namespace test2 --context $GKE_CLUSTER
          kubectl apply -f ./test/bookinfo.yaml --context $GKE_CLUSTER -n test2
          # wait for pods to be ready
          kubectl wait --for=condition=Available deployment -l 'app in (details,productpage,ratings,reviews)' -n test2 --timeout=240s --context $GKE_CLUSTER
          kubectl get deployments -n test2 -l 'app in (details,productpage,ratings,reviews)' -o name --context $GKE_CLUSTER \
            | xargs -I DEPLOYMENT kubectl patch DEPLOYMENT -p '{"spec":{"template":{"metadata":{"labels":{"openziti/tunnel-enabled":"true"}}}}}' --context $GKE_CLUSTER -n test2
          sleep 5 # Wait for rollout to start (pods to be terminated)
          kubectl rollout status deployment -l 'app in (details,productpage,ratings,reviews)' -n test2 --timeout=240s --context $GKE_CLUSTER
          # Then wait for new pods to be fully available
          kubectl wait --for=condition=Available deployment -l 'app in (details,productpage,ratings,reviews)' -n test2 --timeout=240s --context $GKE_CLUSTER
          # test the AWS cluster with a namespace selector
          kubectl create namespace test1 --context $AWS_CLUSTER
          kubectl label namespace test1 "openziti/tunnel-enabled=true" --context $AWS_CLUSTER
          kubectl apply -f ./test/bookinfo.yaml --context $AWS_CLUSTER -n test1
          # wait for pods to be ready
          kubectl wait --for=condition=Available deployment -l 'app in (details,productpage,ratings,reviews)' -n test1 --timeout=240s --context $AWS_CLUSTER
      # this verifies that Ziti's non-default terminatorStrategy: random (configured in
      # test/nf-network-services-create.postman_collection.json) is evenly distributing requests to all reviews pods
      # across both clusters
      - name: run-testcase-01
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          LOG_EMPTY=0
          kubectl get pods -n test1 --context $AWS_CLUSTER | tee ./testcase-01_pods.log
          kubectl get pods -n test2 --context $GKE_CLUSTER | tee -a ./testcase-01_pods.log
          # send enough requests for the terminatorStrategy: random to distribute
          # requests evenly across all pods in both clusters
          for i in $(seq 1 40); do
            curl -sS -X GET http://productpage.ziti:9080/productpage?u=test \
              | grep reviews \
              | tee -a ./testcase-01_curl_output.log \
              || true # ignore individual curl|grep errors because we're verifying the aggregate result later
          done
          for LOG in testcase-01_curl_output.log testcase-01_pods.log; do
            if [[ -s "./$LOG" ]]; then
              cat "./$LOG"
            else
              echo "ERROR: $LOG is empty" >&2
              LOG_EMPTY=1
            fi
          done
          if ! (( LOG_EMPTY )); then
            python ./test/verify_test_results.py ./testcase-01_pods.log ./testcase-01_curl_output.log
          else
            echo "One or more test logs are empty. Skipping verification."
            exit 1
          fi
      # this changes the stack topology for the next test so that the EKS test1 cluster hosts only frontend pods,
      # i.e. the deployment providing the productpage.ziti service, and the GKE test2 cluster hosts only backend
      # pods, i.e. the ratings, reviews, and details deployments
      - name: split frontend and backend between the clusters
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # in the EKS test1 cluster, scale the backend deployments to 0
          kubectl scale deployment -l 'app in (ratings,reviews,details)' --replicas=0 -n test1 --context $AWS_CLUSTER
          kubectl wait --for=delete pods -l 'app in (ratings,reviews,details)' -n test1 --timeout=120s --context $AWS_CLUSTER
          # in the GKE test2 cluster, scale the frontend deployments to 0
          kubectl scale deployment -l app=productpage --replicas=0 -n test2 --context $GKE_CLUSTER
          kubectl wait --for=delete pods -l app=productpage -n test2 --timeout=120s --context $GKE_CLUSTER
      # this verifies the result has not changed since the last test, despite a radical change of stack topology,
      # relying on Ziti to adapt to the loss of frontend and backend pods
      - name: run-testcase-02
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          kubectl get pods -n test1 --context $AWS_CLUSTER | tee ./testcase-02_pods.log
          kubectl get pods -n test2 --context $GKE_CLUSTER | tee -a ./testcase-02_pods.log
          LOG_EMPTY=0
          # send enough requests for the terminatorStrategy: random to distribute
          # requests evenly across all pods
          for i in $(seq 1 40); do
            curl -s -X GET http://productpage.ziti:9080/productpage?u=test \
              | grep reviews \
              | tee -a ./testcase-02_curl_output.log \
              || true # ignore individual curl|grep errors because we're verifying the aggregate result later
          done
          for LOG in testcase-02_curl_output.log testcase-02_pods.log; do
            if [[ -s "./$LOG" ]]; then
              cat "./$LOG"
            else
              echo "ERROR: $LOG is empty" >&2
              LOG_EMPTY=1
            fi
          done
          if ! (( LOG_EMPTY )); then
            python ./test/verify_test_results.py ./testcase-02_pods.log ./testcase-02_curl_output.log
          else
            echo "One or more test logs are empty. Skipping verification."
            exit 1
          fi
      - name: print logs
        if: failure()
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get cluster info
          kubectl cluster-info --context "$AWS_CLUSTER"
          kubectl cluster-info --context "$GKE_CLUSTER"
          # list all pods
          kubectl get pods --all-namespaces --context "$AWS_CLUSTER"
          kubectl get pods --all-namespaces --context "$GKE_CLUSTER"
          # inspect cert-manager app
          kubectl --context $AWS_CLUSTER --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context $GKE_CLUSTER --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context $AWS_CLUSTER --namespace cert-manager logs --selector app=cert-manager
          kubectl --context $GKE_CLUSTER --namespace cert-manager logs --selector app=cert-manager
          # inspect cert-manager webhook
          kubectl --context $AWS_CLUSTER --namespace cert-manager describe pod --selector app=webhook
          kubectl --context $GKE_CLUSTER --namespace cert-manager describe pod --selector app=webhook
          kubectl --context $AWS_CLUSTER --namespace cert-manager logs --selector app=webhook
          kubectl --context $GKE_CLUSTER --namespace cert-manager logs --selector app=webhook
          # inspect ziti-admission-webhook
          kubectl --context $AWS_CLUSTER --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context $GKE_CLUSTER --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context $AWS_CLUSTER --namespace ziti logs --selector app=ziti-admission-webhook
          kubectl --context $GKE_CLUSTER --namespace ziti logs --selector app=ziti-admission-webhook
          # inspect bookinfo apps
          kubectl --context $AWS_CLUSTER --namespace test1 describe pod --selector 'app in (details,ratings)'
          kubectl --context $GKE_CLUSTER --namespace test2 describe pod --selector 'app in (productpage,reviews)'
          kubectl --context $AWS_CLUSTER --namespace test1 logs --selector 'app in (details,ratings)'
          kubectl --context $GKE_CLUSTER --namespace test2 logs --selector 'app in (productpage,reviews)'
          # inspect ziti-edge-tunnel
          journalctl -lu ziti-edge-tunnel.service
  cleanup-debug-delay:
    if: failure()
    needs: [regression_test]
    runs-on: ubuntu-24.04
    steps:
      - name: wait 30 minutes
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          sleep 1800
  cleanup-nf:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/nf-network-cleanup
        with:
          network_name: ${{ env.NF_NETWORK_NAME }}
          client_id: ${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}
          client_secret: ${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}
  cleanup-eks:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/eks-cluster-cleanup
        with:
          cluster_name: ${{ env.CLUSTER_NAME }}
          region: ${{ env.AWS_REGION }}
          role_to_assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
  cleanup-gke:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/gke-cluster-cleanup
        with:
          cluster_name: ${{ env.CLUSTER_NAME }}
          region: ${{ env.GKE_REGION }}
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}