name: pr
on:
  pull_request:
    types: [opened, synchronize]
  workflow_dispatch:
env:
  CLUSTER_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
  AWS_REGION: us-west-2
  GKE_REGION: us-central1
  GKE_NETWORK_NAME: default
  GKE_SUBNETWORK_NAME: default
  NF_NETWORK_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
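# the workflow run ID suffix gives every run its own cluster and NetFoundry network names,
# so concurrent or re-run workflows cannot collide on shared resources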
jobs:
  build_deploy:
    runs-on: ubuntu-24.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: netfoundry/ziti-k8s-agent:${{ github.run_id }}
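  # the image tag is the workflow run ID; the regression_test job later points ZITI_AGENT_IMAGE at
  # the same tag, so the webhook deployment always exercises the image built from this pull request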
  create-eks:
    runs-on: ubuntu-24.04
    permissions:
      id-token: write
      contents: read
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: install aws eksctl
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get the cli for aws eks
          curl --silent --show-error --fail --location \
            https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz \
            | tar xz -C /tmp
          sudo install /tmp/eksctl /usr/local/bin/
      - name: create-eks-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          cat <<YAML >eks-cluster.yaml
          apiVersion: eksctl.io/v1alpha5
          kind: ClusterConfig
          metadata:
            name: $CLUSTER_NAME
            region: $AWS_REGION
            version: "1.30"
          managedNodeGroups:
            - name: ng-1
              instanceType: t3.medium
              iam:
                withAddonPolicies:
                  ebs: true
                  fsx: true
                  efs: true
              desiredCapacity: 2
              privateNetworking: true
              labels:
                nodegroup-type: workloads
              tags:
                nodegroup-role: worker
          vpc:
            cidr: 10.10.0.0/16
            publicAccessCIDRs: []
            # allow both public and private access to the cluster endpoint so the GitHub runner can
            # reach the API server while the nodes use the private endpoint
            clusterEndpoints:
              publicAccess: true
              privateAccess: true
          YAML
          # create the cluster
          eksctl create cluster -f ./eks-cluster.yaml
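      # eksctl provisions the control plane and nodegroup through CloudFormation and, by default,
      # also writes a kubeconfig entry on this runner; only the context name is exported for the
      # other jobs, which build their own kubeconfigs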
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
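          # compose the context name that `aws eks update-kubeconfig` will register later in the
          # regression_test job; by default that context is named with the cluster ARN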
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
          if [[ -z "$AWS_ACCOUNT_ID" ]]; then
            echo "ERROR: AWS_ACCOUNT_ID is empty" >&2
            exit 1
          fi
          CONTEXT_NAME="arn:aws:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}"
          echo "context=$CONTEXT_NAME" | tee -a $GITHUB_OUTPUT
  create-gke:
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      id-token: write
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
          export_environment_variables: true # sets GCP_PROJECT
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: create-gke-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # create the cluster
          gcloud container --project $GCP_PROJECT clusters create-auto $CLUSTER_NAME \
            --region $GKE_REGION \
            --release-channel "regular" \
            --network "projects/$GCP_PROJECT/global/networks/default" \
            --subnetwork "projects/$GCP_PROJECT/regions/$GKE_REGION/subnetworks/default" \
            --cluster-ipv4-cidr "/17" \
            --services-ipv4-cidr "/22"
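      # clusters create-auto builds a GKE Autopilot cluster, so no node pools are configured here;
      # nodes are provisioned on demand once the test workloads are scheduled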
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
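          # gcloud container clusters get-credentials (run later in the regression_test job) names
          # the kubeconfig context gke_<project>_<location>_<cluster>, so the same name can be
          # composed here without a kubeconfig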
CONTEXT_NAME="gke_${GCP_PROJECT}_${GKE_REGION}_${CLUSTER_NAME}" | |
echo "context=$CONTEXT_NAME" | tee -a $GITHUB_OUTPUT | |
  regression_test:
    needs: [build_deploy, create-eks, create-gke]
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      id-token: write
    env:
      AWS_CLUSTER: ${{ needs.create-eks.outputs.cluster_context }}
      GKE_CLUSTER: ${{ needs.create-gke.outputs.cluster_context }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: install-kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: latest
      - name: install-aws-cli
        uses: unfor19/install-aws-cli-action@v1
        with:
          version: 2
          verbose: false
          arch: amd64
      - name: install postman, ziti-edge-tunnel, and ziti
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get postman
          curl -o- --silent --fail --location https://dl-cli.pstmn.io/install/linux64.sh | bash
          # get ziti-edge-tunnel
          curl --silent --fail --location https://get.openziti.io/tun/scripts/install-ubuntu.bash | bash
          sudo systemctl start ziti-edge-tunnel.service
          for i in {1..30}; do
            if sudo systemctl is-active --quiet ziti-edge-tunnel.service; then
              break
            fi
            sleep 1
          done
          sudo systemctl is-active --quiet ziti-edge-tunnel.service
          sudo ziti-edge-tunnel set_log_level --loglevel DEBUG
          # get ziti
          curl --silent --show-error --fail --location https://get.openziti.io/install.bash | sudo bash -s openziti
          ziti version
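      # the postman CLI runs the collection that builds the NetFoundry network, the ziti CLI enrolls
      # the webhook's admin identity, and ziti-edge-tunnel gives this runner the client identity it
      # uses to reach http://productpage.ziti:9080 in the test cases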
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: configure-eks-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          aws eks update-kubeconfig --name ${CLUSTER_NAME} --region ${AWS_REGION}
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
          export_environment_variables: true # sets GCP_PROJECT
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: configure-gke-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          gcloud container clusters get-credentials "$CLUSTER_NAME" --region "$GKE_REGION" --project "$GCP_PROJECT"
      - name: create-nf-network-services
        id: nf_auth
        shell: bash
        run: |
          set -o pipefail
          set +o xtrace # mask the oauth access token
          OAUTH_RESP="$(
            curl --silent --fail --location --request POST \
              https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token \
              --header 'Content-Type: application/x-www-form-urlencoded' \
              --user "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}:${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}" \
              --data-urlencode 'grant_type=client_credentials'
          )"
          if [[ -z "$OAUTH_RESP" ]]; then
            echo "ERROR: OAUTH_RESP is empty" >&2
            exit 1
          fi
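          # a successful client_credentials grant returns JSON shaped roughly like
          #   {"access_token":"...","expires_in":3600,"token_type":"Bearer"}
          # (standard OAuth2 response); only access_token is consumed below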
          ACCESS_TOKEN="$(echo "$OAUTH_RESP" | jq -r .access_token)"
          if [[ -z "$ACCESS_TOKEN" ]]; then
            echo "ERROR: ACCESS_TOKEN is empty" >&2
            exit 1
          fi
          echo "::add-mask::$ACCESS_TOKEN"
          echo "NF_ACCESS_TOKEN=$ACCESS_TOKEN" | tee -a $GITHUB_ENV
          set -o xtrace
          # compose a Postman global variable file for creating the NF network for this workflow run ID
          cat <<JSON | tee nf-network-services-create.postman_global.json
          {
            "id": "$(uuidgen)",
            "name": "nf-network-services-create",
            "_postman_variable_scope": "global",
            "values": [
              {
                "key": "api",
                "value": "https://gateway.production.netfoundry.io/core/v3",
                "enabled": true
              },
              {
                "key": "token",
                "value": "https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token",
                "enabled": true
              },
              {
                "key": "jwt_token",
                "value": "",
                "enabled": true
              },
              {
                "key": "jwt_type",
                "value": "Bearer",
                "enabled": true
              },
              {
                "key": "client_id",
                "value": "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}",
                "type": "default",
                "enabled": true
              },
              {
                "key": "client_secret",
                "value": "${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}",
                "type": "default",
                "enabled": true
              },
              {
                "key": "networkName",
                "value": "$NF_NETWORK_NAME",
                "type": "any",
                "enabled": true
              },
              {
                "key": "networkId",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "networkStatus",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "api_token",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "controller-api-endpoint",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "edgeRouterId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "mopEdgeRouterId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "mopEdgeRouterStatus",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "clientIdentityId",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "adminIdentityId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "clientIdentityJwt",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "adminIdentityJwt",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId1",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId1",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId2",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId2",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId3",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId3",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId4",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId4",
                "value": "",
                "type": "default",
                "enabled": true
              }
            ]
          }
          JSON
          # validate the Postman global variable file
          if [[ ! -s nf-network-services-create.postman_global.json ]]; then
            echo "ERROR: nf-network-services-create.postman_global.json is empty" >&2
            exit 1
          elif ! jq -e . < nf-network-services-create.postman_global.json >/dev/null; then
            echo "ERROR: nf-network-services-create.postman_global.json is not valid json" >&2
            exit 1
          fi
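          # the collection (test/nf-network-services-create.postman_collection.json) is expected to
          # create the NetFoundry network, its edge routers, the adminUser and testUser identities,
          # and the bookinfo service configs whose IDs are tracked by the variables above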
          postman collection run \
            test/nf-network-services-create.postman_collection.json \
            -g nf-network-services-create.postman_global.json \
            -k
          NF_NETWORK_LIST="$(curl --silent --fail --location --request GET \
            https://gateway.production.netfoundry.io/core/v3/networks \
            --header 'Content-Type: application/json' \
            --header "Authorization: Bearer $ACCESS_TOKEN")"
          if [[ -z "$NF_NETWORK_LIST" ]]; then
            echo "ERROR: NF_NETWORK_LIST is empty" >&2
            exit 1
          fi
          NF_NETWORK_ID="$(echo "$NF_NETWORK_LIST" | \
            jq -r --arg nf_network_name "$NF_NETWORK_NAME" \
              '._embedded.networkList[] | select(.name==$nf_network_name).id')"
          if [[ -z "$NF_NETWORK_ID" ]]; then
            echo "ERROR: NF_NETWORK_ID is empty" >&2
            exit 1
          fi
          echo NF_NETWORK_ID="$NF_NETWORK_ID" | tee -a $GITHUB_ENV
          set +o xtrace # mask the oauth access token
          ZITI_SESSION_OBJ="$(
            curl --silent --fail --location --request POST \
              https://gateway.production.netfoundry.io/core/v3/networks/"$NF_NETWORK_ID"/exchange \
              --header 'Content-Type: application/json' \
              --header "Authorization: Bearer $ACCESS_TOKEN" \
              --data '{"type": "session"}'
          )"
          if [[ -z "$ZITI_SESSION_OBJ" ]]; then
            echo "ERROR: ZITI_SESSION_OBJ is empty" >&2
            exit 1
          fi
          ZITI_SESSION_TOKEN=$(echo "$ZITI_SESSION_OBJ" | jq -r .value)
          echo "::add-mask::$ZITI_SESSION_TOKEN"
          set -o xtrace
          if [[ -z "$ZITI_SESSION_TOKEN" ]]; then
            echo "ERROR: ZITI_SESSION_TOKEN is empty" >&2
            exit 1
          fi
          # call the Ziti controller with the session token to get the list of identities
          NF_IDENTITY_LIST="$(
            curl --insecure --silent --fail --location --request GET \
              "$(echo "$ZITI_SESSION_OBJ" | jq -r .networkControllerUrl)"/identities \
              --header 'Content-Type: application/json' \
              --header "zt-session: $ZITI_SESSION_TOKEN"
          )"
          if [[ -z "$NF_IDENTITY_LIST" ]]; then
            echo "ERROR: NF_IDENTITY_LIST is empty" >&2
            exit 1
          fi
          # enroll adminUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="adminUser").enrollment.ott.jwt' | tee adminUser.jwt
          if [[ ! -s adminUser.jwt ]]; then
            echo "ERROR: adminUser.jwt is empty" >&2
            exit 1
          fi
          ziti edge enroll -j adminUser.jwt -o adminUser.json
          if [[ ! -s adminUser.json ]]; then
            echo "ERROR: adminUser.json is empty" >&2
            exit 1
          elif ! jq -e . < adminUser.json >/dev/null; then
            echo "ERROR: adminUser.json is not valid json" >&2
            exit 1
          fi
          echo "IDENTITY_FILE=adminUser.json" | tee -a $GITHUB_ENV
          # enroll testUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="testUser").enrollment.ott.jwt' | tee testUser.jwt
          if [[ ! -s testUser.jwt ]]; then
            echo "ERROR: testUser.jwt is empty" >&2
            exit 1
          fi
          sudo ziti-edge-tunnel add --jwt "$(< ./testUser.jwt)" --identity testUser
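      # adminUser.json (exported above as IDENTITY_FILE) is consumed by generate-ziti-agent-manifest.bash
      # in the next step, and enrolling testUser into the runner's tunneler is what lets the plain
      # curl calls in the test cases resolve and reach http://productpage.ziti:9080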
      - name: Deploy Webhook to Clusters
        shell: bash
        env:
          ZITI_AGENT_IMAGE: netfoundry/ziti-k8s-agent:${{ github.run_id }}
          ZITI_AGENT_NAMESPACE: ziti # must be new because of the way this test is written
        run: |
          set -o pipefail
          set -o xtrace
          # IDENTITY_FILE exported in prior step create-nf-network-services
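          # the generated manifests select workloads for sidecar injection either by a namespace
          # label or by pod labels; the EKS cluster exercises the namespace selector and the GKE
          # cluster the pod selector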
          SIDECAR_SELECTORS=namespace \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-namespace-selector.yaml
          SIDECAR_SELECTORS=pod \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-pod-selector.yaml
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --context $AWS_CLUSTER
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --context $GKE_CLUSTER
          : "Waiting for cert-manager pods to be ready in AWS cluster..."
          kubectl wait --for=condition=ready pod -l app=cert-manager -n cert-manager --timeout=240s --context $AWS_CLUSTER
          kubectl wait --for=condition=ready pod -l app=webhook -n cert-manager --timeout=240s --context $AWS_CLUSTER
          : "Waiting for cert-manager pods to be ready in GKE cluster..."
          kubectl wait --for=condition=ready pod -l app=cert-manager -n cert-manager --timeout=240s --context $GKE_CLUSTER
          kubectl wait --for=condition=ready pod -l app=webhook -n cert-manager --timeout=480s --context $GKE_CLUSTER
          # create new namespace for ziti agent
          kubectl create namespace $ZITI_AGENT_NAMESPACE --context $AWS_CLUSTER
          kubectl create namespace $ZITI_AGENT_NAMESPACE --context $GKE_CLUSTER
          # deploy ziti agent
          kubectl apply -f ziti-k8s-agent-namespace-selector.yaml --context $AWS_CLUSTER
          kubectl apply -f ziti-k8s-agent-pod-selector.yaml --context $GKE_CLUSTER
          : "Waiting for ziti-admission-webhook pods to be ready in AWS cluster..."
          kubectl wait --for=condition=ready pod -l app=ziti-admission-webhook -n $ZITI_AGENT_NAMESPACE --timeout=120s --context $AWS_CLUSTER
          : "Waiting for ziti-admission-webhook pods to be ready in GKE cluster..."
          kubectl wait --for=condition=ready pod -l app=ziti-admission-webhook -n $ZITI_AGENT_NAMESPACE --timeout=120s --context $GKE_CLUSTER
      - name: deploy-bookinfo-app
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # test the AWS cluster with a namespace selector
          kubectl create namespace test1 --context $AWS_CLUSTER
          kubectl label namespace test1 "openziti/ziti-tunnel=enabled" --context $AWS_CLUSTER
          kubectl apply -f ./test/bookinfo.yaml --context $AWS_CLUSTER -n test1
          # wait for the deployments to become available
          kubectl wait --for=condition=available deployment -l 'app in (details,productpage,ratings,reviews)' -n test1 --timeout=60s --context $AWS_CLUSTER
          # test the GKE cluster with pod selectors
          kubectl create namespace test2 --context $GKE_CLUSTER
          kubectl apply -f ./test/bookinfo.yaml --context $GKE_CLUSTER -n test2
          # wait for the deployments to become available
          kubectl wait --for=condition=available deployment -l 'app in (details,productpage,ratings,reviews)' -n test2 --timeout=60s --context $GKE_CLUSTER
          kubectl get deployments -n test2 -l 'app in (details,productpage,ratings,reviews)' -o name --context $GKE_CLUSTER \
            | xargs -I DEPLOYMENT kubectl patch DEPLOYMENT -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context $GKE_CLUSTER -n test2
          # wait for the deployments to become available again after the label patch
          kubectl wait --for=condition=available deployment -l 'app in (details,productpage,ratings,reviews)' -n test2 --timeout=60s --context $GKE_CLUSTER
      # this verifies that Ziti's non-default terminatorStrategy: random (configured in
      # test/nf-network-services-create.postman_collection.json) is evenly distributing requests to
      # all reviews pods across both clusters
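      # verify_test_results.py is expected to compare the reviews pod names captured in
      # testcase_pods.log against those appearing in testcase_curl_output.log, i.e. to confirm that
      # the 40 requests were spread across every reviews replica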
      - name: run-testcase-01
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          if [ -f "./testcase_pods.log" ]; then
            rm ./testcase_pods.log
          fi
          if [ -f "./testcase_curl_output.log" ]; then
            rm ./testcase_curl_output.log
          fi
          kubectl get pods -n test1 --context $AWS_CLUSTER | tee -a ./testcase_pods.log
          kubectl get pods -n test2 --context $GKE_CLUSTER | tee -a ./testcase_pods.log
          for i in $(seq 1 40); do
            curl -sS -X GET http://productpage.ziti:9080/productpage?u=test \
              |& grep reviews \
              | tee -a ./testcase_curl_output.log
          done
          cat ./testcase_curl_output.log
          cat ./testcase_pods.log
          python ./test/verify_test_results.py ./testcase_pods.log ./testcase_curl_output.log
      # this changes the stack topology for the next test: the EKS test1 cluster keeps the
      # productpage frontend and the reviews pods whose names the Python verification script counts,
      # while the GKE test2 cluster keeps only the details and ratings backends
      - name: split frontend and backend between the clusters
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # in the EKS test1 cluster, scale the details and ratings deployments to 0
          kubectl scale deployment -l 'app in (details,ratings)' --replicas=0 -n test1 --context $AWS_CLUSTER
          kubectl wait --for=delete pods -l 'app in (details,ratings)' -n test1 --timeout=120s --context $AWS_CLUSTER
          # in the GKE test2 cluster, scale the productpage and reviews deployments to 0
          kubectl scale deployment -l 'app in (productpage,reviews)' --replicas=0 -n test2 --context $GKE_CLUSTER
          kubectl wait --for=delete pods -l 'app in (productpage,reviews)' -n test2 --timeout=120s --context $GKE_CLUSTER
      # this verifies the result has not changed since the last test, despite a radical change of
      # stack topology, relying on Ziti to adapt to the loss of frontend and backend pods
      - name: run-testcase-02
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          if [ -f "./testcase_pods.log" ]; then
            rm ./testcase_pods.log
          fi
          if [ -f "./testcase_curl_output.log" ]; then
            rm ./testcase_curl_output.log
          fi
          kubectl get pods -n test1 --context $AWS_CLUSTER | tee -a ./testcase_pods.log
          kubectl get pods -n test2 --context $GKE_CLUSTER | tee -a ./testcase_pods.log
          for i in $(seq 1 40); do
            curl -s -X GET http://productpage.ziti:9080/productpage?u=test \
              | grep reviews \
              | tee -a ./testcase_curl_output.log
          done
          cat ./testcase_curl_output.log
          cat ./testcase_pods.log
          python ./test/verify_test_results.py ./testcase_pods.log ./testcase_curl_output.log
      - name: print logs
        if: failure()
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get cluster info
          kubectl cluster-info --context "$AWS_CLUSTER"
          kubectl cluster-info --context "$GKE_CLUSTER"
          # list all pods
          kubectl get pods --all-namespaces --context "$AWS_CLUSTER"
          kubectl get pods --all-namespaces --context "$GKE_CLUSTER"
          # inspect cert-manager app
          kubectl --context $AWS_CLUSTER --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context $GKE_CLUSTER --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context $AWS_CLUSTER --namespace cert-manager logs --selector app=cert-manager
          kubectl --context $GKE_CLUSTER --namespace cert-manager logs --selector app=cert-manager
          # inspect cert-manager webhook
          kubectl --context $AWS_CLUSTER --namespace cert-manager describe pod --selector app=webhook
          kubectl --context $GKE_CLUSTER --namespace cert-manager describe pod --selector app=webhook
          kubectl --context $AWS_CLUSTER --namespace cert-manager logs --selector app=webhook
          kubectl --context $GKE_CLUSTER --namespace cert-manager logs --selector app=webhook
          # inspect ziti-admission-webhook
          kubectl --context $AWS_CLUSTER --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context $GKE_CLUSTER --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context $AWS_CLUSTER --namespace ziti logs --selector app=ziti-admission-webhook
          kubectl --context $GKE_CLUSTER --namespace ziti logs --selector app=ziti-admission-webhook
          # inspect ziti-edge-tunnel
          journalctl -lu ziti-edge-tunnel.service
  cleanup-debug-delay:
    if: failure()
    needs: [regression_test]
    runs-on: ubuntu-24.04
    steps:
      - name: wait 30 minutes
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          sleep 1800
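  # the cleanup jobs below depend on the debug delay but run with if: always(), so the NetFoundry
  # network and both clusters are torn down whether the regression test passed, failed, or the
  # delay was skipped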
  cleanup-nf:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/nf-network-cleanup
        with:
          network-name: ${{ env.NF_NETWORK_NAME }}
          access-token: ${{ env.NF_ACCESS_TOKEN }}
  cleanup-eks:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/eks-cluster-cleanup
        with:
          cluster-name: ${{ env.CLUSTER_NAME }}
          region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
  cleanup-gke:
    needs: [cleanup-debug-delay]
    if: always()
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/gke-cluster-cleanup
        with:
          cluster-name: ${{ env.CLUSTER_NAME }}
          region: ${{ env.GKE_REGION }}
          workload-identity-provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service-account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}