name: pr
on:
  pull_request:
    types: [opened, synchronize]
  workflow_dispatch:
env:
  CLUSTER_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
  AWS_REGION: us-west-2
  GKE_REGION: us-central1
  GKE_NETWORK_NAME: default
  GKE_SUBNETWORK_NAME: default
  NF_NETWORK_NAME: ziti-k8s-agent-regression-${{ github.run_id }}
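  # The GKE steps below reference $GCP_PROJECT, which the workflow never
  # defined; it is assumed here to come from a repository configuration
  # variable. Adjust the source if the project ID lives elsewhere.
  GCP_PROJECT: ${{ vars.GCP_PROJECT }}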
jobs:
  build_deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: netfoundry/ziti-k8s-agent:${{ github.run_id }}
  create-eks:
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: install aws eksctl
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get the cli for aws eks
          curl --silent --show-error --fail --location \
            https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz \
            | tar xz -C /tmp
          sudo install /tmp/eksctl /usr/local/bin/
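          # sanity check that the binary landed on PATH
          eksctl version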
      - name: create-eks-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
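          # the heredoc delimiter is unquoted, so the shell expands
          # $CLUSTER_NAME and $AWS_REGION while writing eks-cluster.yaml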
          cat <<YAML >eks-cluster.yaml
          apiVersion: eksctl.io/v1alpha5
          kind: ClusterConfig
          metadata:
            name: $CLUSTER_NAME
            region: $AWS_REGION
            version: "1.30"
          managedNodeGroups:
            - name: ng-1
              instanceType: t3.medium
              iam:
                withAddonPolicies:
                  ebs: true
                  fsx: true
                  efs: true
              desiredCapacity: 2
              privateNetworking: true
              labels:
                nodegroup-type: workloads
              tags:
                nodegroup-role: worker
          vpc:
            cidr: 10.10.0.0/16
            publicAccessCIDRs: []
            # keep the API endpoint publicly reachable so the GitHub runner can
            # use kubectl; private access keeps node-to-control-plane traffic internal
            clusterEndpoints:
              publicAccess: true
              privateAccess: true
          YAML
          # delete the cluster if it already exists, which shouldn't happen because
          # CLUSTER_NAME is unique to the workflow run ID
          STATUS="$(eksctl get clusters --region "$AWS_REGION" -o json \
            | jq -r --arg cluster_name "$CLUSTER_NAME" \
              '.[] | select(.name==$cluster_name).name'
          )"
          if [[ -n "$STATUS" ]]; then
            eksctl delete cluster -f ./eks-cluster.yaml --force --disable-nodegroup-eviction
          fi
          # create the cluster
          eksctl create cluster -f ./eks-cluster.yaml
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
          if [[ -z "$AWS_ACCOUNT_ID" ]]; then
            echo "ERROR: AWS_ACCOUNT_ID is empty" >&2
            exit 1
          fi
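          # 'aws eks update-kubeconfig' names the kubeconfig context after the
          # cluster ARN, so the regression job can address this cluster by ARN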
          CONTEXT_NAME="arn:aws:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}"
          echo "context=$CONTEXT_NAME" | tee -a "$GITHUB_OUTPUT"
  create-gke:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write
    outputs:
      cluster_context: ${{ steps.set-context.outputs.context }}
    steps:
      - uses: actions/checkout@v4
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: create-gke-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # delete the cluster if it already exists, which shouldn't happen because
          # CLUSTER_NAME is unique to the workflow run ID
          STATUS="$(
            gcloud container --project "$GCP_PROJECT" clusters list --region "$GKE_REGION" --format json \
              | jq -r --arg cluster_name "$CLUSTER_NAME" '.[] | select(.name==$cluster_name).status'
          )"
          if [[ -n "$STATUS" ]]; then
            gcloud container --project "$GCP_PROJECT" clusters delete "$CLUSTER_NAME" --region "$GKE_REGION" --quiet
          fi
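          # 'create-auto' provisions a GKE Autopilot cluster; the /17 and /22
          # masks cap the pod and service IP ranges GKE carves out of the subnet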
          # create the cluster
          gcloud container --project "$GCP_PROJECT" clusters create-auto "$CLUSTER_NAME" \
            --region "$GKE_REGION" \
            --release-channel "regular" \
            --network "projects/$GCP_PROJECT/global/networks/$GKE_NETWORK_NAME" \
            --subnetwork "projects/$GCP_PROJECT/regions/$GKE_REGION/subnetworks/$GKE_SUBNETWORK_NAME" \
            --cluster-ipv4-cidr "/17" \
            --services-ipv4-cidr "/22"
      - name: set-context-name
        id: set-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
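          # 'gcloud container clusters get-credentials' writes the kubeconfig
          # context as gke_<project>_<location>_<cluster>, reproduced here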
          CONTEXT_NAME="gke_${GCP_PROJECT}_${GKE_REGION}_${CLUSTER_NAME}"
          echo "context=$CONTEXT_NAME" | tee -a "$GITHUB_OUTPUT"
  regression_test:
    needs: [build_deploy, create-eks, create-gke]
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write
    env:
      AWS_CLUSTER: ${{ needs.create-eks.outputs.cluster_context }}
      GKE_CLUSTER: ${{ needs.create-gke.outputs.cluster_context }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check Run ID
        run: echo ${{ github.run_id }}
      - name: install-kubectl
        uses: azure/setup-kubectl@v3
        with:
          version: latest
      - name: install-aws-cli
        uses: unfor19/install-aws-cli-action@v1
        with:
          version: 2
          verbose: false
          arch: amd64
      - name: install postman, ziti-edge-tunnel, and ziti
        shell: bash
        run: |
          # get postman
          curl -o- --silent --fail --location https://dl-cli.pstmn.io/install/linux64.sh | bash
          # get ziti-edge-tunnel
          curl --silent --fail --location https://get.openziti.io/tun/scripts/install-ubuntu.bash | bash
          sudo systemctl start ziti-edge-tunnel.service
          # wait up to 30s for the tunneler service to become active
          for i in {1..30}; do
            if sudo systemctl is-active --quiet ziti-edge-tunnel.service; then
              break
            fi
            sleep 1
          done
          # assert the service is active; a non-zero exit here fails the step
          sudo systemctl is-active --quiet ziti-edge-tunnel.service
          sudo ziti-edge-tunnel set_log_level --loglevel DEBUG
          # get ziti
          curl --silent --fail --location https://get.openziti.io/install.bash | sudo bash -s openziti
      - name: Authenticate to AWS Cloud
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: configure-eks-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          aws eks update-kubeconfig --name "$CLUSTER_NAME" --region "$AWS_REGION"
      - name: Authenticate to Google Cloud
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
      - name: install-gcloud-cli
        uses: google-github-actions/setup-gcloud@v2
        with:
          version: latest
          install_components: gke-gcloud-auth-plugin
      - name: configure-gke-context
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          gcloud container clusters get-credentials "$CLUSTER_NAME" --region "$GKE_REGION" --project "$GCP_PROJECT"
      - name: get-cluster-info-and-all-pods
        shell: bash
        run: |
          ATTEMPTS=30
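          # poll both API servers; '! (( ATTEMPTS-- ))' ends the loop once the
          # counter reaches zero so an unreachable cluster can't hang the job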
          until \
            (
              kubectl cluster-info --context "$AWS_CLUSTER" &>/dev/null && \
              kubectl cluster-info --context "$GKE_CLUSTER" &>/dev/null
            ) || ! (( ATTEMPTS-- ))
          do
            echo "Waiting for clusters"
            sleep 1
          done
          kubectl get pods --all-namespaces --context "$AWS_CLUSTER"
          kubectl get pods --all-namespaces --context "$GKE_CLUSTER"
      - name: create-nf-network-services
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          OAUTH_RESP="$(curl --silent --fail --location --request POST \
            https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token \
            --header 'Content-Type: application/x-www-form-urlencoded' \
            --user "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}:${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}" \
            --data-urlencode 'grant_type=client_credentials')"
          if [[ -z "$OAUTH_RESP" ]]; then
            echo "ERROR: OAUTH_RESP is empty" >&2
            exit 1
          fi
          ACCESS_TOKEN="$(echo "$OAUTH_RESP" | jq -r .access_token)"
          if [[ -z "$ACCESS_TOKEN" ]]; then
            echo "ERROR: ACCESS_TOKEN is empty" >&2
            exit 1
          fi
          # keep the derived token out of xtrace output, as the cleanup job does
          echo "::add-mask::$ACCESS_TOKEN"
          ACCESS_TOKEN_TYPE="$(echo "$OAUTH_RESP" | jq -r .token_type)"
          if [[ -z "$ACCESS_TOKEN_TYPE" ]]; then
            echo "ERROR: ACCESS_TOKEN_TYPE is empty" >&2
            exit 1
          fi
          NF_NETWORK_LIST="$(curl --silent --fail --location --request GET \
            https://gateway.production.netfoundry.io/core/v3/networks \
            --header 'Content-Type: application/json' \
            --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN")"
          if [[ -z "$NF_NETWORK_LIST" ]]; then
            echo "ERROR: NF_NETWORK_LIST is empty" >&2
            exit 1
          fi
          NF_NETWORK_ID="$(
            echo "$NF_NETWORK_LIST" | jq -r --arg nf_network_name "$NF_NETWORK_NAME" \
              '._embedded.networkList[] | select(.name==$nf_network_name).id'
          )"
          # delete old network if it exists, which shouldn't happen because
          # NF_NETWORK_NAME is unique to the workflow run ID
          if [[ -n "$NF_NETWORK_ID" ]]; then
            NF_NETWORK_STATUS="$(curl --silent --fail --location --request DELETE \
              https://gateway.production.netfoundry.io/core/v3/networks/"$NF_NETWORK_ID" \
              --header 'Content-Type: application/json' \
              --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN")"
            if [[ -z "$NF_NETWORK_STATUS" ]]; then
              echo "ERROR: NF_NETWORK_STATUS for DELETE operation is empty" >&2
              exit 1
            fi
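            # network deletion is asynchronous; this grace period gives the API
            # time to finish tearing down before a same-named network is created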
            sleep 120
          fi
          # compose a Postman global variable file for creating the NF network for this workflow run ID
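          # the unquoted JSON delimiter lets $(uuidgen) and $NF_NETWORK_NAME expand;
          # the client credentials below are redacted from the tee'd log output by
          # GitHub's built-in secret masking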
          cat <<JSON | tee nf-network-services-create.postman_global.json
          {
            "id": "$(uuidgen)",
            "name": "nf-network-services-create",
            "_postman_variable_scope": "global",
            "values": [
              {
                "key": "api",
                "value": "https://gateway.production.netfoundry.io/core/v3",
                "enabled": true
              },
              {
                "key": "token",
                "value": "https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token",
                "enabled": true
              },
              {
                "key": "jwt_token",
                "value": "",
                "enabled": true
              },
              {
                "key": "jwt_type",
                "value": "Bearer",
                "enabled": true
              },
              {
                "key": "client_id",
                "value": "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}",
                "type": "default",
                "enabled": true
              },
              {
                "key": "client_secret",
                "value": "${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}",
                "type": "default",
                "enabled": true
              },
              {
                "key": "networkName",
                "value": "$NF_NETWORK_NAME",
                "type": "any",
                "enabled": true
              },
              {
                "key": "networkId",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "networkStatus",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "api_token",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "controller-api-endpoint",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "edgeRouterId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "mopEdgeRouterId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "mopEdgeRouterStatus",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "clientIdentityId",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "adminIdentityId",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "clientIdentityJwt",
                "value": "",
                "type": "any",
                "enabled": true
              },
              {
                "key": "adminIdentityJwt",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId1",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId1",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId2",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId2",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId3",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId3",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "hostConfigId4",
                "value": "",
                "type": "default",
                "enabled": true
              },
              {
                "key": "interceptConfigId4",
                "value": "",
                "type": "default",
                "enabled": true
              }
            ]
          }
          JSON
          # validate the Postman global variable file
          if [[ ! -s nf-network-services-create.postman_global.json ]]; then
            echo "ERROR: nf-network-services-create.postman_global.json is empty" >&2
            exit 1
          elif ! jq -e . < nf-network-services-create.postman_global.json >/dev/null; then
            echo "ERROR: nf-network-services-create.postman_global.json is not valid json" >&2
            exit 1
          fi
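          # run the collection that provisions the network and its resources;
          # -k skips TLS verification for the collection's API calls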
          postman collection run \
            test/nf-network-services-create.postman_collection.json \
            -g nf-network-services-create.postman_global.json \
            -k
          NF_NETWORK_LIST="$(curl --silent --fail --location --request GET \
            https://gateway.production.netfoundry.io/core/v3/networks \
            --header 'Content-Type: application/json' \
            --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN")"
          if [[ -z "$NF_NETWORK_LIST" ]]; then
            echo "ERROR: NF_NETWORK_LIST is empty" >&2
            exit 1
          fi
          NF_NETWORK_ID="$(echo "$NF_NETWORK_LIST" | \
            jq -r --arg nf_network_name "$NF_NETWORK_NAME" \
              '._embedded.networkList[] | select(.name==$nf_network_name).id')"
          if [[ -z "$NF_NETWORK_ID" ]]; then
            echo "ERROR: NF_NETWORK_ID is empty" >&2
            exit 1
          fi
          echo NF_NETWORK_ID="$NF_NETWORK_ID" | tee -a "$GITHUB_ENV"
          set +o xtrace # mask the oauth access token
          ZITI_SESSION_OBJ="$(
            curl --silent --fail --location --request POST \
              https://gateway.production.netfoundry.io/core/v3/networks/"$NF_NETWORK_ID"/exchange \
              --header 'Content-Type: application/json' \
              --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN" \
              --data '{"type": "session"}'
          )"
          if [[ -z "$ZITI_SESSION_OBJ" ]]; then
            echo "ERROR: ZITI_SESSION_OBJ is empty" >&2
            exit 1
          fi
          ZITI_SESSION_TOKEN=$(echo "$ZITI_SESSION_OBJ" | jq -r .value)
          echo "::add-mask::$ZITI_SESSION_TOKEN"
          set -o xtrace
          if [[ -z "$ZITI_SESSION_TOKEN" ]]; then
            echo "ERROR: ZITI_SESSION_TOKEN is empty" >&2
            exit 1
          fi
          # call the Ziti controller with the session token to get the list of identities
          NF_IDENTITY_LIST="$(
            curl --insecure --silent --fail --location --request GET \
              "$(echo "$ZITI_SESSION_OBJ" | jq -r .networkControllerUrl)"/identities \
              --header 'Content-Type: application/json' \
              --header "zt-session: $ZITI_SESSION_TOKEN"
          )"
          if [[ -z "$NF_IDENTITY_LIST" ]]; then
            echo "ERROR: NF_IDENTITY_LIST is empty" >&2
            exit 1
          fi
          # enroll adminUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="adminUser").enrollment.ott.jwt' | tee adminUser.jwt
          if [[ ! -s adminUser.jwt ]]; then
            echo "ERROR: adminUser.jwt is empty" >&2
            exit 1
          fi
          ziti edge enroll -j adminUser.jwt -o adminUser.json
          if [[ ! -s adminUser.json ]]; then
            echo "ERROR: adminUser.json is empty" >&2
            exit 1
          elif ! jq -e . < adminUser.json >/dev/null; then
            echo "ERROR: adminUser.json is not valid json" >&2
            exit 1
          fi
          echo "IDENTITY_FILE=adminUser.json" | tee -a "$GITHUB_ENV"
          # enroll testUser
          echo "$NF_IDENTITY_LIST" | jq -r '.data[] | select(.name=="testUser").enrollment.ott.jwt' | tee testUser.jwt
          if [[ ! -s testUser.jwt ]]; then
            echo "ERROR: testUser.jwt is empty" >&2
            exit 1
          fi
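          # hand the one-time enrollment token to the running tunneler, which
          # enrolls and persists the identity under the name 'testUser'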
          sudo ziti-edge-tunnel add --jwt "$(< ./testUser.jwt)" --identity testUser
      - name: Deploy Webhook to Clusters
        shell: bash
        env:
          ZITI_AGENT_IMAGE: netfoundry/ziti-k8s-agent:${{ github.run_id }}
          ZITI_AGENT_NAMESPACE: ziti
        run: |
          set -o pipefail
          set -o xtrace
          # IDENTITY_FILE was exported to GITHUB_ENV by the create-nf-network-services step
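          # SIDECAR_SELECTORS controls how the generated webhook manifest matches
          # workloads for sidecar injection: by namespace label on the EKS cluster
          # and by pod label on the GKE cluster, so both selector modes get exercised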
          SIDECAR_SELECTORS=namespace \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-namespace-selector.yaml
          SIDECAR_SELECTORS=pod \
            ./generate-ziti-agent-manifest.bash > ziti-k8s-agent-pod-selector.yaml
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --context "$AWS_CLUSTER"
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --context "$GKE_CLUSTER"
          : "Waiting for cert-manager pods to be ready in AWS cluster..."
          kubectl wait --for=condition=ready pod -l app=cert-manager -n cert-manager --timeout=240s --context "$AWS_CLUSTER"
          kubectl wait --for=condition=ready pod -l app=webhook -n cert-manager --timeout=240s --context "$AWS_CLUSTER"
          : "Waiting for cert-manager pods to be ready in GKE cluster..."
          kubectl wait --for=condition=ready pod -l app=cert-manager -n cert-manager --timeout=240s --context "$GKE_CLUSTER"
          kubectl wait --for=condition=ready pod -l app=webhook -n cert-manager --timeout=480s --context "$GKE_CLUSTER"
          kubectl apply -f ziti-k8s-agent-namespace-selector.yaml --context "$AWS_CLUSTER"
          kubectl apply -f ziti-k8s-agent-pod-selector.yaml --context "$GKE_CLUSTER"
          : "Waiting for ziti-admission-webhook pods to be ready in AWS cluster..."
          kubectl wait --for=condition=ready pod -l app=ziti-admission-webhook -n "$ZITI_AGENT_NAMESPACE" --timeout=120s --context "$AWS_CLUSTER"
          : "Waiting for ziti-admission-webhook pods to be ready in GKE cluster..."
          kubectl wait --for=condition=ready pod -l app=ziti-admission-webhook -n "$ZITI_AGENT_NAMESPACE" --timeout=120s --context "$GKE_CLUSTER"
      - name: deploy-bookinfo-app
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # EKS: the namespace label alone opts every pod in test1 into injection
          kubectl create namespace test1 --context "$AWS_CLUSTER"
          kubectl label namespace test1 openziti/ziti-tunnel=enabled --context "$AWS_CLUSTER"
          kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.22/samples/bookinfo/platform/kube/bookinfo.yaml --context "$AWS_CLUSTER" -n test1
          kubectl create namespace test2 --context "$GKE_CLUSTER"
          kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.22/samples/bookinfo/platform/kube/bookinfo.yaml --context "$GKE_CLUSTER" -n test2
          sleep 30
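          # GKE: the pod-selector webhook needs the label on the pod template, so
          # patching each deployment triggers a rollout with injected sidecars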
          kubectl patch deployment/ratings-v1 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          kubectl patch deployment/productpage-v1 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          kubectl patch deployment/details-v1 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          kubectl patch deployment/reviews-v1 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          kubectl patch deployment/reviews-v2 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          kubectl patch deployment/reviews-v3 -p '{"spec":{"template":{"metadata":{"labels":{"openziti/ziti-tunnel":"enabled"}}}}}' --context "$GKE_CLUSTER" -n test2
          # allow time for the rollouts and for identities and services to propagate
          sleep 120
      - name: run-testcase-01
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          rm -f ./testcase_pods.log ./testcase_curl_output.log
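          # record the pod inventory, then fetch the product page 40 times through
          # the ziti intercept address; test/verify_test_results.py evaluates both logs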
          kubectl get pods -n test1 --context "$AWS_CLUSTER" >> testcase_pods.log
          kubectl get pods -n test2 --context "$GKE_CLUSTER" >> testcase_pods.log
          for i in $(seq 1 40); do
            curl -s -X GET "http://productpage.ziti:9080/productpage?u=test" | grep reviews >> testcase_curl_output.log
          done
          cat testcase_curl_output.log
          cat testcase_pods.log
          test/verify_test_results.py
      - name: scaledown-2-testcase-02
        shell: bash
        run: |
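          # scale complementary halves of the app to zero in each cluster so the
          # remaining traffic in testcase-02 must traverse the ziti fabric between clusters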
          kubectl scale deploy details-v1 --replicas=0 -n test1 --context "$AWS_CLUSTER"
          kubectl scale deploy ratings-v1 --replicas=0 -n test1 --context "$AWS_CLUSTER"
          kubectl scale deploy productpage-v1 --replicas=0 -n test2 --context "$GKE_CLUSTER"
          kubectl scale deploy reviews-v1 --replicas=0 -n test2 --context "$GKE_CLUSTER"
          kubectl scale deploy reviews-v2 --replicas=0 -n test2 --context "$GKE_CLUSTER"
          kubectl scale deploy reviews-v3 --replicas=0 -n test2 --context "$GKE_CLUSTER"
          sleep 150
      - name: run-testcase-02
        shell: bash
        run: |
          rm -f ./testcase_pods.log ./testcase_curl_output.log
          kubectl get pods -n test1 --context "$AWS_CLUSTER" >> testcase_pods.log
          kubectl get pods -n test2 --context "$GKE_CLUSTER" >> testcase_pods.log
          for i in $(seq 1 40); do
            curl -s -X GET "http://productpage.ziti:9080/productpage?u=test" | grep reviews >> testcase_curl_output.log
          done
          cat testcase_curl_output.log
          cat testcase_pods.log
          test/verify_test_results.py
      - name: delete-bookinfo-app
        shell: bash
        run: |
          # delete with the same manifest that was applied above
          kubectl delete -f https://raw.githubusercontent.com/istio/istio/release-1.22/samples/bookinfo/platform/kube/bookinfo.yaml --context "$AWS_CLUSTER" -n test1
          kubectl delete -f https://raw.githubusercontent.com/istio/istio/release-1.22/samples/bookinfo/platform/kube/bookinfo.yaml --context "$GKE_CLUSTER" -n test2
          sleep 30
      - name: print logs
        if: failure()
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # inspect cert-manager app
          kubectl --context "$AWS_CLUSTER" --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context "$GKE_CLUSTER" --namespace cert-manager describe pod --selector app=cert-manager
          kubectl --context "$AWS_CLUSTER" --namespace cert-manager logs --selector app=cert-manager
          kubectl --context "$GKE_CLUSTER" --namespace cert-manager logs --selector app=cert-manager
          # inspect cert-manager webhook
          kubectl --context "$AWS_CLUSTER" --namespace cert-manager describe pod --selector app=webhook
          kubectl --context "$GKE_CLUSTER" --namespace cert-manager describe pod --selector app=webhook
          kubectl --context "$AWS_CLUSTER" --namespace cert-manager logs --selector app=webhook
          kubectl --context "$GKE_CLUSTER" --namespace cert-manager logs --selector app=webhook
          # inspect ziti-admission-webhook
          kubectl --context "$AWS_CLUSTER" --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context "$GKE_CLUSTER" --namespace ziti describe pod --selector app=ziti-admission-webhook
          kubectl --context "$AWS_CLUSTER" --namespace ziti logs --selector app=ziti-admission-webhook
          kubectl --context "$GKE_CLUSTER" --namespace ziti logs --selector app=ziti-admission-webhook
          # inspect ziti-edge-tunnel
          journalctl -lu ziti-edge-tunnel.service
  cleanup:
    if: always()
    needs: [regression_test]
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.AWS_ROLE_FOR_GITHUB }}
          role-session-name: GitHubActions
          audience: sts.amazonaws.com
      - name: install aws eksctl
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # get the cli for aws eks
          curl --silent --show-error --fail --location \
            https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz \
            | tar xz -C /tmp
          sudo install /tmp/eksctl /usr/local/bin/
      - name: sleep-if-failure
        # step-level failure() only reflects earlier steps in this job, so test
        # the needed job's result directly
        if: needs.regression_test.result == 'failure'
        shell: bash
        run: |
          # delay cleanup by 30 minutes if the regression test failed, to allow for debugging
          sleep 1800
      - name: delete-eks-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # check whether the cluster exists before attempting deletion
          if aws eks describe-cluster --name "$CLUSTER_NAME" --region "$AWS_REGION" >/dev/null 2>&1; then
            eksctl delete cluster --name "$CLUSTER_NAME" --region "$AWS_REGION" --force --disable-nodegroup-eviction
          else
            echo "Cluster $CLUSTER_NAME does not exist in region $AWS_REGION, skipping deletion"
          fi
      - name: Configure GCP Credentials
        uses: google-github-actions/auth@v2
        with:
          workload_identity_provider: ${{ secrets.GCLOUD_WL_ID_FOR_GITHUB }}
          service_account: ${{ secrets.GCLOUD_SVC_ACCT_FOR_GITHUB }}
          audience: ${{ secrets.GCLOUD_AUD_ID_FOR_GITHUB }}
      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2
      - name: delete-gke-cluster
        shell: bash
        run: |
          set -o pipefail
          set -o xtrace
          # mirror the EKS guard: skip deletion if the cluster was never created
          if gcloud container clusters describe "$CLUSTER_NAME" --region "$GKE_REGION" --project "$GCP_PROJECT" >/dev/null 2>&1; then
            gcloud container --project "$GCP_PROJECT" clusters delete "$CLUSTER_NAME" --region "$GKE_REGION" --quiet
          else
            echo "Cluster $CLUSTER_NAME does not exist in region $GKE_REGION, skipping deletion"
          fi
      - name: delete-nf-network
        shell: bash
        run: |
          set -o pipefail
          set +o xtrace # mask the oauth access token
          OAUTH_RESP="$(
            curl --silent --fail --location --request POST \
              https://netfoundry-production-xfjiye.auth.us-east-1.amazoncognito.com/oauth2/token \
              --header 'Content-Type: application/x-www-form-urlencoded' \
              --user "${{ secrets.NF_API_CLIENT_ID_FOR_GITHUB }}:${{ secrets.NF_API_CLIENT_PW_FOR_GITHUB }}" \
              --data-urlencode 'grant_type=client_credentials'
          )"
          if [[ -z "$OAUTH_RESP" ]]; then
            echo "ERROR: OAUTH_RESP is empty" >&2
            exit 1
          fi
          ACCESS_TOKEN="$(echo "$OAUTH_RESP" | jq -r .access_token)"
          if [[ -z "$ACCESS_TOKEN" ]]; then
            echo "ERROR: ACCESS_TOKEN is empty" >&2
            exit 1
          fi
          echo "::add-mask::$ACCESS_TOKEN"
          ACCESS_TOKEN_TYPE="$(echo "$OAUTH_RESP" | jq -r .token_type)"
          set -o xtrace
          NF_NETWORK_LIST="$(
            curl --silent --fail --location --request GET \
              https://gateway.production.netfoundry.io/core/v3/networks \
              --header 'Content-Type: application/json' \
              --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN"
          )"
          if [[ -z "$NF_NETWORK_LIST" ]]; then
            echo "ERROR: NF_NETWORK_LIST is empty" >&2
            exit 1
          fi
          NF_NETWORK_ID="$(
            echo "$NF_NETWORK_LIST" | jq -r --arg nf_network_name "$NF_NETWORK_NAME" \
              '._embedded.networkList[] | select(.name==$nf_network_name).id'
          )"
          if [[ -z "$NF_NETWORK_ID" ]]; then
            echo "ERROR: NF_NETWORK_ID is empty" >&2
            exit 1
          fi
          echo NF_NETWORK_ID="$NF_NETWORK_ID" | tee -a "$GITHUB_ENV"
          NF_NETWORK_STATUS_RESP="$(
            curl --silent --fail --location --request DELETE \
              https://gateway.production.netfoundry.io/core/v3/networks/"$NF_NETWORK_ID" \
              --header 'Content-Type: application/json' \
              --header "Authorization: $ACCESS_TOKEN_TYPE $ACCESS_TOKEN"
          )"
          if [[ -z "$NF_NETWORK_STATUS_RESP" ]]; then
            echo "ERROR: NF_NETWORK_STATUS_RESP is empty" >&2
            exit 1
          fi
          NF_NETWORK_STATUS="$(echo "$NF_NETWORK_STATUS_RESP" | jq -r '.status')"
          if [[ -n "$NF_NETWORK_STATUS" ]]; then
            echo "INFO: NF_NETWORK_STATUS: $NF_NETWORK_STATUS"
          else
            echo "ERROR: NF_NETWORK_STATUS is empty" >&2
            exit 1
          fi