From 421f4b03c0881d6899f78e122c802c52e5320ad1 Mon Sep 17 00:00:00 2001 From: Pradeep Agarwal Date: Fri, 24 May 2024 15:39:46 +0530 Subject: [PATCH] Push parent charts for 1.2.0 release (#38) * Updated workshop, service mesh recipes and few fixes --- charts/tp-cp-bootstrap/Chart.yaml | 2 +- .../charts/compute-services/Chart.yaml | 2 +- .../templates/deployment.yaml | 8 ++ .../charts/otel-collector/Chart.yaml | 2 +- .../otel-collector/templates/_helpers.tpl | 3 + .../charts/otel-collector/templates/_pod.tpl | 4 +- charts/tp-cp-configuration/Chart.yaml | 2 +- .../files/tp-cp-subscription/Chart.yaml | 2 +- .../files/tp-cp-subscription/values.yaml | 2 +- charts/tp-cp-servicemesh-recipes/Chart.yaml | 2 +- .../templates/servicemesh.yaml | 6 ++ docs/workshop/cp-cluster/eks/README.md | 16 ++-- .../scripts/eks/clean-up-control-plane.sh | 80 +++++++++++-------- 13 files changed, 79 insertions(+), 52 deletions(-) diff --git a/charts/tp-cp-bootstrap/Chart.yaml b/charts/tp-cp-bootstrap/Chart.yaml index 509d8135..26811b55 100644 --- a/charts/tp-cp-bootstrap/Chart.yaml +++ b/charts/tp-cp-bootstrap/Chart.yaml @@ -8,7 +8,7 @@ apiVersion: v2 name: tp-cp-bootstrap description: TIBCO Platform Control Plane bootstrap chart type: application -version: 1.2.25 +version: 1.2.27 appVersion: "1.2.0" keywords: - tibco-platform diff --git a/charts/tp-cp-bootstrap/charts/compute-services/Chart.yaml b/charts/tp-cp-bootstrap/charts/compute-services/Chart.yaml index 3a85785a..e87ac9ad 100644 --- a/charts/tp-cp-bootstrap/charts/compute-services/Chart.yaml +++ b/charts/tp-cp-bootstrap/charts/compute-services/Chart.yaml @@ -7,5 +7,5 @@ apiVersion: v2 name: compute-services -version: 1.2.8 +version: 1.2.9 appVersion: 1.2.0 diff --git a/charts/tp-cp-bootstrap/charts/compute-services/templates/deployment.yaml b/charts/tp-cp-bootstrap/charts/compute-services/templates/deployment.yaml index 73ff7c20..bda7ff75 100644 --- a/charts/tp-cp-bootstrap/charts/compute-services/templates/deployment.yaml +++ b/charts/tp-cp-bootstrap/charts/compute-services/templates/deployment.yaml @@ -100,6 +100,14 @@ spec: value: {{ .Values.dpMetadata.dpConfigureNamespaceChartVersion | quote }} - name: HELM_CLI_TIMEOUT_SECONDS value: "60" + {{- if .Values.global.tibco.containerRegistry.username }} + - name: CONTAINER_REGISTRY_USERNAME + value: {{ .Values.global.tibco.containerRegistry.username }} + {{- end }} + {{- if .Values.global.tibco.containerRegistry.password }} + - name: CONTAINER_REGISTRY_PASSWORD + value: {{ .Values.global.tibco.containerRegistry.password }} + {{- end }} {{- if .Values.dpHelmRepositories }} - name: HELM_REPO_CONFIG value: /helm/config/repo.json diff --git a/charts/tp-cp-bootstrap/charts/otel-collector/Chart.yaml b/charts/tp-cp-bootstrap/charts/otel-collector/Chart.yaml index 6d2acadf..bad56f51 100644 --- a/charts/tp-cp-bootstrap/charts/otel-collector/Chart.yaml +++ b/charts/tp-cp-bootstrap/charts/otel-collector/Chart.yaml @@ -7,7 +7,7 @@ apiVersion: v2 name: otel-collector -version: 0.89.3 +version: 0.89.4 description: OpenTelemetry Collector Helm chart for Kubernetes type: application appVersion: 0.98.0 diff --git a/charts/tp-cp-bootstrap/charts/otel-collector/templates/_helpers.tpl b/charts/tp-cp-bootstrap/charts/otel-collector/templates/_helpers.tpl index 8332be27..81665205 100644 --- a/charts/tp-cp-bootstrap/charts/otel-collector/templates/_helpers.tpl +++ b/charts/tp-cp-bootstrap/charts/otel-collector/templates/_helpers.tpl @@ -246,3 +246,6 @@ The capitalization is important for StatefulSet. 
{{- print "StatefulSet" -}} {{- end -}} {{- end }} + + +{{- define "otel-collector.container-registry.secret" }}tibco-container-registry-credentials{{end}} diff --git a/charts/tp-cp-bootstrap/charts/otel-collector/templates/_pod.tpl b/charts/tp-cp-bootstrap/charts/otel-collector/templates/_pod.tpl index 1c8488b3..f336d515 100644 --- a/charts/tp-cp-bootstrap/charts/otel-collector/templates/_pod.tpl +++ b/charts/tp-cp-bootstrap/charts/otel-collector/templates/_pod.tpl @@ -7,10 +7,8 @@ in the license file that is distributed with this file. */}} {{- define "otel-collector.pod" -}} -{{- with .Values.imagePullSecrets }} imagePullSecrets: - {{- toYaml . | nindent 2 }} -{{- end }} +- name: {{ include "otel-collector.container-registry.secret" . }} serviceAccountName: {{ include "otel-collector.serviceAccountName" . }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 2 }} diff --git a/charts/tp-cp-configuration/Chart.yaml b/charts/tp-cp-configuration/Chart.yaml index b912f7bb..0c051dae 100644 --- a/charts/tp-cp-configuration/Chart.yaml +++ b/charts/tp-cp-configuration/Chart.yaml @@ -9,7 +9,7 @@ apiVersion: v2 name: tp-cp-configuration description: TIBCO Platform Control Plane Configuration chart type: application -version: 1.2.15 +version: 1.2.16 appVersion: "1.2.0" keywords: - tibco-platform diff --git a/charts/tp-cp-configuration/files/tp-cp-subscription/Chart.yaml b/charts/tp-cp-configuration/files/tp-cp-subscription/Chart.yaml index 5981e695..62f58bc4 100644 --- a/charts/tp-cp-configuration/files/tp-cp-subscription/Chart.yaml +++ b/charts/tp-cp-configuration/files/tp-cp-subscription/Chart.yaml @@ -9,5 +9,5 @@ apiVersion: v2 name: tp-cp-subscription description: A Helm chart for provision subscription on TIBCO Platform Control Plane type: application -version: 1.2.4 +version: 1.2.5 appVersion: "1.2.0" diff --git a/charts/tp-cp-configuration/files/tp-cp-subscription/values.yaml b/charts/tp-cp-configuration/files/tp-cp-subscription/values.yaml index dabbb58b..0fbc2235 100644 --- a/charts/tp-cp-configuration/files/tp-cp-subscription/values.yaml +++ b/charts/tp-cp-configuration/files/tp-cp-subscription/values.yaml @@ -19,7 +19,7 @@ image: name: tp-hybrid-server registry: "" repo: "" - tag: 20-distroless + tag: 26-distroless pullPolicy: IfNotPresent fluentbit: diff --git a/charts/tp-cp-servicemesh-recipes/Chart.yaml b/charts/tp-cp-servicemesh-recipes/Chart.yaml index fa9f176f..3e60aa79 100644 --- a/charts/tp-cp-servicemesh-recipes/Chart.yaml +++ b/charts/tp-cp-servicemesh-recipes/Chart.yaml @@ -6,7 +6,7 @@ apiVersion: v2 name: tp-cp-servicemesh-recipes description: TIBCO Platform Infrastructure Data Plane recipes type: application -version: 1.2.12 +version: 1.2.13 appVersion: "1.2.0" keywords: - tibco-platform diff --git a/charts/tp-cp-servicemesh-recipes/templates/servicemesh.yaml b/charts/tp-cp-servicemesh-recipes/templates/servicemesh.yaml index cdd8f523..17b4e2d3 100644 --- a/charts/tp-cp-servicemesh-recipes/templates/servicemesh.yaml +++ b/charts/tp-cp-servicemesh-recipes/templates/servicemesh.yaml @@ -43,6 +43,9 @@ data: - name: istio-crd namespace: ${NAMESPACE} version: {{ .Values.capabilities.servicemesh.version | quote }} + repository: + chartMuseum: + host: ${HELM_REPO} values: - content: | serviceAccount: @@ -55,6 +58,9 @@ data: - name: istio-istiod namespace: ${NAMESPACE} version: {{ .Values.capabilities.servicemesh.version | quote }} + repository: + chartMuseum: + host: ${HELM_REPO} values: - content: | serviceAccount: diff --git a/docs/workshop/cp-cluster/eks/README.md 
b/docs/workshop/cp-cluster/eks/README.md index 77a36c15..a7f89000 100644 --- a/docs/workshop/cp-cluster/eks/README.md +++ b/docs/workshop/cp-cluster/eks/README.md @@ -14,7 +14,6 @@ Table of Contents * [Using AWS CLI](#using-aws-cli) * [Create EFS](#setup-efs) * [Create RDS instance](#create-rds-instance) - * [Create Redis replication group](#create-redis-replication-group) * [Using Crossplane](#using-crossplane) * [Pre-requisites](#pre-requisites) * [Install Crossplane](#install-crossplane) @@ -111,10 +110,6 @@ export CP_RDS_MASTER_PASSWORD="cp_DBAdminPassword" # replace with desired userna export CP_RDS_INSTANCE_CLASS="db.t3.medium" # replace with desired db instance class export CP_RDS_PORT="5432" # replace with desired db port -## TIBCO® Control Plane Redis specific details -export CP_REDIS_CACHE_NODE_TYPE="cache.t4g.medium" # replace with desired redis cache node type -export CP_REDIS_PORT="6379" # replace with desired redis port - ## Required by external-dns chart export CP_MAIN_INGRESS_CONTROLLER=alb export CP_INGRESS_CONTROLLER=nginx # This value can be same as CP_MAIN_INGRESS_CONTROLLER or nginx if you're using nginx @@ -474,7 +469,6 @@ EOF We will create [crossplane composite resource definitions (XRDs)](https://docs.crossplane.io/v1.13/concepts/composite-resource-definitions/) and [crossplane compositions](https://docs.crossplane.io/v1.13/concepts/compositions/) for - EFS - RDS database instance -- Redis replication group - IAM role, policies and role-policy attachments - SES email identity - kubernetes storage class @@ -534,6 +528,11 @@ As part of claims, we will create following resources: This also creates the secrets in the namespace where the chart will be deployed. TIBCO® Control Plane services can access these resources using the secrets. +> [!IMPORTANT] +> Please note that the RDS DB instance of PostgreSQL created using below crossplane claim does not enforce SSL by default. +> To enforce SSL connection, please check [Requiring an SSL connection to a PostgreSQL DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/PostgreSQL.Concepts.General.SSL.html#PostgreSQL.Concepts.General.SSL.Requiring) + + ```bash export CP_RESOURCE_PREFIX="platform" # unique id to add to AWS resources as prefix (alphanumeric string of max 10 chars) @@ -663,7 +662,7 @@ The `alb` ingress class is used by AWS ALB ingress controller. | EFS storage class | efs-sc | used for TIBCO® Control Plane | | RDS DB instance resource arn (if created using script) | arn:aws:rds:\:\:db:${CP_CLUSTER_NAME}-db | used for TIBCO® Control Plane | | RDS DB details (if created using crossplane) | Secret `${CP_INSTANCE_ID}-rds-details` in `${CP_INSTANCE_ID}-ns` namespace Refer [Install claims](#install-claims) section | used for TIBCO® Control Plane | -| Network Policies Details for Control Plane Namespace | [Control Plane Network Policies Document](https://docs.tibco.com/emp/platform-cp/1.0.0/doc/html/UserGuide/controlling-traffic-with-network-policies.htm) | +| Network Policies Details for Control Plane Namespace | [Control Plane Network Policies Document](https://docs.tibco.com/emp/platform-cp/1.2.0/doc/html/Default.htm#Installation/control-plane-network-policies.htm) | # Control Plane Deployment @@ -740,6 +739,7 @@ ingress-nginx: controller: config: use-forwarded-headers: "true" + proxy-body-size: "150m" EOF ``` Use the following command to get the ingress class name. 
@@ -812,7 +812,7 @@ Please proceed with deployment of TIBCO® Control Plane on your EKS cluster as p # Clean up -Refer to [the steps to delete the Control Plane](https://docs.tibco.com/emp/platform-cp/1.0.0/doc/html/Default.htm#UserGuide/deleting-control-planes.htm?TocPath=Managing%2520Data%2520Planes%257C_____2). +Refer to [the steps to delete the Control Plane](https://docs.tibco.com/emp/platform-cp/1.2.0/doc/html/Default.htm#Installation/uninstalling-tibco-control-plane.htm). Change the directory to [scripts/eks/](../../scripts/eks) to proceed with the next steps. ```bash diff --git a/docs/workshop/scripts/eks/clean-up-control-plane.sh b/docs/workshop/scripts/eks/clean-up-control-plane.sh index d3960124..74bb4d90 100755 --- a/docs/workshop/scripts/eks/clean-up-control-plane.sh +++ b/docs/workshop/scripts/eks/clean-up-control-plane.sh @@ -56,8 +56,8 @@ if [ "${CP_CROSSPLANE_ENABLED}" == "false" ]; then echo "Deleting RDS db instance" aws rds delete-db-instance --db-instance-identifier ${CP_CLUSTER_NAME}-db --skip-final-snapshot --no-paginate - echo "Deleting Redis" - aws elasticache delete-replication-group --replication-group-id ${CP_CLUSTER_NAME}-redis --no-retain-primary-cluster --no-paginate + # echo "Deleting Redis" + # aws elasticache delete-replication-group --replication-group-id ${CP_CLUSTER_NAME}-redis --no-retain-primary-cluster --no-paginate echo "Waiting to delete RDS db instance" aws rds wait db-instance-deleted --db-instance-identifier ${CP_CLUSTER_NAME}-db @@ -84,36 +84,36 @@ if [ "${CP_CROSSPLANE_ENABLED}" == "false" ]; then done fi - echo "Waiting to delete Redis" - aws elasticache wait replication-group-deleted --replication-group-id ${CP_CLUSTER_NAME}-redis - _deleted=$? - [ ${_deleted} -eq 0 ] || { echo "### ERROR: Failed to delete Redis replication group ${CP_CLUSTER_NAME}-redis after 10 minutes (15 seconds, 40 checks)"; echo "Error code ${_deleted}"; } - if [ ${_deleted} -ne 0 ]; then - echo "Waiting additional 5 minutes to delete Redis replication group" - for n in {1..5}; - do - _status=$(aws elasticache describe-replication-groups --replication-group-id ${CP_CLUSTER_NAME}-redis --query ReplicationGroups[0].Status --output text) - _ret=$? - if [ ${_ret} -eq 0 -o "${_status}" -eq "deleting" ]; then - # sleep for a minute - echo "Redis replication group ${CP_CLUSTER_NAME}-redis is in ${_status} state; Waiting for 1 more minute to check status" - sleep 60 - elif [ ${_ret} -eq 254 -o "${_status}" -eq "" ]; then - # return code 254 indicates that db instance is not found - break - else - echo "### ERROR: deleting ${CP_CLUSTER_NAME}-redis operation did not finish correctly; Exiting!" - echo "### ### Please check AWS Console and re-run the script when ${CP_CLUSTER_NAME}-redis is deleted" - exit ${_ret} - fi - done - fi + # echo "Waiting to delete Redis" + # aws elasticache wait replication-group-deleted --replication-group-id ${CP_CLUSTER_NAME}-redis + # _deleted=$? + # [ ${_deleted} -eq 0 ] || { echo "### ERROR: Failed to delete Redis replication group ${CP_CLUSTER_NAME}-redis after 10 minutes (15 seconds, 40 checks)"; echo "Error code ${_deleted}"; } + # if [ ${_deleted} -ne 0 ]; then + # echo "Waiting additional 5 minutes to delete Redis replication group" + # for n in {1..5}; + # do + # _status=$(aws elasticache describe-replication-groups --replication-group-id ${CP_CLUSTER_NAME}-redis --query ReplicationGroups[0].Status --output text) + # _ret=$? 
+ # if [ ${_ret} -eq 0 -o "${_status}" -eq "deleting" ]; then + # # sleep for a minute + # echo "Redis replication group ${CP_CLUSTER_NAME}-redis is in ${_status} state; Waiting for 1 more minute to check status" + # sleep 60 + # elif [ ${_ret} -eq 254 -o "${_status}" -eq "" ]; then + # # return code 254 indicates that db instance is not found + # break + # else + # echo "### ERROR: deleting ${CP_CLUSTER_NAME}-redis operation did not finish correctly; Exiting!" + # echo "### ### Please check AWS Console and re-run the script when ${CP_CLUSTER_NAME}-redis is deleted" + # exit ${_ret} + # fi + # done + # fi echo "Deleting RDS db subnet group" aws rds delete-db-subnet-group --db-subnet-group-name ${CP_CLUSTER_NAME}-subnet-group - echo "Deleting Cache subnet group" - aws elasticache delete-cache-subnet-group --cache-subnet-group-name ${CP_CLUSTER_NAME}-cache-subnet-group --no-paginate + # echo "Deleting Cache subnet group" + # aws elasticache delete-cache-subnet-group --cache-subnet-group-name ${CP_CLUSTER_NAME}-cache-subnet-group --no-paginate echo "Deleting RDS security group" _rds_sg_id=$(aws ec2 describe-security-groups --filters Name=tag:Resource,Values=${CP_CLUSTER_NAME}-rds --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" | yq eval '.[].ID // ""') @@ -122,12 +122,24 @@ if [ "${CP_CROSSPLANE_ENABLED}" == "false" ]; then aws ec2 delete-security-group --group-id ${_rds_sg_id} fi - echo "Deleting Redis security group" - _redis_sg_id=$(aws ec2 describe-security-groups --filters Name=tag:Resource,Values=${CP_CLUSTER_NAME}-redis --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" | yq eval '.[].ID // ""') - if [ "${_redis_sg_id}" != "" ]; then - echo "Detected Redis security group id: ${_redis_sg_id}; Now deleting" - aws ec2 delete-security-group --group-id ${_redis_sg_id} - fi + # echo "Deleting Redis security group" + # _redis_sg_id=$(aws ec2 describe-security-groups --filters Name=tag:Resource,Values=${CP_CLUSTER_NAME}-redis --query "SecurityGroups[*].{Name:GroupName,ID:GroupId}" | yq eval '.[].ID // ""') + # if [ "${_redis_sg_id}" != "" ]; then + # echo "Detected Redis security group id: ${_redis_sg_id}; Now deleting" + # aws ec2 delete-security-group --group-id ${_redis_sg_id} + # fi +fi + +if [ "${CP_CROSSPLANE_ENABLED}" == "true" ]; then + echo "Detaching role policy to IAM role ${CP_CROSSPLANE_ROLE}" + aws iam detach-role-policy --policy-arn arn:aws:iam::aws:policy/AdministratorAccess --role-name ${CP_CROSSPLANE_ROLE} + _ret=$? + [ ${_ret} -eq 0 ] || { echo "### ERROR: failed to detach policy to IAM role for crossplane. Please re-run the script"; exit ${_ret}; } + + echo "Deleting crossplane role" + aws iam delete-role --role-name ${CP_CROSSPLANE_ROLE} + _ret=$? + [ ${_ret} -eq 0 ] || { echo "### ERROR: failed to delete IAM role for crossplane. Please re-run the script"; exit ${_ret}; } fi echo "Deleting cluster"
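
Usage note: the compute-services deployment change above forwards `global.tibco.containerRegistry.username` and `global.tibco.containerRegistry.password` into the container environment, and the otel-collector pod template now always references an image pull secret named `tibco-container-registry-credentials`, so a secret with that name is expected to exist in the install namespace. Below is a minimal sketch of how these values might be supplied when installing the bumped `tp-cp-bootstrap` 1.2.27 chart; the namespace `cp-ns`, the Helm repo alias `tibco-platform`, and the registry/credential placeholders are illustrative assumptions, not taken from this patch.

```bash
# Sketch only: namespace, repo alias, and registry placeholders are assumptions.
# The chart reads global.tibco.containerRegistry.username/password (see the
# compute-services deployment template change in this patch), and the
# otel-collector pod pulls images with the secret "tibco-container-registry-credentials".

export CP_CONTAINER_REGISTRY_USERNAME="<registry-username>"   # replace with your registry username
export CP_CONTAINER_REGISTRY_PASSWORD="<registry-password>"   # replace with your registry password

# If your setup does not already create the pull secret, it can be created manually:
kubectl create secret docker-registry tibco-container-registry-credentials \
  -n cp-ns \
  --docker-server="<registry-server>" \
  --docker-username="${CP_CONTAINER_REGISTRY_USERNAME}" \
  --docker-password="${CP_CONTAINER_REGISTRY_PASSWORD}"

# Install or upgrade the bootstrap chart, passing the registry credentials through
# the global values consumed by the compute-services deployment:
helm upgrade --install --wait --timeout 1h \
  -n cp-ns tp-cp-bootstrap tibco-platform/tp-cp-bootstrap \
  --version 1.2.27 \
  --set global.tibco.containerRegistry.username="${CP_CONTAINER_REGISTRY_USERNAME}" \
  --set global.tibco.containerRegistry.password="${CP_CONTAINER_REGISTRY_PASSWORD}"
```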