diff --git a/bin/aws-addon-clusters.ts b/bin/aws-addon-clusters.ts new file mode 100644 index 00000000..b7a684b2 --- /dev/null +++ b/bin/aws-addon-clusters.ts @@ -0,0 +1,45 @@ +#!/usr/bin/env node +import * as cdk from 'aws-cdk-lib'; +import {K8S_VERSIONS_DEV, MultiClusterOptions} from "./multi-cluster-options"; +import {CapacityType, KubernetesVersion} from "aws-cdk-lib/aws-eks"; +import MultiClusterPipelineConstruct from "./multi-cluster-pipeline"; +import * as blueprints from "@aws-quickstart/eks-blueprints"; +import * as eks from "aws-cdk-lib/aws-eks"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; + +const app = new cdk.App(); + +const account = process.env.CDK_DEFAULT_ACCOUNT ?? ""; +const region = process.env.CDK_DEFAULT_REGION ?? "us-east-1"; +const minSize = parseInt(process.env.NODEGROUP_MIN ?? "1"); +const maxSize = parseInt(process.env.NODEGROUP_MAX ?? "3"); +const desiredSize = parseInt(process.env.NODEGROUP_DESIRED ?? "1"); +const gitHubSecret = process.env.GITHUB_SECRET ?? "cdk_blueprints_github_secret"; + +const env : MultiClusterOptions = { + account, + region, + minSize, + maxSize, + desiredSize, + gitHubSecret, + nodeGroupCapacityType: CapacityType.ON_DEMAND, + k8sVersions: K8S_VERSIONS_DEV // K8S_VERSIONS_PROD for full deploy +} + + +const mngProps: blueprints.MngClusterProviderProps = { + version: KubernetesVersion.V1_28, + instanceTypes: [ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.XLARGE2)], + amiType: eks.NodegroupAmiType.AL2_X86_64, + desiredSize: 2, + maxSize: 3, +}; + +console.info("Running CDK with id: addon-tester" ); +console.info("Running CDK with: " + JSON.stringify(env)); + +new MultiClusterPipelineConstruct().buildAsync(app, "addon-tester", env , mngProps).catch( + (e) => console.log("Pipeline construct failed because of error ", e) +); + diff --git a/bin/get-ready-for-test-issues.ts b/bin/get-ready-for-test-issues.ts new file mode 100644 index 00000000..5109b7a3 --- /dev/null +++ b/bin/get-ready-for-test-issues.ts @@ -0,0 +1,54 @@ +import * as AWS from "@aws-sdk/client-secrets-manager"; +import { Octokit } from '@octokit/rest' + +export const READY_FOR_TEST= "Ready for test"; + +/** + * Invoke with + * @param region process.env.CDK_DEFAULT_REGION + * @param secretName process.env.GITHUB_SECRET + * @param repo "jalawala" + * @param owner "aws-eks-addon-publication" + */ +export async function getReadyForTestAddons(region: string, secretName: string, repo: string, owner: string){ + const issues = await getReadyForTestIssues(region, secretName, repo, owner) as Issue[]; + // TODO do something with this addon + issues.forEach(issue => console.log(issue.number + ", " + issue.body)); +} + +async function getReadyForTestIssues(region: string, secretName: string, repo: string, owner: string){ + const sm = new AWS.SecretsManager({region}); + + const accessToken = await getGitHubAccessToken(sm, secretName); + const octokit = new Octokit({ auth: accessToken }); + + const getIssuesRequest = { + headers: { + 'X-GitHub-Api-Version': '2022-11-28' + }, + owner, + repo, + labels: READY_FOR_TEST + }; + + const responsePromise = octokit.request("GET /repos/{owner}/{repo}/issues", getIssuesRequest); + + return responsePromise + .then((response)=> response.data as Issue[]) + .catch((error)=>{console.error(`Create issue error: ${error}`)}) +} + +type Issue = { + number: number; + body: string; +} + +async function getGitHubAccessToken(sm : AWS.SecretsManager, secretName : string) { + const secret = await sm.getSecretValue({ SecretId: secretName }); + const 
secretString = secret.SecretString; + if (typeof secretString === 'string') { + return secretString; + } else { + throw new Error('SecretString is not a string.'); + } +} diff --git a/bin/multi-cluster-options.ts b/bin/multi-cluster-options.ts new file mode 100644 index 00000000..762a6263 --- /dev/null +++ b/bin/multi-cluster-options.ts @@ -0,0 +1,23 @@ +import {CapacityType, KubernetesVersion} from "aws-cdk-lib/aws-eks"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import * as eks from "aws-cdk-lib/aws-eks"; + +export const K8S_VERSIONS_PROD : KubernetesVersion[] = [KubernetesVersion.V1_25, KubernetesVersion.V1_26, + KubernetesVersion.V1_27, KubernetesVersion.V1_28]; // KubernetesVersion.V1_29 // when the time comes +//export const K8S_VERSIONS_DEV : KubernetesVersion[] = [ KubernetesVersion.V1_26 ,KubernetesVersion.V1_27, KubernetesVersion.V1_28, KubernetesVersion.of("1.29")]; + +export const K8S_VERSIONS_DEV : KubernetesVersion[] = [ KubernetesVersion.of("1.29")]; + + +export interface MultiClusterOptions { + readonly account: string; + readonly region: string; + minSize?: number; + maxSize?: number; + desiredSize?: number; + gitHubSecret?: string; + nodeGroupCapacityType: CapacityType; + instanceTypes?: ec2.InstanceType[]; + amiType?: eks.NodegroupAmiType; + k8sVersions: KubernetesVersion[]; +} diff --git a/bin/multi-cluster-pipeline.ts b/bin/multi-cluster-pipeline.ts new file mode 100644 index 00000000..d764ad86 --- /dev/null +++ b/bin/multi-cluster-pipeline.ts @@ -0,0 +1,145 @@ +import { Construct } from "constructs"; +import * as blueprints from '@aws-quickstart/eks-blueprints'; +import {K8S_VERSIONS_DEV, MultiClusterOptions} from "./multi-cluster-options"; +import {NodegroupAmiType} from "aws-cdk-lib/aws-eks"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import ManagementClusterBuilder from "../lib/crossplane-argocd-gitops/management-cluster-builder"; +import {ProviderMgmtRoleTeam} from "../lib/crossplane-argocd-gitops/custom-addons/mgmt-role-teams"; +import {GenericClusterProvider, LookupRoleProvider} from "@aws-quickstart/eks-blueprints"; +import {IRole} from "aws-cdk-lib/aws-iam"; +import * as iam from 'aws-cdk-lib/aws-iam'; +import {ManagedNodeGroup} from "@aws-quickstart/eks-blueprints/dist/cluster-providers/types"; + +export default class MultiClusterPipelineConstruct { + async buildAsync(scope: Construct, id: string, props: MultiClusterOptions, mngProps: blueprints.MngClusterProviderProps) { + const k8sVersions = props.k8sVersions ?? K8S_VERSIONS_DEV; + const region :string = props.region; + const account : string = props.account; + + const gitProps = { + owner :'jalawala', + secretName : props.gitHubSecret ?? 
'github-access-eks-addon', + repoName : 'aws-addon-clusters-main', + revision : 'main' // use this to target a certain branch for deployment + }; + + + await this.prevalidateSecrets(gitProps.secretName, region); + + const addOns: Array = [ + new blueprints.ExternalsSecretsAddOn({ + namespace: "external-secrets", + values: { webhook: { port: 9443 } } + }) + ] + + const clusterProps: blueprints.MngClusterProviderProps = { + minSize: props.minSize, + maxSize: props.maxSize, + desiredSize: props.desiredSize, + nodeGroupCapacityType: props.nodeGroupCapacityType, + } + + const stages : blueprints.StackStage[] = []; + const vpcProvider= new blueprints.VpcProvider(); + + const baseBlueprint = blueprints.EksBlueprint.builder() + .resourceProvider(blueprints.GlobalResources.Vpc, vpcProvider) + .resourceProvider('eks-connector-role', new LookupRoleProvider('eks-connector-role')) + .account(account) + .addOns(...addOns) + .teams(new ProviderMgmtRoleTeam(account)) + .useDefaultSecretEncryption(true); + + const mgmtCluster = new ManagementClusterBuilder(account, region) + .create(scope, 'management-cluster', mngProps) + .account(account) + .region(region) + .resourceProvider(blueprints.GlobalResources.Vpc, vpcProvider); + + const mgmtStage = [{id: `mgmt-cluster-stage` , stackBuilder: mgmtCluster}]; + + for(const k8sVersion of k8sVersions) { + baseBlueprint.version(k8sVersion); + + const blueprintAMD = baseBlueprint + .clusterProvider( + new GenericClusterProvider( { + version: k8sVersion, + mastersRole: blueprints.getNamedResource('eks-connector-role') as IRole, + managedNodeGroups : [addManagedNodeGroup( 'amd-tst-ng',{...clusterProps, + amiType : NodegroupAmiType.AL2_X86_64, + instanceTypes: [ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.XLARGE)]})] + }) + ); + stages.push({ + id: `amd-` + k8sVersion.version.replace(".", "-"), + stackBuilder : blueprintAMD.clone(props.region).id(`amd-` + k8sVersion.version.replace(".", "-")) + }); + + const blueprintARM = baseBlueprint + .clusterProvider( + new GenericClusterProvider( { + version: k8sVersion, + mastersRole: blueprints.getNamedResource('eks-connector-role') as IRole, + managedNodeGroups : [addManagedNodeGroup('arm-tst-ng',{...clusterProps, + amiType : NodegroupAmiType.AL2_ARM_64, + instanceTypes: [ec2.InstanceType.of(ec2.InstanceClass.M7G, ec2.InstanceSize.XLARGE)]})] + }) + ); + stages.push({ + id: `arm-` + k8sVersion.version.replace(".", "-"), + stackBuilder : blueprintARM.clone(props.region).id(`arm-` + k8sVersion.version.replace(".", "-")) + }); + } + + blueprints.CodePipelineStack.builder() + .name(id) + .owner(gitProps.owner) + .codeBuildPolicies( + ([ + new iam.PolicyStatement({ + resources: ["*"], + actions: [ + "codebuild:*", + "sts:AssumeRole", + "secretsmanager:GetSecretValue", + "secretsmanager:ListSecrets", + "secretsmanager:DescribeSecret", + "cloudformation:*" + ] + }) + ]) + ) + .repository({ + targetRevision : gitProps.revision, + credentialsSecretName: gitProps.secretName, + repoUrl: gitProps.repoName + } + ) + .wave({ id: `mgmt-cluster-stage`, stages: mgmtStage }) + .wave({ id: `${id}-wave`, stages }) + .build(scope, id, { env: { account, region } }); + } + + async prevalidateSecrets(secretName: string, region: string) { + try { + await blueprints.utils.validateSecret(secretName, region); + } + catch(error) { + throw new Error(`${secretName} secret must be setup in AWS Secrets Manager in ${region} for the GitHub pipeline. 
+ * @see https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-create-personal-token-CLI.html`); + } + } +} + +function addManagedNodeGroup(id: string, clusterProps: blueprints.MngClusterProviderProps): ManagedNodeGroup { + return { + id, + minSize: clusterProps.minSize, + maxSize: clusterProps.maxSize, + amiType: clusterProps.amiType, + instanceTypes: clusterProps.instanceTypes, + desiredSize: clusterProps.desiredSize + }; +} diff --git a/docs/patterns/crosplane-argocd-gitops.md b/docs/patterns/crosplane-argocd-gitops.md new file mode 100644 index 00000000..47a7f860 --- /dev/null +++ b/docs/patterns/crosplane-argocd-gitops.md @@ -0,0 +1,182 @@ +# Secure Ingress using Cognito Pattern + +## Objective + +The objective of this pattern is to provide a secure authentication mechanism for customer applications using Amazon Cognito, ALB, and Route53, ensuring that only authorized users can access the application. The Kubecost tool is used as a reference or sample implementation to demonstrate the pattern's capabilities. + +To achieve this objective, the pattern utilizes Amazon Cognito to provide user authentication for the application's ingress, with ALB's built-in support for user authentication handling routine tasks such as user sign-up, sign-in, and sign-out. In addition to Amazon Cognito, ALB integrates with any OpenID Connect compliant identity provider (IdP) for a single sign-on experience across applications. ACM and Route53 provide SSL/TLS certificates to secure connections to ALB and authenticate users, preventing sensitive information from being intercepted or tampered with during transmission. + +The pattern also leverages Kubecost to provide real-time cost visibility and analysis for Kubernetes clusters, enabling customers to make informed decisions about resource allocation and utilization. This pattern can be easily adapted and extended to secure ingress for any application, providing a unified and secure solution for user authentication while optimizing costs. By implementing this solution, Amazon EKS customers can have a reliable, scalable, and secure authentication mechanism for their applications, with a cost optimization tool to manage and reduce the costs associated with their Kubernetes clusters. + + +## Architecture + +![Kubecost Architecture](./images/secure-ingress-kubecost-new.png) + + +## Approach + +This blueprint will include the following: + +* A new Well-Architected VPC with both Public and Private subnets. +* A new Well-Architected EKS cluster in the region and account you specify. +* [EBS CSI Driver Amazon EKS Add-on](https://aws-quickstart.github.io/cdk-eks-blueprints/addons/ebs-csi-driver/) allows Amazon Elastic Kubernetes Service (Amazon EKS) clusters to manage the lifecycle of Amazon EBS volumes for persistent volumes. +* AWS and Kubernetes resources needed to support [AWS Load Balancer Controller](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html). +* [Amazon VPC CNI add-on (VpcCni)](https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html) into your cluster to support native VPC networking for Amazon EKS. 
+* [External-DNS](https://github.com/kubernetes-sigs/external-dns) allows integration of exposed Kubernetes services and Ingresses with DNS providers
+* [Kubecost](https://kubecost.com/) provides real-time cost visibility and insights by uncovering patterns that create overspending on infrastructure to help teams prioritize where to focus optimization efforts
+* [Argo CD](https://aws-quickstart.github.io/cdk-eks-blueprints/addons/argo-cd/) is a declarative, GitOps continuous delivery tool for Kubernetes. The Argo CD add-on provisions Argo CD into an EKS cluster and bootstraps your workloads from public and private Git repositories.
+* Creates the necessary Cognito resources, such as the user pool, user pool client, domain, and [Pre sign-up and Pre authentication Lambda triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html), which are passed to the Argo CD app of apps pattern so that ingress resources can reference them.
+
+## GitOps configuration
+
+For GitOps, the blueprint bootstraps the ArgoCD addon and points it to the [EKS Blueprints Workload](https://github.com/aws-samples/eks-blueprints-workloads) sample repository.
+
+
+## Prerequisites
+
+Ensure that you have installed the following tools on your machine.
+
+1. [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
+2. [kubectl](https://Kubernetes.io/docs/tasks/tools/)
+3. [cdk](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install)
+4. [npm](https://docs.npmjs.com/cli/v8/commands/npm-install)
+
+## Deploy
+
+1. Let’s start by setting a few environment variables. Change the region as needed.
+
+```
+ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
+AWS_REGION=us-west-2
+```
+
+2. Clone the repository and install dependency packages. This repository contains CDK v2 code written in TypeScript.
+
+```
+git clone https://github.com/aws-samples/cdk-eks-blueprints-patterns.git
+cd cdk-eks-blueprints-patterns
+npm i
+```
+
+3. An Argo CD admin password secret must be defined as plain text (not key/value) in the `us-west-2` region.
+
+```
+aws secretsmanager create-secret --name argo-admin-secret \
+    --description "Admin Password for ArgoCD" \
+    --secret-string "password123$" \
+    --region "us-west-2"
+```
+4. The CDK code expects the allowed domain and subdomain names in the CDK context file (cdk.json).
+
+Create two environment variables. The PARENT_HOSTED_ZONE variable contains your company’s domain name. The DEV_SUBZONE_NAME will be the address for your Kubecost dashboard.
+
+Generate the cdk.json file:
+
+```
+PARENT_HOSTED_ZONE=mycompany.a2z.com
+DEV_SUBZONE_NAME=dev.mycompany.a2z.com
+cat << EOF > cdk.json
+{
+    "app": "npx ts-node dist/lib/common/default-main.js",
+    "context": {
+        "parent.hostedzone.name": "${PARENT_HOSTED_ZONE}",
+        "dev.subzone.name": "${DEV_SUBZONE_NAME}"
+    }
+}
+EOF
+```
+
+
+5. In this solution, we’ll allow access to the Kubecost dashboard based on user email addresses. You can control access to the dashboard by allow-listing an entire domain or individual email addresses.
+
+Users are required to sign up before they can access the Kubecost dashboard. The pre sign-up Lambda trigger only allows sign-ups when the user’s email domain matches an allow-listed domain. When users sign up, Cognito sends a verification code to their email address. Users have to verify their email address (using the one-time code) before they get access to the dashboard.
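+A minimal sketch of such a pre sign-up trigger is shown below. It is illustrative only and assumes the allow-list lives in the `/secure-ingress-auth-cognito/ALLOWED_DOMAINS` SSM parameter created in the next step; the Lambda shipped with this pattern may differ in detail.
+
+```
+// Illustrative pre sign-up trigger: reject sign-ups whose email domain is not allow-listed.
+import { SSMClient, GetParameterCommand } from "@aws-sdk/client-ssm";
+import { PreSignUpTriggerEvent } from "aws-lambda";
+
+const ssm = new SSMClient({});
+
+export const handler = async (event: PreSignUpTriggerEvent): Promise<PreSignUpTriggerEvent> => {
+  // Comma-separated allow-list, e.g. "emaildomain1.com,emaildomain2.com"
+  const param = await ssm.send(new GetParameterCommand({ Name: "/secure-ingress-auth-cognito/ALLOWED_DOMAINS" }));
+  const allowedDomains = (param.Parameter?.Value ?? "").split(",").map(d => d.trim().toLowerCase());
+
+  const email = (event.request.userAttributes["email"] ?? "").toLowerCase();
+  const domain = email.split("@")[1];
+
+  if (!domain || !allowedDomains.includes(domain)) {
+    // Throwing from a Cognito trigger rejects the sign-up before the user is created in the pool.
+    throw new Error(`Sign-up is not allowed for the "${domain ?? "unknown"}" domain`);
+  }
+  return event;
+};
+```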
+ +If you’d like to limit access to the dashboard by email addresses, you can also create a parameter to store allowed email addresses and add a logic to the pre authentication Lambda trigger. + +Create below parameters with allowed email addresses and domains in the AWS Systems Manager Parameter Store: + +``` +export SSM_PARAMETER_KEY="/secure-ingress-auth-cognito/ALLOWED_DOMAINS" +export SSM_PARAMETER_VALUE="emaildomain1.com,emaildomain2.com" + +aws ssm put-parameter \ + --name "$SSM_PARAMETER_KEY" \ + --value "$SSM_PARAMETER_VALUE" \ + --type "String" \ + --region $AWS_REGION +``` + + +6. Execute the commands below to bootstrap the AWS environment in `us-west-2` + +``` +cdk bootstrap aws://$ACCOUNT_ID/$AWS_REGION +``` + +7. Run the following command from the root of this repository to deploy the pipeline stack: + +``` +make build +make pattern secure-ingress-cognito deploy secure-ingress-blueprint +``` + +## Cluster Access + +Once the deploy completes, you will see output in your terminal window similar to the following: + +``` +Outputs: +secure-ingress-blueprint.secureingressblueprintClusterNameD6A1BE5C = secure-ingress-blueprint +secure-ingress-blueprint.secureingressblueprintConfigCommandD0275968 = aws eks update-kubeconfig —name secure-ingress-blueprint —region us-west-2 —role-arn arn:aws:iam:::role/secure-ingress-blueprint-secureingressblueprintMas-7JD5S67SG7M0 +secure-ingress-blueprint.secureingressblueprintGetTokenCommand21BE2184 = aws eks get-token —cluster-name secure-ingress-blueprint —region us-west-2 —role-arn arn:aws:iam:::role/secure-ingress-blueprint-secureingressblueprintMas-7JD5S67SG7M0 +``` +``` +Stack ARN: +arn:aws:cloudformation:us-west-2::stack/secure-ingress-blueprint/64017120-91ce-11ed-93b2-0a67951f5d5d +``` + + +To update your Kubernetes config for your new cluster, copy and run the secure-ingress-blueprint.secureingressblueprintConfigCommandD0275968 command (the second command) in your terminal. + +``` +aws eks update-kubeconfig —name secure-ingress-blueprint —region us-west-2 —role-arn arn:aws:iam:::role/secure-ingress-blueprint-secureingressblueprintMas-7JD5S67SG7M0 +``` + +Validate that you now have kubectl access to your cluster via the following: + +``` +kubectl get all -n kubecost +``` + +You should see output that lists all namespaces in your cluster. + + +## Test authentication + +Point your browser to the URL of the Kubecost app in your cluster. You can get the URL from the cdk.json file using the below command. + +``` +awk -F':' '/dev.subzone.name/ {print $2}' cdk.json | tr -d '",' | xargs echo +``` + +Your browser will be redirected to a sign-in page. This page is provided by Amazon Cognito hosted UI. + +Since this is your first time accessing the application, sign up as a new user. The data you input here will be saved in the Amazon Cognito user pool you created earlier in the post. + +![Cognito Signup Process](./images/Cognito-Signup-1.png) + +Select “Sign up” and use your email address and create a password + +![Cognito Signup Process](./images/Cognito-Signup-2.png) + +![Cognito Signup Process](./images/Cognito-Signup-3.png) + +Use the verification code received in your email and confirm the account. Once you sign in, ALB will send you to the Kubecost app’s UI: + +![Kubecost](./images/Cognito-Kubecost-1.png) + +Select the “AWS Cluster #1” to view the cost overview, savings and efficiency details. 
+
+![Kubecost Dashboard](./images/Cognito-Kubecost-2.png)
+
diff --git a/docs/patterns/images/amd-1.29-addon.png b/docs/patterns/images/amd-1.29-addon.png
new file mode 100644
index 00000000..dcb92b5b
Binary files /dev/null and b/docs/patterns/images/amd-1.29-addon.png differ
diff --git a/docs/patterns/images/arm-1.29-addon.png b/docs/patterns/images/arm-1.29-addon.png
new file mode 100644
index 00000000..65119bc6
Binary files /dev/null and b/docs/patterns/images/arm-1.29-addon.png differ
diff --git a/docs/patterns/images/aws_secret_codepipeline.png b/docs/patterns/images/aws_secret_codepipeline.png
new file mode 100644
index 00000000..a8f3694c
Binary files /dev/null and b/docs/patterns/images/aws_secret_codepipeline.png differ
diff --git a/docs/patterns/images/codepipeline1.png b/docs/patterns/images/codepipeline1.png
new file mode 100644
index 00000000..ff872bda
Binary files /dev/null and b/docs/patterns/images/codepipeline1.png differ
diff --git a/docs/patterns/images/codepipeline2.png b/docs/patterns/images/codepipeline2.png
new file mode 100644
index 00000000..fe378269
Binary files /dev/null and b/docs/patterns/images/codepipeline2.png differ
diff --git a/docs/patterns/pipeline-multi-env-gitops.md b/docs/patterns/pipeline-multi-env-gitops.md
index 08fec903..dbac3989 100644
--- a/docs/patterns/pipeline-multi-env-gitops.md
+++ b/docs/patterns/pipeline-multi-env-gitops.md
@@ -1,99 +1,575 @@
-# Pipeline Multi Environment Pattern
+# GitOps based Multi Cluster Addon and Apps Management using Crossplane and ArgoCD

## Objective

-1. Deploying an EKS cluster across 3 environments( dev, test, and prod ), with a Continuous Deployment pipeline triggered upon a commit to the repository that holds the pipeline configuration.
-2. Configuring GitOps tooling (ArgoCD addon) to support multi-team and multi-repositories configuration, in a way that restricts each application to be deployed only into the team namespace, by using ArgoCD projects

+The objective of this pattern is to provide GitOps-based lifecycle management of Amazon EKS Addons, Kubernetes Applications and Helm charts across various workload clusters using ArgoCD and Crossplane deployed in a Management Cluster. This helps platform and application teams simplify the process of deploying Addons and Apps from a central Management Cluster. In this solution, we use CDK to deploy an AWS CodePipeline which monitors this platform repo and deploys the Management and Workload Clusters using CDK EKS Blueprints.

-### GitOps confguration
-For GitOps, the blueprint bootstrap the ArgoCD addon and points to the [EKS Blueprints Workload](https://github.com/aws-samples/eks-blueprints-workloads) sample repository.
-The pattern uses the ECSDEMO applications as sample applications to demonstrate how to setup a GitOps configuration with multiple teams and multiple applications. The pattern include the following configurations in terms io:

+## Architecture
+
+
+## Approach
+
+This blueprint will include the following:
+
+* AWS CodePipeline which deploys the Management and Workload Clusters
+* A new Well-Architected EKS cluster `management-cluster` and two workload EKS Clusters `amd-1-29-blueprint` and `arm-1-29-blueprint` in the region and account you specify.
+* [Amazon VPC CNI add-on (VpcCni)](https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html) deployed into your cluster to support native VPC networking for Amazon EKS.
+* The Management Cluster is deployed with the following Addons:
+ * Upbound Universal Crossplane Provider + * Upbound AWS Family Crossplane Provider + * Upbound AWS EKS Crossplane Provider + * Kubernetes Crossplane Provider + * Helm Crossplane Provider + * Secrets Store AddOn + * ArgoCD Addon +* The ArgoCD Addon is bootstrapped with [git-ops](https://github.com/aws-samples/eks-blueprints-workloads) which contains Crossplane Manifest files to deploy EKS Addons, Kubernetes Manifests and also Helm Charts. + +## GitOps confguration -1. Application team - it defines 3 application teams that corresponds with the 3 sample applications used -2. ArgoCD bootstrap - the pattern configure the ArgoCD addon to point to the [workload repository](https://github.com/aws-samples/eks-blueprints-workloads) of the EKS Blueprints samples -3. ArgoCD projects - as part of the ArgoCD addon bootstrap, the pattern generate an ArgoCD project for each application team. The ArgoCD are used in order to restrict the deployment of an application to a specific target namespace +For GitOps, the blueprint bootstrap the ArgoCD addon and points to the [EKS Blueprints Workload](https://github.com/aws-samples/eks-blueprints-workloads) sample repository. -You can find the App of Apps configuration for this pattern in the workload repository under the folder [`multi-repo`](https://github.com/aws-samples/eks-blueprints-workloads/tree/main/multi-repo). ## Prerequisites -1. Fork this repository to your GitHub organisation/user -2. Clone your forked repository -3. Install the AWS CDK Toolkit globally on your machine using +Ensure that you have installed the following tools on your machine. + +1. [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +2. [kubectl](https://Kubernetes.io/docs/tasks/tools/) +3. [cdk](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install) +4. [npm](https://docs.npmjs.com/cli/v8/commands/npm-install) +5. [helm](https://helm.sh/docs/intro/install/) +6. GitHub Access Token for this repo and AWS secret + +Create a plain-text Amazon secret to hold a fine-grained GitHub access token for this repo in the desired region, and +set its name as a value to the GITHUB_SECRET environment variable. Default value is `cdk_blueprints_github_secret`. + +> **WARNING:** When switching the CDK between region, remember to replicate this secret!!!! + +```shell +export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account) +export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region') +export CDK_REPO_GITHUB_PAT_TOKEN= +export CDK_REPO_AWS_SECRET_NAME="cdk_blueprints_github_secret" +aws secretsmanager create-secret --region $AWS_REGION \ + --name $CDK_REPO_AWS_SECRET_NAME \ + --description "GitHub Personal Access Token for CodePipeline to access GitHub account" \ + --secret-string $CDK_REPO_GITHUB_PAT_TOKEN +``` + +The Secret will look like this in the AWS Console. + +![AWS Secret for CodePipeline](./images/aws_secret_codepipeline.png) + +## Deploy + +1. Clone the repository and install dependency packages. This repository contains CDK v2 code written in TypeScript. + +``` +git clone https://github.com/aws-samples/cdk-eks-blueprints-patterns.git +cd cdk-eks-blueprints-patterns +npm i +``` + +2. Execute the commands below to bootstrap the AWS environment + +``` +cdk bootstrap aws://$ACCOUNT_ID/$AWS_REGION +``` + +4. 
Run the following command from the root of this repository to deploy the pipeline stack:
+
+```
+make build
+make list
+make pattern aws-addon-clusters deploy
+```
+
+## Cluster Access
+
+### View the CodePipeline
+
+![codepipeline1](./images/codepipeline1.png)
+
+![codepipeline2](./images/codepipeline2.png)
+
+### Create Kube context to access the `management-cluster`
+
+Go to the outputs of the CloudFormation Stack `mgmt-cluster-stage-mgmt-cluster-stage-blueprint`, search for a key starting with `mgmtclusterstageblueprintConfigCommand`, and copy its value, which is an aws command that creates the kube context for the `management-cluster`.
+
+An example of the command is shown below.
+
+```shell
+aws eks update-kubeconfig --name management-cluster --region us-west-2 --role-arn arn:aws:iam::ACCOUNT_ID:role/mgmt-cluster-stage-mgmt-c-managementclusterAccessRo-XYSC5PKL8WnA
+```
+
+The output will look like below.
+
+```shell
+Updated context arn:aws:eks:us-west-2:ACCOUNT_ID:cluster/management-cluster in /Users//.kube/config
+```
+
+Set the environment variable below to the above context:
+
+```shell
+export MANAGEMENT_CLUSTER_CONTEXT="arn:aws:eks:${AWS_REGION}:${ACCOUNT_ID}:cluster/management-cluster"
+echo "export MANAGEMENT_CLUSTER_CONTEXT=${MANAGEMENT_CLUSTER_CONTEXT}" >> ~/.bash_profile
+```
+Run the below command to validate access to the `management-cluster`:
+
+```shell
+kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get node
+```
+
+The output will look like below.
+
+```shell
+NAME                           STATUS   ROLES    AGE    VERSION
+ip-10-0-116-4.ec2.internal     Ready    <none>   6d8h   v1.28.8-eks-ae9a62a
+ip-10-0-175-104.ec2.internal   Ready    <none>   6d8h   v1.28.8-eks-ae9a62a
+```
+
-    ```bash
-    npm install -g aws-cdk
-    ```
+Run the below command to get the list of Crossplane Providers deployed into the `management-cluster`:
-4. `github-ssh-key` - must contain GitHub SSH private key as a JSON structure containing fields `sshPrivateKey` and `url`. This will be used by ArgoCD addon to authenticate against ay GitHub repository (private or public). The secret is expected to be defined in the region where the pipeline will be deployed to. For more information on SSH credentials setup see [ArgoCD Secrets Support](https://aws-quickstart.github.io/cdk-eks-blueprints/addons/argo-cd/#secrets-support).
+```shell
+kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get providers.pkg.crossplane.io
+```
+
+The output will look like below.
+
+```shell
+NAME                          INSTALLED   HEALTHY   PACKAGE                                                           AGE
+helm-provider                 True        True      xpkg.upbound.io/crossplane-contrib/provider-helm:v0.18.1          47h
+kubernetes-provider           True        True      xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.13.0    25h
+provider-aws-eks              True        True      xpkg.upbound.io/upbound/provider-aws-eks:v1.1.0                   8d
+upbound-provider-family-aws   True        True      xpkg.upbound.io/upbound/provider-family-aws:v1.4.0                8d
+```
+
+
+Run the below command to get the Crossplane Provider pods in the `management-cluster`:
+
+```shell
+kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get pod -n upbound-system
+```
+
+The output will look like below.
+ +```shell +NAME READY STATUS RESTARTS AGE +crossplane-594b65bfdb-pgkxf 1/1 Running 0 6d8h +crossplane-rbac-manager-86c74cf5d-tjcw8 1/1 Running 0 6d8h +helm-provider-4d90a08b9ede-7c874b858b-pp26d 1/1 Running 0 47h +kubernetes-provider-a3cbbe355fa7-55846cfbfb-6tpcl 1/1 Running 0 25h +provider-aws-eks-23042d28ed58-66d9db8476-jr6mb 1/1 Running 0 6d8h +upbound-provider-family-aws-bac5d48bd353-64845bdcbc-4vpn6 1/1 Running 0 6d8h 8d +``` + + +Run below command to get the ArgoCD pods deployed into the `management-cluster` + +```shell +kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get pod -n argocd +``` + +The output will like below. + +```shell +NAME READY STATUS RESTARTS AGE +blueprints-addon-argocd-application-controller-0 1/1 Running 0 24h +blueprints-addon-argocd-applicationset-controller-7b78c7fc94ls9 1/1 Running 0 24h +blueprints-addon-argocd-dex-server-6cf94ddc54-dfhv7 1/1 Running 0 24h +blueprints-addon-argocd-notifications-controller-6f6b7d95cdd2tl 1/1 Running 0 24h +blueprints-addon-argocd-redis-b8dbc7dc6-h4bs8 1/1 Running 0 24h +blueprints-addon-argocd-repo-server-fd57dc686-zkbsm 1/1 Running 0 4h15m +blueprints-addon-argocd-server-84c8b597c9-98c95 1/1 Running 0 24h +``` + + +### Create Kube context to access the `amd-1-29-blueprint` + +Go to the CloudFormation Stack `amd-1-29-amd-1-29-blueprint` outputs and search for a key starting with `amd129blueprintConfigCommand` and copy it's value which an aws command to create a the kubecontext for the `amd-1-29-blueprint` + +The example command looks like below. + +```shell +aws eks update-kubeconfig --name amd-1-29-blueprint --region us-west-2 --role-arn arn:aws:iam::ACCOUNT_ID:role/eks-connector-role +``` -5. `github-token` secret must be stored in AWS Secrets Manager for the GitHub pipeline. For more information on how to set it up, please refer to the [docs](https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-create-personal-token-CLI.html). The GitHub Personal Access Token should have these scopes: - 1. *repo* - to read the repository - 2. *admin:repo_hook* - if you plan to use webhooks (enabled by default) +The output will look like below. -6. Create the relevant users that will be used by the different teams +```shell +Added new context arn:aws:eks:us-west-2:ACCOUNT_ID:cluster/amd-1-29-blueprint to /Users/jalawala/.kube/config +``` + +Set below environment variable to the above context + +```shell +export WORKLOAD_CLUSTER1_CONTEXT="arn:aws:eks:${AWS_REGION}:${ACCOUNT_ID}:cluster/amd-1-29-blueprint" +echo "export WORKLOAD_CLUSTER1_CONTEXT=${WORKLOAD_CLUSTER1_CONTEXT}" >> ~/.bash_profile +``` +Run below commands to validate the access to the `amd-1-29-blueprint` + +```shell +kubectl --context $WORKLOAD_CLUSTER1_CONTEXT get node +``` +The output will look like below. + +```shell +NAME STATUS ROLES AGE VERSION +ip-10-0-96-158.ec2.internal Ready 6d9h v1.29.3-eks-ae9a62a +``` + +### Create Kube context to access the `arm-1-29-blueprint` + +Go to the CloudFormation Stack `arm-1-29-arm-1-29-blueprint` outputs and search for a key starting with `arm129blueprintConfigCommand` and copy it's value which an aws command to create a the kubecontext for the `arm-1-29-blueprint` + +The example command looks like below. + +```shell +aws eks update-kubeconfig --name arm-1-29-blueprint --region us-west-2 --role-arn arn:aws:iam::$ACCOUNT_ID:role/eks-connector-role +``` + +The output will look like below. 
- ```bash - aws iam create-user --user-name frontend-user - aws iam create-user --user-name nodejs-user - aws iam create-user --user-name crystal-user - aws iam create-user --user-name platform-user - ``` +```shell +Added new context arn:aws:eks:us-west-2:ACCOUNT_ID:cluster/arm-1-29-blueprint to /Users/jalawala/.kube/config +``` -7. Install project dependencies by running `npm install` in the main folder of this cloned repository +Set below environment variable to the above context -8. In case you haven't done this before, bootstrap your AWS Account for AWS CDK use using: +```shell +export WORKLOAD_CLUSTER2_CONTEXT="arn:aws:eks:${AWS_REGION}:${ACCOUNT_ID}:cluster/arm-1-29-blueprint" +echo "export WORKLOAD_CLUSTER2_CONTEXT=${WORKLOAD_CLUSTER2_CONTEXT}" >> ~/.bash_profile +``` +Run below commands to validate the access to the `arm-1-29-blueprint` - ```bash - cdk bootstrap - ``` +```shell +kubectl --context $WORKLOAD_CLUSTER2_CONTEXT get node +``` +The output will look like below. -9. Modify the code in your forked repo to point to your GitHub username/organisation. This is needed because the AWS CodePipeline that will be automatically created will be triggered upon commits that are made in your forked repo. Open the [pattenrn file source code](../../lib/pipeline-multi-env-gitops/index.ts) and look for the declared const of `gitOwner`. Change it to your GitHub username. +```shell +NAME STATUS ROLES AGE VERSION +ip-10-0-96-158.ec2.internal Ready 6d9h v1.29.3-eks-ae9a62a +``` -10. *OPTIONAL* - As mentioned above, this pattern uses another repository for GitOps. This is the ArgoCD App of Apps configuration that resides in the [aws-samples](https://github.com/aws-samples/eks-blueprints-workloads/tree/main/multi-repo) organisation. If you would like to modify the App of Apps configuration and customise it to your needs, then use the following instructions: +### Update the Trust policy for the Upbound AWS EKS Provider IAM Role. - 1. Fork the [App of Apps](https://github.com/aws-samples/eks-blueprints-workloads/tree/main/multi-repo) workloads repo to your GitHub username +The IAM Role used for IRSA for the Upbound AWS EKS Provider Pod needs to be updated as below to allow Service Accounts for all Upbound AWS Service specific providers to assume the Role. - 2. Modify the [pattern code](../../lib/pipeline-multi-env-gitops/index.ts) with the following changes: +Go to the `mgmt-cluster-stage-mgmt-cluster-stage-blueprint` stack output tab and extract the role name from the `providerawssaiamrole` output. - 1. Change the consts of `devArgoAddonConfig`, `testArgoAddonConfig`, and `prodArgoAddonConfig` to point to your GitHub username +```shell +export providerawssaiamrole=$(aws cloudformation describe-stacks \ + --stack-name mgmt-cluster-stage-mgmt-cluster-stage-blueprint \ + --query 'Stacks[].Outputs[?OutputKey==`providerawssaiamrole`].OutputValue' \ + --output text | awk -F'/' '{print $2}') +echo $providerawssaiamrole +``` - 2. In the `createArgoAddonConfig` function, look for the `git@github.com:aws-samples/eks-blueprints-workloads.git` code under the `sourceRepos` configurations, and add another reference to your forked workload repository +The output will look like below. -## Deploying +```shell +mgmt-cluster-stage-mgmt-c-mgmtclusterstageblueprint-I8cnZsnO37rA +``` -Once all pre-requisites are set you are ready to deploy the pipeline. 
Run the following command from the root of this repository to deploy the pipeline stack: +Get the OIDC for the `management-cluster` value by running: -```bash -make pattern pipeline-multienv-gitops deploy eks-blueprint-pipeline-stack +```shell +export OIDC_VAL=$(aws eks describe-cluster --name "management-cluster" --region "${AWS_REGION}" --query "cluster.identity.oidc.issuer" --output text | awk -F'/' '{print $5}') +echo $OIDC_VAL ``` -Now you can go to [AWS CodePipeline console](https://eu-west-1.console.aws.amazon.com/codesuite/codepipeline/pipelines), and see how it was automatically created to deploy multiple Amazon EKS clusters to different environments. +The output will like below. -### Notes +```shell +0F745A41ECA76297CBF070C032932033 +``` -1. In case your pipeline fails on the first run, it's because that the AWS CodeBuild step needs elevated permissions at build time. This is described in the official [docs](https://aws-quickstart.github.io/cdk-eks-blueprints/pipelines/#troubleshooting). To resolve this, locate `AccessDeniedException` in the CodeBuild build logs, and attach the following inline policy to it: +Create the Updated Trust policy. Notice the `*` in `provider-aws-*` in the Conditions Section. - ```json +```shell +export IAM_ROLE_TRUST_POLICY="provider-aws-management-cluster-trust-policy.json" +cat > $IAM_ROLE_TRUST_POLICY < $EKS_ACCESS_IAM_POLICY_FILE < +export GIT_OPS_AWS_SECRET_NAME="github-token" +aws secretsmanager create-secret --region $AWS_REGION \ + --name $GIT_OPS_AWS_SECRET_NAME \ + --description "GitHub Personal Access Token for ArgoCD to access Grossplane Manifests" \ + --secret-string $GIT_OPS_GITHUB_PAT_TOKEN + + +cat > secret-store-argocd.yaml < +``` + +Add EKS Cluster to ArgoCD. + +```shell +argocd cluster add $MANAGEMENT_CLUSTER_CONTEXT +``` +The output will look like below. + +```shell +WARNING: This will create a service account `argocd-manager` on the cluster referenced by context `arn:aws:eks:us-west-2:ACCOUNT_ID:cluster/management-cluster` with full cluster level privileges. Do you want to continue [y/N]? y +INFO[0004] ServiceAccount "argocd-manager" already exists in namespace "kube-system" +INFO[0004] ClusterRole "argocd-manager-role" updated +INFO[0005] ClusterRoleBinding "argocd-manager-role-binding" updated +Cluster 'https://0F745A41ECA76297CBF070C032932033.sk1.us-west-2.eks.amazonaws.com' added +``` + +Run the below command to get the list of ArgoCD Applications. + +```shell +argocd app list +``` + +The output will look like below. + +```shell +NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET +argocd/bootstrap-apps https://kubernetes.default.svc argocd default Synced Healthy Auto-Prune https://github.com/aws-samples/eks-blueprints-workloads ./common/testingClusters main +argocd/cluster1 https://kubernetes.default.svc argocd default Synced Healthy Auto-Prune https://github.com/aws-samples/eks-blueprints-workloads ./clusters/cluster1 main +argocd/cluster2 https://kubernetes.default.svc argocd default Synced Healthy Auto-Prune https://github.com/aws-samples/eks-blueprints-workloads ./clusters/cluster2 main +``` + + +### Validate EKS Addons deployment in Workload Clusters + +Run the below command to get the list of Crossplane AWS Provder Objects deployed in the Management Cluster. + +```shell +kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get providerconfigs.aws.upbound.io +``` + +The output will look like below. 
+ +```shell +NAME AGE +provider-config-aws-amd-1-29-blueprint 4h52m +provider-config-aws-arm-1-29-blueprint 4h52m +``` + +Run the below command to get the list of Crossplane AWS EKS Provder Addon Objects deployed in the Management Cluster. + +```shell +kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get addons.eks.aws.upbound.io +``` + +The output will look like below. + +```shell +NAME READY SYNCED EXTERNAL-NAME AGE +addon-eks-pod-identity-agent-amd-1-29 True True amd-1-29-blueprint:eks-pod-identity-agent 4h15m +addon-eks-pod-identity-agent-arm-1-29 True True arm-1-29-blueprint:eks-pod-identity-agent 4h15m +``` + +Go to the Workload EKS Clusters and Ensure that EKS Addon is deployed successfully. + +![amd-1-29-blueprint EKS Addon](./images/amd-1.29-addon.png) + + +![arm-1-29-blueprint EKS Addon](./images/arm-1.29-addon.png) + + +### Validate Kubernetes Manifests deployment in Workload Clusters + +Run the below command to get the list of Crossplane Kubernetes Provder Objects deployed in the Management Cluster. + +```shell +kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get providerconfigs.kubernetes.crossplane.io +``` + +The output will look like below. + +```shell +NAME AGE +provider-config-k8s-amd-1-29-blueprint 4h31m +provider-config-k8s-arm-1-29-blueprint 4h40m +``` + +Run the below command to get the list of Namespaces in the Workload Cluster `amd-1-29-blueprint` + +```shell +kubectl --context $WORKLOAD_CLUSTER1_CONTEXT get ns +``` + +The output will look like below. + +```shell +NAME STATUS AGE +default Active 8d +external-secrets Active 8d +kube-node-lease Active 8d +kube-public Active 8d +kube-system Active 8d +test-namespace-amd-1-29-blueprint Active 4h9m +``` + + +Run the below command to get the list of Namespaces in the Workload Cluster `arm-1-29-blueprint` + +```shell +kubectl --context $WORKLOAD_CLUSTER2_CONTEXT get ns +``` + +The output will look like below. + +```shell +NAME STATUS AGE +default Active 8d +external-secrets Active 8d +kube-node-lease Active 8d +kube-public Active 8d +kube-system Active 8d +test-namespace-arm-1-29-blueprint Active 4h9m +``` + +### Validate Helm Chart deployment in Workload Clusters + +Run the below command to get the list of Crossplane Kubernetes Provder Objects deployed in the Management Cluster. + +```shell +kubectl --context $MANAGEMENT_CLUSTER_CONTEXT get providerconfigs.helm.crossplane.io +``` + +The output will look like below. + +```shell +NAME AGE +provider-config-helm-amd-1-29-blueprint 4h37m +provider-config-helm-arm-1-29-blueprint 4h46m +``` + +Run the below command to get the list of helm charts in the Workload Cluster `amd-1-29-blueprint` + +```shell +helm --kube-context $WORKLOAD_CLUSTER1_CONTEXT list -A +``` + +The output will look like below. + +```shell +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +blueprints-addon-external-secrets external-secrets 1 2024-05-07 05:25:31.465715836 +0000 UTC deployed external-secrets-0.9.9 v0.9.9 +test-helm-amd-1-29-blueprint default 1 2024-05-15 06:39:17.325950143 +0000 UTC deployed nginx-17.0.1 1.26.0 +``` + + +Run the below command to get the list of Helm Charts in the Workload Cluster `arm-1-29-blueprint` + +```shell +helm --kube-context $WORKLOAD_CLUSTER2_CONTEXT list -A +``` + +The output will look like below. 
+ +```shell +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +blueprints-addon-external-secrets external-secrets 1 2024-05-07 05:26:52.028907405 +0000 UTC deployed external-secrets-0.9.9 v0.9.9 +test-helm-arm-1-29-blueprint default 1 2024-05-15 06:39:17.222351682 +0000 UTC deployed nginx-17.0.1 1.26.0 +``` -The above inconvenience has been fixed in the Blueprints framework as well as in the pattern, so please report such cases if you encounter them. This item is left here for reference in case customers modify the pattern to require additional permissions at build time. -2. This pattern consumes multiple Elastic IP addresses, because 3 VPCs with 3 subnets are created by this pattern. Make sure your account limit for EIP are increased to support additional 9 EIPs (1 per Subnets) diff --git a/lib/crossplane-argocd-gitops/aws-addon-clusters-stack.ts b/lib/crossplane-argocd-gitops/aws-addon-clusters-stack.ts new file mode 100644 index 00000000..61ea2379 --- /dev/null +++ b/lib/crossplane-argocd-gitops/aws-addon-clusters-stack.ts @@ -0,0 +1,16 @@ +import * as cdk from 'aws-cdk-lib'; +import { Construct } from 'constructs'; +// import * as sqs from 'aws-cdk-lib/aws-sqs'; + +export class AwsAddonClustersStack extends cdk.Stack { + constructor(scope: Construct, id: string, props?: cdk.StackProps) { + super(scope, id, props); + + // The code that defines your stack goes here + + // example resource + // const queue = new sqs.Queue(this, 'AwsAddonClustersQueue', { + // visibilityTimeout: cdk.Duration.seconds(300) + // }); + } +} diff --git a/lib/crossplane-argocd-gitops/common/construct-utils.ts b/lib/crossplane-argocd-gitops/common/construct-utils.ts new file mode 100644 index 00000000..9aa760aa --- /dev/null +++ b/lib/crossplane-argocd-gitops/common/construct-utils.ts @@ -0,0 +1,48 @@ +import { utils } from "@aws-quickstart/eks-blueprints"; +import { HelmAddOn } from '@aws-quickstart/eks-blueprints'; +import * as cdk from 'aws-cdk-lib'; + +export const logger = utils.logger; + +export function errorHandler(app: cdk.App, message: string, error?: Error) { + logger.info(message); + if(error){ + logger.error(error.name, error.message, error.stack); + } + new EmptyStack(app); +} + +export function configureApp(logLevel? : number): cdk.App { + logger.settings.minLevel = logLevel ?? 2; // debug., 3 info + logger.settings.hideLogPositionForProduction = true; + utils.userLog.info("=== Run make compile before each run, if any code modification was made. === \n\n"); + + const account = process.env.CDK_DEFAULT_ACCOUNT!; + const region = process.env.CDK_DEFAULT_REGION!; + + HelmAddOn.validateHelmVersions = true; + + return new cdk.App({context: { account, region }}); +} + +export async function prevalidateSecrets(pattern: string, region?: string, ...secrets: string[]) { + for(const secret of secrets) { + try { + await utils.validateSecret(secret, region ?? 
process.env.CDK_DEFAULT_REGION!); + } + catch(error) { + throw new Error(`${secret} secret must be setup for the ${pattern} pattern to work`); + } + } +} + +export class EmptyStack extends cdk.Stack { + constructor(scope: cdk.App, ...message: string[]) { + super(scope, "empty-error-stack"); + if(message) { + message.forEach(m => logger.info(m)); + } + } +} + + diff --git a/lib/crossplane-argocd-gitops/custom-addons/cluster-secret-store-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/cluster-secret-store-addon.ts new file mode 100644 index 00000000..18d2ee1d --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/cluster-secret-store-addon.ts @@ -0,0 +1,41 @@ +import { ClusterAddOn, ClusterInfo, ExternalsSecretsAddOn } from "@aws-quickstart/eks-blueprints"; +import { dependable } from "@aws-quickstart/eks-blueprints/dist/utils"; +import { Stack } from "aws-cdk-lib"; +import { KubernetesManifest } from "aws-cdk-lib/aws-eks"; +import { Construct } from "constructs"; + +export class ClusterSecretStoreAddOn implements ClusterAddOn { + + constructor(readonly clusterStoreName: string) {} + + @dependable(ExternalsSecretsAddOn.name) + deploy(clusterInfo: ClusterInfo): void | Promise { + const clusterSecretStore = new KubernetesManifest(clusterInfo.cluster, "ClusterSecretStore", { + cluster: clusterInfo.cluster, + manifest: [ + { + apiVersion: "external-secrets.io/v1beta1", + kind: "ClusterSecretStore", + metadata: { name: this.clusterStoreName }, + spec: { + provider: { + aws: { + service: "SecretsManager", + region: Stack.of(clusterInfo.cluster).region, + auth: { + jwt: { + serviceAccountRef: { + name: "external-secrets-sa", + namespace: "external-secrets", + }, + }, + }, + }, + }, + }, + }, + ], + }); + return Promise.resolve(clusterSecretStore); + } +} \ No newline at end of file diff --git a/lib/crossplane-argocd-gitops/custom-addons/crossplane-aws-provider-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/crossplane-aws-provider-addon.ts new file mode 100644 index 00000000..530fdcff --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/crossplane-aws-provider-addon.ts @@ -0,0 +1,81 @@ +import 'source-map-support/register'; +import * as blueprints from '@aws-quickstart/eks-blueprints'; +import * as eks from "aws-cdk-lib/aws-eks"; +import { Construct } from 'constructs'; +import { dependable } from '@aws-quickstart/eks-blueprints/dist/utils'; +import { UpboundCrossplaneAddOn } from './upbound-crossplane-addon'; + +export class CrossplaneAwsProviderAddon implements blueprints.ClusterAddOn { + id?: string | undefined; + @dependable(UpboundCrossplaneAddOn.name) + deploy(clusterInfo: blueprints.ClusterInfo): void | Promise { + const cluster = clusterInfo.cluster; + const crossplaneIRSARole = clusterInfo.getAddOnContexts().get("UpboundCrossplaneAddOn")!["arn"]; + const controllerConfig = new eks.KubernetesManifest(clusterInfo.cluster.stack, "ControllerConfig", { + cluster: cluster, + manifest: [ + { + apiVersion: "pkg.crossplane.io/v1alpha1", + kind: "ControllerConfig", + metadata: { + name: "aws-config", + annotations: { + "eks.amazonaws.com/role-arn": crossplaneIRSARole + } + }, + spec: {}, + }, + ], + }); + + const awsEksProvider = new eks.KubernetesManifest(clusterInfo.cluster.stack, "EKSProvider", { + cluster: cluster, + manifest: [ + { + apiVersion: "pkg.crossplane.io/v1", + kind: "Provider", + metadata: { + name: "provider-aws-eks", + }, + spec: { + package: "xpkg.upbound.io/upbound/provider-aws-eks:v1.1.0", + controllerConfigRef: { + name: "aws-config" + } + }, + }, 
+ ], + }); + + awsEksProvider.node.addDependency(controllerConfig); + + // const cfnWaitConditionHandle = new cloudformation.CfnWaitConditionHandle(clusterInfo.cluster.stack, 'MyCfnWaitConditionHandle'); + + // new cloudformation.CfnWaitCondition(clusterInfo.cluster.stack, "EKSProviderWaitCondition", { + // count: 1, + // handle: cfnWaitConditionHandle.ref, + // timeout: "120", + // }).node.addDependency(awsEksProvider); + + // const eksProviderConfig = new eks.KubernetesManifest(clusterInfo.cluster.stack, "EKSProviderConfig", { + // cluster: cluster, + // manifest: [ + // { + // apiVersion: "aws.upbound.io/v1beta1", + // kind: "ProviderConfig", + // metadata: { + // name: "default", + // }, + // spec: { + // credentials: { + // source: "IRSA" + // } + // }, + // }, + // ], + // }); + + // eksProviderConfig.node.addDependency(awsEksProvider); + return Promise.resolve(controllerConfig); + } +} diff --git a/lib/crossplane-argocd-gitops/custom-addons/crossplane-helm-provider-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/crossplane-helm-provider-addon.ts new file mode 100644 index 00000000..d13fff0c --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/crossplane-helm-provider-addon.ts @@ -0,0 +1,80 @@ +import 'source-map-support/register'; +import * as blueprints from '@aws-quickstart/eks-blueprints'; +import * as eks from "aws-cdk-lib/aws-eks"; +import { Construct } from 'constructs'; +import { dependable } from '@aws-quickstart/eks-blueprints/dist/utils'; +import { UpboundCrossplaneAddOn } from './upbound-crossplane-addon'; + +export class CrossplaneHelmProviderAddon implements blueprints.ClusterAddOn { + id?: string | undefined; + @dependable(UpboundCrossplaneAddOn.name) + deploy(clusterInfo: blueprints.ClusterInfo): void | Promise { + const cluster = clusterInfo.cluster; + const crossplaneIRSARole = clusterInfo.getAddOnContexts().get("UpboundCrossplaneAddOn")!["arn"]; + + const role_binding = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRoleBinding", + metadata: { + name: "helm-provider" + }, + subjects: [ + { + kind: "ServiceAccount", + name: "helm-provider", + namespace: "upbound-system" + } + ], + roleRef: { + kind: "ClusterRole", + name: "cluster-admin", + apiGroup: "rbac.authorization.k8s.io" + } + }; + + const runtime_config = { + apiVersion: "pkg.crossplane.io/v1beta1", + kind: "DeploymentRuntimeConfig", + metadata: { + name: "helm-runtime-config" + }, + spec: { + deploymentTemplate: { + spec: { + replicas: 1, + selector: {}, + template: {} + } + }, + serviceAccountTemplate: { + metadata: { name: "helm-provider" } + } + } + }; + + const provider = { + apiVersion: "pkg.crossplane.io/v1", + kind: "Provider", + metadata: { name: "helm-provider" }, + spec: { + package: "xpkg.upbound.io/crossplane-contrib/provider-helm:v0.18.1", + runtimeConfigRef: { + name: "helm-runtime-config" + } + } + }; + + const runtimeHelmConfig = new eks.KubernetesManifest(clusterInfo.cluster.stack, "runtimeHelmConfig", { + cluster: cluster, + manifest: [role_binding, runtime_config] + }); + + const awsHelmProvider = new eks.KubernetesManifest(clusterInfo.cluster.stack, "providerHelmResource", { + cluster: cluster, + manifest: [provider] + }); + + awsHelmProvider.node.addDependency(runtimeHelmConfig); + return Promise.resolve(runtimeHelmConfig); + } +} diff --git a/lib/crossplane-argocd-gitops/custom-addons/crossplane-k8s-provider-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/crossplane-k8s-provider-addon.ts new file mode 100644 index 00000000..0bc6b6d5 --- /dev/null 
+++ b/lib/crossplane-argocd-gitops/custom-addons/crossplane-k8s-provider-addon.ts @@ -0,0 +1,79 @@ +import 'source-map-support/register'; +import * as blueprints from '@aws-quickstart/eks-blueprints'; +import * as eks from "aws-cdk-lib/aws-eks"; +import { Construct } from 'constructs'; +import { dependable } from '@aws-quickstart/eks-blueprints/dist/utils'; +import { UpboundCrossplaneAddOn } from './upbound-crossplane-addon'; + +export class CrossplaneK8sProviderAddon implements blueprints.ClusterAddOn { + id?: string | undefined; + @dependable(UpboundCrossplaneAddOn.name) + deploy(clusterInfo: blueprints.ClusterInfo): void | Promise { + const cluster = clusterInfo.cluster; + const crossplaneIRSARole = clusterInfo.getAddOnContexts().get("UpboundCrossplaneAddOn")!["arn"]; + + const role_binding1 = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRoleBinding", + metadata: { name: "kubernetes-provider" }, + subjects: [ + { + kind: "ServiceAccount", + name: "kubernetes-provider", + namespace: "upbound-system" + } + ], + roleRef: { + kind: "ClusterRole", + name: "cluster-admin", + apiGroup: "rbac.authorization.k8s.io" + } + }; + + const runtime_config1 = { + apiVersion: "pkg.crossplane.io/v1beta1", + kind: "DeploymentRuntimeConfig", + metadata: { + name: "kubernetes-runtime-config" + }, + spec: { + deploymentTemplate: { + spec: { + replicas: 1, + selector: {}, + template: {} + } + }, + serviceAccountTemplate: { + metadata: { name: "kubernetes-provider" } + } + } + }; + + const providerK8sResource1 = { + apiVersion: "pkg.crossplane.io/v1", + kind: "Provider", + metadata: { name: "kubernetes-provider" }, + spec: { + package: "xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.13.0", + runtimeConfigRef: { + name: "kubernetes-runtime-config" + } + } + }; + + const runtimeK8sConfig1 = new eks.KubernetesManifest(clusterInfo.cluster.stack, "runtimeK8sConfig1", { + cluster: cluster, + manifest: [role_binding1, runtime_config1] + }); + + const awsK8sProvider1 = new eks.KubernetesManifest(clusterInfo.cluster.stack, "awsK8sProvider1", { + cluster: cluster, + manifest: [providerK8sResource1] + }); + + awsK8sProvider1.node.addDependency(runtimeK8sConfig1); + + return Promise.resolve(runtimeK8sConfig1); + } +} diff --git a/lib/crossplane-argocd-gitops/custom-addons/mgmt-role-teams.ts b/lib/crossplane-argocd-gitops/custom-addons/mgmt-role-teams.ts new file mode 100644 index 00000000..f5d9991d --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/mgmt-role-teams.ts @@ -0,0 +1,13 @@ + +import {PlatformTeam} from "@aws-quickstart/eks-blueprints"; + +export class ProviderMgmtRoleTeam extends PlatformTeam { + constructor(accountID :string) { + // compute the ARN explicitly since we know its name + const computedProviderRoleArn = `arn:aws:iam::${accountID}:role/provider-aws-management-cluster`; + super( { + name: computedProviderRoleArn, + userRoleArn: computedProviderRoleArn + }); + } +} diff --git a/lib/crossplane-argocd-gitops/custom-addons/sleek-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/sleek-addon.ts new file mode 100644 index 00000000..357652a9 --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/sleek-addon.ts @@ -0,0 +1,99 @@ +import { ClusterInfo } from "@aws-quickstart/eks-blueprints"; +import { CoreAddOn, CoreAddOnProps } from "@aws-quickstart/eks-blueprints/dist/addons/core-addon"; +import { KubernetesManifest } from "aws-cdk-lib/aws-eks"; +import { Construct } from "constructs"; +import { ClusterSecretStoreAddOn } from "./cluster-secret-store-addon"; 
+import { createNamespace } from "@aws-quickstart/eks-blueprints/dist/utils"; + +export type SecretMapping = { + [k: string]: string[]; +} + +export interface SleekAddOnProps extends Omit { + secretMappings?: SecretMapping[]; // secret name / secret key + saName?: string; +} + +export class SleekAddOn extends CoreAddOn { + + constructor(readonly props: SleekAddOnProps) { + super({ + addOnName: props.addOnName, + version: props.version, + namespace: props.namespace, + saName: props.saName ?? "" // currently required field in the core addon, hence set to something + }); + } + + deploy(clusterInfo: ClusterInfo): Promise { + let secretStruct : Construct | undefined = undefined; + const ns = createNamespace(this.props.namespace!, clusterInfo.cluster); + const clusterStore: Promise = clusterInfo.getScheduledAddOn(ClusterSecretStoreAddOn.name)!; + this.props.secretMappings?.forEach( e => { + const secret = this.createSecret(e, clusterInfo); + if (clusterStore) { + clusterStore.then(e => secret.node.addDependency(e)); + } + if(secretStruct != null) { + secret.node.addDependency(secretStruct); + secret.node.addDependency(ns); + } + secretStruct = secret; + + }); + const result = super.deploy(clusterInfo); + if(secretStruct) { + result.then(e => e.node.addDependency(secretStruct!)); + } + return result; + } + + private createSecret(secretMapping: SecretMapping, clusterInfo: ClusterInfo) { + const secretName: string = Object.keys(secretMapping)[0]; + const keys: string[] = Object.values(secretMapping)[0]; + const awsSecretName = `${this.props.addOnName}-${secretName}`; + const data: KeyData[] = []; + keys.forEach(key => data.push({ + secretKey: key, + remoteRef: { + key: awsSecretName, + property: key + } + })); + const secret = new KubernetesManifest(clusterInfo.cluster, "secret-" + awsSecretName, { + cluster: clusterInfo.cluster, + manifest: [ + { + apiVersion: "external-secrets.io/v1beta1", + kind: "ExternalSecret", + metadata: { + name: secretName, + namespace: this.props.namespace + }, + spec: { + secretStoreRef: { + name: "eksa-secret-store", + kind: "ClusterSecretStore", + }, + target: { + name: secretName, + creationPolicy: "Merge", + }, + data + }, + }, + ], + }); + return secret; + } +} + +interface KeyData { + secretKey: string; + remoteRef: RemoteRefData; +} + +interface RemoteRefData { + key: string; + property: string; +} \ No newline at end of file diff --git a/lib/crossplane-argocd-gitops/custom-addons/upbound-crossplane-addon.ts b/lib/crossplane-argocd-gitops/custom-addons/upbound-crossplane-addon.ts new file mode 100644 index 00000000..9d44e2f9 --- /dev/null +++ b/lib/crossplane-argocd-gitops/custom-addons/upbound-crossplane-addon.ts @@ -0,0 +1,89 @@ +import 'source-map-support/register'; +import * as blueprints from '@aws-quickstart/eks-blueprints'; +import { Construct } from 'constructs'; +import { Values } from "@aws-quickstart/eks-blueprints/dist/spi"; +import merge from "ts-deepmerge"; +import { createNamespace } from '@aws-quickstart/eks-blueprints/dist/utils'; +import {ManagedPolicy, Policy, PolicyDocument} from 'aws-cdk-lib/aws-iam'; +import * as cdk from 'aws-cdk-lib'; + +/** + * User provided options for the Helm Chart + */ +export interface UpboundCrossplaneAddOnProps extends blueprints.HelmAddOnUserProps { + /** + * To Create Namespace using CDK + */ + createNamespace?: boolean; + } + +const defaultProps: blueprints.HelmAddOnProps = { + name: 'uxp', + release: 'blueprints-addon-uxp', + namespace: 'upbound-system', + chart: 'universal-crossplane', + version: 
+    repository: 'https://charts.upbound.io/stable',
+    values: {},
+};
+
+export class UpboundCrossplaneAddOn extends blueprints.HelmAddOn {
+
+    readonly options: UpboundCrossplaneAddOnProps;
+
+    constructor(props?: UpboundCrossplaneAddOnProps) {
+        super({...defaultProps, ...props});
+
+        this.options = this.props as UpboundCrossplaneAddOnProps;
+    }
+
+    deploy(clusterInfo: blueprints.ClusterInfo): void | Promise<Construct> {
+        const cluster = clusterInfo.cluster;
+
+        // Create the `upbound-system` namespace.
+        const ns = createNamespace(this.options.namespace!, cluster, true);
+
+        // Create the Crossplane AWS provider IRSA.
+        const serviceAccountName = "provider-aws";
+        const sa = cluster.addServiceAccount(serviceAccountName, {
+            name: serviceAccountName,
+            namespace: this.options.namespace!,
+        });
+        sa.node.addDependency(ns);
+        sa.role.attachInlinePolicy(new Policy(cluster.stack, 'eks-connect-policy', {
+            document: PolicyDocument.fromJson({
+                "Version": "2012-10-17",
+                "Statement": [
+                    {
+                        "Effect": "Allow",
+                        "Action": ["sts:AssumeRole"],
+                        "Resource": `arn:aws:iam::${cluster.stack.account}:role/eks-connector-role`
+                    },
+                    {
+                        "Effect": "Allow",
+                        "Action": ["eks:*"],
+                        "Resource": `*`
+                    }
+                ]
+            })}));
+
+        clusterInfo.addAddOnContext(UpboundCrossplaneAddOn.name, {
+            arn: sa.role.roleArn
+        });
+
+        new cdk.CfnOutput(cluster.stack, 'providerawssaiamrole',
+            {
+                value: sa.role.roleArn,
+                description: 'provider AWS IAM role',
+                exportName: 'providerawssaiamrole'
+            });
+
+        let values: Values = this.options.values ?? {};
+        values = merge(values, values);
+
+        const chart = this.addHelmChart(clusterInfo, values, false, true);
+        chart.node.addDependency(sa);
+        return Promise.resolve(chart);
+    }
+}
diff --git a/lib/crossplane-argocd-gitops/management-cluster-builder.ts b/lib/crossplane-argocd-gitops/management-cluster-builder.ts
new file mode 100644
index 00000000..90de7670
--- /dev/null
+++ b/lib/crossplane-argocd-gitops/management-cluster-builder.ts
@@ -0,0 +1,54 @@
+import { Construct } from 'constructs';
+import { utils } from '@aws-quickstart/eks-blueprints';
+import * as blueprints from '@aws-quickstart/eks-blueprints';
+import * as eks from 'aws-cdk-lib/aws-eks';
+import { ObservabilityBuilder } from '@aws-quickstart/eks-blueprints';
+import { UpboundCrossplaneAddOn } from './custom-addons/upbound-crossplane-addon';
+import { CrossplaneAwsProviderAddon } from './custom-addons/crossplane-aws-provider-addon';
+import { CrossplaneK8sProviderAddon } from './custom-addons/crossplane-k8s-provider-addon';
+import { CrossplaneHelmProviderAddon } from './custom-addons/crossplane-helm-provider-addon';
+
+
+export default class ManagementClusterBuilder {
+    readonly account: string;
+    readonly region: string;
+
+    constructor(account: string, region: string) {
+        this.account = account;
+        this.region = region;
+    }
+
+    create(scope: Construct, id: string, mngProps: blueprints.MngClusterProviderProps) {
+        blueprints.HelmAddOn.validateHelmVersions = false;
+
+        const addOns: Array<blueprints.ClusterAddOn> = [
+            new blueprints.addons.ExternalsSecretsAddOn,
+            new UpboundCrossplaneAddOn,
+            new CrossplaneAwsProviderAddon,
+            new CrossplaneK8sProviderAddon,
+            new CrossplaneHelmProviderAddon,
+            new blueprints.SecretsStoreAddOn,
+            new blueprints.ArgoCDAddOn({
+                bootstrapRepo: {
+                    repoUrl: "https://github.com/aws-samples/eks-blueprints-workloads",
+                    path: `./crossplane-arocd-gitops/bootstrap`,
+                    targetRevision: 'main',
+                    credentialsSecretName: 'github-token',
+                    credentialsType: 'TOKEN'
+                }
+            }),
+        ];
+
+        const clusterProvider = new blueprints.MngClusterProvider({...mngProps,
+            tags: {"scope": "addon"},
+            clusterName: id
+        });
+
+        return ObservabilityBuilder.builder()
+            .clusterProvider(clusterProvider)
+            .version(eks.KubernetesVersion.V1_28)
+            .enableNativePatternAddOns()
+            .enableControlPlaneLogging()
+            .addOns(...addOns);
+    }
+}
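Usage note (illustrative, not part of the diff): the snippet below is a minimal sketch of how SleekAddOn might be configured. The add-on name "demo-addon", its version, namespace, service account, and the Secrets Manager secret "demo-addon-license" are hypothetical placeholders. Each SecretMapping entry names an ExternalSecret and the keys it pulls from the AWS secret `<addOnName>-<secretName>` through the "eksa-secret-store" ClusterSecretStore set up by ClusterSecretStoreAddOn.

import { SleekAddOn } from "../lib/crossplane-argocd-gitops/custom-addons/sleek-addon";

// Hypothetical configuration: creates an ExternalSecret named "license" in
// namespace "demo-addon-ns" that reads the "licenseKey" property from the
// AWS Secrets Manager secret "demo-addon-license" before the add-on is deployed.
const sleek = new SleekAddOn({
    addOnName: "demo-addon",      // hypothetical EKS add-on name
    version: "1.0.0",             // hypothetical add-on version
    namespace: "demo-addon-ns",   // hypothetical namespace
    saName: "demo-addon-sa",      // hypothetical service account
    secretMappings: [{ license: ["licenseKey"] }]
});

The resulting instance would be passed to the blueprint's addOns(...) list alongside ClusterSecretStoreAddOn, which it depends on for the ClusterSecretStore.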