diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..e7e21db --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,30 @@ + + +## Description + +Please provide a brief summary of the changes made in this pull request. + +## Type of change + +Please check the relevant options: + +- [ ] New feature (a change which adds functionality) +- [ ] Bug fix (a change which fixes an issue) +- [ ] Refactoring (code cleanup or optimization) +- [ ] Testing (enhanced test coverage, or test improvement) +- [ ] Documentation (changes to documentation) +- [ ] Other (something that's not listed here - please explain in the description or additional information) + +## Checklist + +Please check or cross through each option: + +- [ ] My code aligns with the style of this project +- [ ] I have added comments in hard to understand areas +- [ ] I have added tests that prove my change works +- [ ] I have updated the documentation +- [ ] The CI build is passing for my PR + +## Additional Information + +Please provide any additional information or context related to this pull request. diff --git a/.github/workflows/ci-pipeline.yaml b/.github/workflows/ci-pipeline.yaml index 1dc741f..829fcf8 100644 --- a/.github/workflows/ci-pipeline.yaml +++ b/.github/workflows/ci-pipeline.yaml @@ -47,9 +47,10 @@ jobs: - name: Run End to End Tests run: | go mod tidy - go test -v -timeout 10m + go test -v -timeout 30m working-directory: tests/end-to-end-tests env: + GOMAXPROCS: 8 ARM_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} ARM_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} ARM_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} diff --git a/.github/workflows/docs-pipeline.yaml b/.github/workflows/docs-pipeline.yaml new file mode 100644 index 0000000..8a5cb14 --- /dev/null +++ b/.github/workflows/docs-pipeline.yaml @@ -0,0 +1,39 @@ +name: Deploy Documentation + +permissions: + contents: write + +on: + push: + branches: + - main + +jobs: + deploy: + name: Deploy Documentation + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Configure Git Credentials + run: | + git config user.name github-actions[bot] + git config user.email 41898282+github-actions[bot]@users.noreply.github.com + + - uses: actions/setup-python@v5 + with: + python-version: 3.x + + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + + - uses: actions/cache@v4 + with: + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + + - run: pip install mkdocs-material + + - run: mkdocs gh-deploy --force \ No newline at end of file diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 0000000..f37dbdc --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,5 @@ +{ + "default": true, + "MD007": { "indent": 4 }, + "MD013": false +} \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2acc0fa..14471bc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,42 +1,19 @@ -# Contribution Guidelines +# Contributing -Before you start contributing to the project, please read the following guidelines. We follow the below guidelines to contribute to this repository. +If you want to contribute to the project, raise a PR on GitHub. -## How To Contribute +We use pre-commit to run analysis and checks on the changes being committed. Take the following steps to ensure the pre-commit hook is installed and working: -* **DO** submit all changes via pull requests (PRs). 
They will be reviewed and potentially merged after a peer review from at least one maintainer. -* **DO** give PRs short but descriptive names. -* **DO** write a useful but brief description of what the PR is for. -* **DO** ensure each commit successfully builds. The entire PR must pass all checks before it will be merged. -* **DO** address PR feedback in additional commits instead of amending. -* **DO NOT** submit "work in progress" PRs. Please mark them as *Draft*. A PR should only be submitted when it is considered ready for review. -* **DO NOT** mix independent and unrelated changes in one PR. -* If there is a major upgrade or a feature addition to the project, it might be a good idea to get started with a Github issue or a Github discussion to discuss the feature or the upgrade before starting a PR on the upgrade. +1. Install git + * Ensure the git `bin` directory has been added to %PATH%: `C:\Program Files\Git\bin` -## Pull Requests +1. Install Python + * Ensure the python `bin` directory has been added to %PATH% -We use pull requests to review and merge code into the `main` branch. +1. Install pre-commit + * Open a terminal and navigate to the repository root directory + * Install pre-commit with the following command: `pip install pre-commit` + * Install pre-commit within the repository with the following command: `pre-commit install` + * Run `pre-commit run --all-files` to check pre-commit is working -Please follow the steps below to create a pull request: - -1. Fork the repository from the `main` branch ( Refer steps to [create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo)). We use `main` branch only for sync'ing forks. All development contributions should be made to the `development` branch. - -1. Create a new branch (example `development`) in your forked repository for your feature or bug fix. Detailed branching and development strategy is outline in [this guide](docs/setup-guide.md#diagram-for-forking-and-syncing). - -1. Make sure the pre-commit hook is installed and working: - 1. Install pre-commit using this [link](https://pre-commit.com/#installation) - 1. Run `pre-commit run --all-files` from the root of the repository. - 1. Follow [these](https://github.com/pocc/pre-commit-hooks?tab=readme-ov-file#information-about-the-commands) instructions to install the commands - -1. Run tests, linters and checks locally and make sure the pipeline is passing - -1. Make sure the pipeline is passing - -1. Make sure you have each PR reviewed - -1. Once the PR is approved, merge it to the `main` branch, preferably using `Squash and Merge` - -## Coding Style - -We use [.NET source code analyzer](https://learn.microsoft.com/en-us/dotnet/fundamentals/code-analysis/overview?tabs=net-8) to enforce code style. -Please make sure you have the pre-commit hook installed and working. +> For full details [see this link](https://pre-commit.com/#installation) diff --git a/LICENCE.md b/LICENCE.md index 0ead432..ed56eb2 100644 --- a/LICENCE.md +++ b/LICENCE.md @@ -1,6 +1,6 @@ # MIT Licence -Copyright (c) 2023 Crown Copyright NHS England. +Copyright (c) 2024 Crown Copyright NHS England. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index bfd89e1..e6cdf44 100644 --- a/README.md +++ b/README.md @@ -1,166 +1,18 @@ # Azure Immutable Backup -## Introduction - -This repository is a blueprint accelerator solution that supports teams in taking immutable backups in Azure. It's aim is to give developers tooling and templates that can be used to create, configure and manage immutable backups using Azure Backup Vault. - -The following technologies are used: - -* Azure -* Azure CLI -* Azure Pipelines -* Terraform -* Go (used for end-to-end testing) - -### Outstanding Questions - -* The design doesn't cater for the requirement to store the backup data in a separate account (or subscription in Azure lingo). We can however support GeoRedundant storage across regions - will this suffice? Otherwise we need to look at a solution for this problem. -* The design currently caters for a scenario where a vault could be unlocked initially, and later locked. Do we want this? - -## Design - -The repository consists of: - -* Terraform modules to create the infrastructure -* Azure Pipelines to manage the deployment - -### Infrastructure - -A solution which utilises the blueprint will consist of the following types of Azure resources - -* Azure backup vault and backup policies/instances -* Azure policy definitions and assignments -* Azure monitor -* Entra ID -* Tfstate storage account -* Resources that need to be backed up - -#### Architecture - -The following diagram illustrates the high level architecture - -![Azure Architecture](./docs/azure-architecture.drawio.svg) - -1. The **backup vault** stores the backups of a variety of different Azure resources. A number of **backup policies** are registered on the vault which define the configuration for a backup such as the retention period and schedule. A number of **backup instances** are then registered with a policy applied that trigger the backups. The vault is configured as **immutable** and **locked** to enforce tamper proof backups. The **backup vault** resides in it's own isolated **resource group**. - -1. **Backup instances** link the resources to be backed up and an associated **backup policy**, and one registered trigger the backup process. The resources directly supported are Azure Blob Storage, Managed Disks, PostgreSQL (single server and flexible server) and AKS instances, although other resources are supported indirectly through Azure Storage (see **point 8** for more details). **Backup instances** are automatically registered by **Azure Policy** by creating resources to be backed up with the required tags - they are not manually registered (see **point 4** for more details). - -1. The **backup vault** accesses resources to be backed up through a **System Assigned Managed Identity** - a secure way of enabling communication between defined resources without managing a secret/password. The identity is given read access to the resources to be backed up by **Azure Policy** at the point that the backup instance is registered. - -1. **Azure Policy** is a feature that helps enforce rules and standards across an Azure tenant. In this case it is used to ensure **backup instances** are created when resources that require backup have a defined tag. 
**Azure Policy** will also be used to validate the **immutability** configuration of the backup vault, for example ensuring it is not set excessively resulting in a developers holiday photos being stored for 100'000 years. - -1. **Backup administrators** are a group of identities that will have time limited read only access to the **backup vault** in order to access and restore backups as required. Assignment of the role will be secured by **PIM** - Privileged Identity Management, which requires a second identity to authorise the role assignment, which is then assigned on a time limited bases. The **backup administrators** will also be responsible for monitoring and auditing backup activity via **Azure Monitor** (see **point 7** for more details). - -1. The solution requires a user account with elevated subscription contributor permissions that can create the backup resources (such as the backup **resource group**, **backup vault**, and **backup policies**). This identity will be implemented as a **federated credential** of an **app registration**, which is like a passport that lets you access different services without needing a separate password. This removes the need to manage a secret/password once configured. The identity also needs writer access to a dedicated **Storage Account** in order to read and write the **terraform** infrastructure state. - -1. All backup telemetry will flow into **Azure Monitor** for monitoring and auditing purposes. This will provide access to data such as backup logs and metrics, and provide observability over the solution. Should the need arise, the telemetry could also be integrated into an external monitoring solution. - -1. Some resources such as Azure SQL and Azure Key Vault are not directly supported by Azure **backup vault**, but can be incorporated via a supplementary process that backs up the data to Azure Blob Storage first. In the case of Azure SQL, a typical scenario could be an Azure Logic App that takes a backup of Azure SQL on a regular basis and stores the data in Azure Blob Storage. It is the aspiration of this solution to provide guidance and tooling that teams can adopt to support these scenarios. - -### Security Design - -The following diagram illustrates the security design of the solution: - -![Azure Architecture](./docs/security-design.drawio.svg) - -See the following links for further details on some concepts relevant to the design: - -* [Azure Multi-user Authorisation (MUA) and Resource Guard](https://learn.microsoft.com/en-us/azure/backup/multi-user-authorization-concept) -* [Backup Operator Role](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles/storage#backup-operator) -* [Azure Privileged Identity Management (PIM)](https://learn.microsoft.com/en-us/entra/id-governance/privileged-identity-management) - -#### Actors - -> NOTE: The roles listed below are not an exhaustive list, and are only those which are of relevance to the backup solution. - -1. Tenant Admin - - The tenant admin, aka the "global administrator", is typically a restricted group of technical specialists and/or senior engineering staff. They have full control over the Azure tenant including all subscriptions and identities. - - The actor holds the following roles: - - * Tenant Owner - * Tenant RBAC Administrator - - The following risks and mitigations should be considered: - - | Risks | Mitigations | - |-|-| - | Backup instance tampered with. | Use of PIM for temporary elevated privileges. | - | Backup policy tampered with. 
| Use of MUA for restricted backup operations. | - | Role based access tampered. | Dedicated admin accounts. | - | No other account able to override a malicious actor. | | +![CI](https://github.com/nhsdigital/az-backup/actions/workflows/ci-pipeline.yaml/badge.svg) -1. Subscription Admin - - The subscription admin is typically a restricted group of team leads who are deploying their teams solutions to the subscription. They have full control over the subscription, including the backup vault and the backup resources. - - The actor holds the following roles: - - * Subscription Owner - * Subscription RBAC Administrator - - The following risks and mitigations should be considered: - - | Risks | Mitigations | - |-|-| - | Backup instance tampered with.                       | Use of PIM for temporary elevated privileges. | - | Backup policy tampered with. | Use of MUA for restricted backup operations. | - | Role based access tampered. | | - -1. Deployment Service Principal - - The deployment service principal is an unattended credential used to deploy the solution from an automated process such as a pipeline or workflow. It has the permission to deploy resources (such as the backup vault) and assign the roles required for the solution to operate. - - The actor holds the following roles: - - * Subscription Contributor - * Subscription RBAC Administrator limited to the roles required by the deployment - - The following risks and mitigations should be considered: - - | Risks | Mitigations | - |-|-| - | Backup instance tampered with.                       | Use of PIM for temporary elevated privileges. | - | Backup policy tampered with. | Use of MUA for restricted backup operations. | - | Role based access tampered. | Secret scanning in pipeline. | - | Poor secret management. | Robust secret management procedures. | - -1. Backup Admin - - The backup admin is typically a group of team support engineers and/or technical specialists. They have the permission to monitor backup telemetry, and restore backups in order to recover services. - - The actor holds the following roles: - - * Subscription Backup Operator - -1. Security Admin - - The security admin is typically a group of cyber security specialists that are isolated from the other actors, by being in a different tenant or a highly restricted subscription. They have permissions to manage Resource Guard, which provide multi user authorisation to perform restricted operations on the backup vault, such as changing policies or stopping a backup instance. - - The actor holds the following roles: - - * Subscription Backup MUA Administrator - - | Risks | Mitigations | - |-|-| - | Elevated roles note revoked.                       | Use of PIM for temporary elevated privileges. | - | | Robust and well documented processes. | - - **NOTE: MUA without PIM requires a manual revocation of elevated permissions.** - -1. Backup Vault Managed Identity - - The backup vault managed identity is a "System Assigned" managed identity that performs backup vault operations. It is restricted to just the services defined at deployment, and cannot be compromised at runtime. +## Introduction - The actor holds the following roles: +This repository is a blueprint accelerator solution that supports teams in implementing immutable backups in Azure. 
- * Backup Vault Resource Writer - * Reader role on resources that require backup +It's aim is to give developers tooling and templates that can be used to create, configure and manage immutable backups using Azure Backup Vault in a proven way that's consistent across the organisation. -### Pipelines +See the following key docs for more information: -> TODO +* [Design](./docs/design.md) +* [Usage](./docs/usage.md) +* [Developer Guide](./docs/developer-guide.md) ## Repository Structure @@ -198,189 +50,19 @@ The repository consists of the following directories: Contains the different types of tests used to verify the solution. -## Developer Guide - -### Environment Setup - -The following are pre-reqs to working with the solution: - -* An Azure subscription -* An Azure identity which has been assigned the subscription Contributor role (required to create resources) -* [Azure CLI installed](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli) -* [Terraform installed](https://developer.hashicorp.com/terraform/install) -* [Go installed (to run the end-to-end tests)](https://go.dev/dl/) - -> Ensure all installed components have been added to the `%PATH%` - e.g. `az`, `terraform` and `go`. - -### Getting Started - -Take the following steps to get started in configuring and verifying the infrastructure for your development environment: - -1. Setup environment variables - - Set the following environment variables in order to connect to Azure in the following steps: - - ```pwsh - $env:ARM_TENANT_ID="" - $env:ARM_SUBSCRIPTION_ID="" - $env:ARM_CLIENT_ID="" - $env:ARM_CLIENT_SECRET="" - ``` - -2. Create Backend - - A backend (e.g. storage account) is required in order to store the tfstate and work with Terraform. +## Documentation - Run the following powershell script to create the backend with default settings: `./scripts/create-tf-backend.ps1`. This script will create a resource group called `rg-nhsbackup` containing a storage account called `satfstate`. +The documentation in markdown format resides in [`./docs`](./docs/index.md). It can also be built and served as a static site using [MkDocs](https://www.mkdocs.org/). - Make a note of the name of the storage account in the script output - it's generated with a random suffix, and you'll need it in the following steps to initialise the terraform. +To build and run the docs locally, install Docker then run the following command from the root of the repository: -3. Prepare Terraform Variables (Optional) - - If you want to override the Terraform variables, make a copy of `tfvars.template` and amend any default settings as required. - - In the next step add the following flag to the `terraform apply` command in order to use your variables: - - ```pwsh - -var-file=".tfvars - ``` - -4. Initialise Terraform - - Change the working directory to `./infrastructure`. - - Terraform can now be initialised by running the following command: - - ````pwsh - terraform init -backend=true -backend-config="resource_group_name=rg-nhsbackup" -backend-config="storage_account_name=" -backend-config="container_name=tfstate" -backend-config="key=terraform.tfstate" - ```` - -5. Apply Terraform - - Apply the Terraform code to create the infrastructure. - - The `-auto-approve` flag is used to automatically approve the plan, you can remove this flag to review the plan before applying. - - ```pwsh - terraform apply -auto-approve - ``` - - Now review the deployed infrastructure in the Azure portal. 
You will find a backup vault and some sample backup policies. - - The repo contains an `example` module which can be utilised to further extend the sample infrastructure with some resources and backup instances. To use this module for dev/test purposes, include the module in `main.tf` and run `terraform apply` again. - -### Running the Tests - -#### Integration Tests - -The test suite consists of a number Terraform HCL integration tests that use a mock azurerm provider. - -[See this link for more information.](https://developer.hashicorp.com/terraform/language/tests) - -Take the following steps to run the test suite: - -1. Initialise Terraform - - Change the working directory to `./tests/integration-tests`. - - Terraform can now be initialised by running the following command: - - ````pwsh - terraform init -backend=false - ```` - - > NOTE: There's no need to initialise a backend for the purposes of running the tests. - -2. Run the tests - - Run the tests with the following command: - - ````pwsh - terraform test - ```` - -#### End to End Tests - -The end to end tests are written in go, and use the [terratest library](https://terratest.gruntwork.io/) and the [Azure SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main). - -The tests depend on a connection to Azure so it can create an environment that the tests can be executed against - the environment is torn down once the test run has completed. - -See the following resources for docs and examples of terratest and the Azure SDK: - -* [Terratest docs](https://terratest.gruntwork.io/docs/) -* [Terratest repository](https://github.com/gruntwork-io/terratest) -* [Terratest test examples](https://github.com/gruntwork-io/terratest/tree/master/test) -* [Azure SDK](https://github.com/Azure/azure-sdk-for-go/tree/main) -* [Azure SDK Data Protection Module](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/resourcemanager/dataprotection/armdataprotection) - -To run the tests, take the following steps: - -1. Install go packages - - You only need to do this once when setting up your environment. - - Change the working directory to `./tests/end-to-end-tests`. - - Run the following command: - - ````pwsh - go mod tidy - ```` - -2. Setup environment variables - - The end-to-end test suite needs to login to Azure in order to execute the tests and therefore the following environment variables must be set. - - ```pwsh - $env:ARM_TENANT_ID="" - $env:ARM_SUBSCRIPTION_ID="" - $env:ARM_CLIENT_ID="" - $env:ARM_CLIENT_SECRET="" - $env:TF_STATE_RESOURCE_GROUP="rg-nhsbackup" - $env:TF_STATE_STORAGE_ACCOUNT="" - $env:TF_STATE_STORAGE_CONTAINER="terraform" - ``` - - > For the storage account name, the TF state backend should have been created during the [getting started guide](#getting-started), at which point the storage account will have been created and the name generated. - -3. 
Run the tests - - Run the tests with the following command: - - ````pwsh - go test -v -timeout 10m - ```` - -##### Debugging - -To debug the tests in vscode, add the following configuration to launch settings and run the configuration with the test file you want to debug open: - -```json -{ - "configurations": [ - { - "name": "Go Test", - "type": "go", - "request": "launch", - "mode": "test", - "program": "${file}", - "env": { - "ARM_TENANT_ID": "", - "ARM_SUBSCRIPTION_ID": "", - "ARM_CLIENT_ID": "", - "ARM_CLIENT_SECRET": "", - "TF_STATE_RESOURCE_GROUP": "rg-nhsbackup", - "TF_STATE_STORAGE_ACCOUNT": "", - "TF_STATE_STORAGE_CONTAINER": "terraform" - } - } - ] -} +```pwsh +docker-compose -f ./docs/docker-compose.yml up ``` -> For the storage account name, the TF state backend should have been created during the [getting started guide](#getting-started), at which point the storage account will have been created and the name generated. +Once the container is running, navigate to [http://localhost:8000](http://localhost:8000). -### Contributing +## Contributing If you want to contribute to the project, raise a PR on GitHub. @@ -399,55 +81,3 @@ We use pre-commit to run analysis and checks on the changes being committed. Tak * Run `pre-commit run --all-files` to check pre-commit is working > For full details [see this link](https://pre-commit.com/#installation) - -## CI Pipeline - -The CI pipeline builds and verifies the solution and runs a number of static code analysis steps on the code base. - -### End to End Testing - -Part of the build verification is the end to end testing step. This requires the pipeline to login to Azure in order to deploy an environment on which to execute the tests. - -A storage account must be provisioned with a container called `github-actions`, which is used by the CI pipeline to persist the terraform state. - -In order for the CI pipeline to login to Azure and use the terraform state storage account, the following GitHub actions secrets must be created: - -* `AZURE_TENANT_ID` - - The ID of an Azure tenant which can be used for the end to end test environment. - -* `AZURE_SUBSCRIPTION_ID` - - The ID of an Azure subscription which can be used for the end to end test environment. - -* `AZURE_CLIENT_ID` - - The client ID of an Azure service principal / app registration which can be used to authenticate with the end to end test environment. - - The app registration must have contributor permissions on the subscription in order to create resources. - -* `AZURE_CLIENT_SECRET` - - The client secret of an Azure app registration which can be used to authenticate with the end to end test environment. - -* `TF_STATE_RESOURCE_GROUP` - - The resource group which contains the TF state storage account. - -* `TF_STATE_STORAGE_ACCOUNT` - - The storage account used for TF state. - -* `TF_STATE_STORAGE_COMTAINER` - - The storage container used for TF state. 
- -### Static Code Analysis - -The following static code analysis checks are executed: - -* [Terraform format](https://developer.hashicorp.com/terraform/cli/commands/fmt) -* [Terraform lint](https://github.com/terraform-linters/tflint) -* [Checkov scan](https://www.checkov.io/) -* [Gitleaks scan](https://github.com/gitleaks/gitleaks) -* [Trivy vulnerability scan](https://github.com/aquasecurity/trivy) diff --git a/docs/azure-architecture.drawio.svg b/docs/assets/azure-architecture.drawio.svg similarity index 75% rename from docs/azure-architecture.drawio.svg rename to docs/assets/azure-architecture.drawio.svg index 73de9ea..b7882f4 100644 --- a/docs/azure-architecture.drawio.svg +++ b/docs/assets/azure-architecture.drawio.svg @@ -1,11 +1,16 @@ - + - + + + 4 + + + -
+
Subscription @@ -13,17 +18,17 @@
- + Subscription - - + + -
+
@@ -33,16 +38,16 @@
- + Resource Group - + -
+
@@ -52,16 +57,16 @@
- + Resource Group - + -
+
Postgres @@ -69,16 +74,16 @@
- + Postgr... - + -
+
Storage @@ -86,16 +91,16 @@
- + Storage - + -
+
AKS @@ -103,16 +108,16 @@
- + AKS - + -
+
Azure SQL @@ -120,20 +125,20 @@
- + Azure... - - - - - + + + + + -
+
Disk @@ -141,16 +146,16 @@
- + Disk - + -
+
@@ -163,18 +168,18 @@
- + Other... - - - + + + -
+
Key Vault @@ -182,21 +187,21 @@
- + Key Vault - - - - - - + + + + + + -
+
Backup @@ -206,16 +211,16 @@
- + Backup... - + -
+
Backup Vault @@ -223,53 +228,16 @@
- + Backup Vault - - + -
-
-
- Policy -
- Assignment -
-
-
-
- - Policy... - -
-
- - - - -
-
-
- Azure Policy -
-
-
-
- - Azure Poli... - -
-
- - - - -
+
Azure Monitor @@ -277,16 +245,16 @@
- + Azure Moni... - + -
+
@@ -296,17 +264,17 @@
- + Backup Poli... - - + + -
+
@@ -316,16 +284,16 @@
- + 1..n - + -
+
@@ -335,23 +303,23 @@
- + Backup Inst... - - - - - - - - + + + + + + + + -
+
@@ -363,18 +331,18 @@
- + 1..n - - - + + + -
+
Backup @@ -384,17 +352,17 @@
- + Backup... - - + + -
+
Authorise @@ -404,13 +372,13 @@
- + Authorise... - - + + @@ -429,11 +397,11 @@ - + -
+
Entra ID @@ -441,17 +409,17 @@
- + Entra ID - - + + -
+
Policy @@ -461,16 +429,16 @@
- + Policy... - + -
+
Managed @@ -480,18 +448,18 @@
- + Managed... - - - + + + -
+
@@ -501,18 +469,18 @@
- + PIM Access - - - + + + -
+
@@ -525,18 +493,18 @@
- + Reader... - - - + + + -
+
Terraform @@ -546,22 +514,20 @@
- + Terraform... - - - + -
+
- Writer + Contributor + RBAC Admin
Role
@@ -570,40 +536,17 @@
- - Writer... + + Contributor +... - - + + -
-
-
- - Contributor -
- Role -
-
-
-
-
-
- - Contributor... - -
-
- - - - - -
+
@@ -616,18 +559,18 @@
- + Reader... - - - + + + -
+
Deployment @@ -637,39 +580,19 @@
- + Deployment... - - - - - -
-
-
- Policy -
- Assignment -
-
-
-
- - Policy... - -
-
- - - - + + + + -
+
Take @@ -679,16 +602,16 @@
- + Take... - + -
+
1 @@ -696,16 +619,16 @@
- + 1 - + -
+
2 @@ -713,33 +636,16 @@
- + 2 - - - - -
-
-
- 4 -
-
-
-
- - 4 - -
-
- + -
+
3 @@ -747,76 +653,76 @@
- + 3 - + -
+
- 6 + 5
- - 6 + + 5 - + -
+
- 5 + 4
- - 5 + + 4 - + -
+
- 7 + 6
- - 7 + + 6 - + -
+
- 8 + 7
- - 8 + + 7 diff --git a/docs/assets/favicon.png b/docs/assets/favicon.png new file mode 100644 index 0000000..f1e0f20 Binary files /dev/null and b/docs/assets/favicon.png differ diff --git a/docs/assets/nhs-england-logo.svg b/docs/assets/nhs-england-logo.svg new file mode 100644 index 0000000..10695a4 --- /dev/null +++ b/docs/assets/nhs-england-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/security-design.drawio.svg b/docs/assets/security-design.drawio.svg similarity index 63% rename from docs/security-design.drawio.svg rename to docs/assets/security-design.drawio.svg index 1b4ea3e..3caa8ad 100644 --- a/docs/security-design.drawio.svg +++ b/docs/assets/security-design.drawio.svg @@ -1,646 +1,702 @@ - - - - - - - -
-
-
- Tenant -
-
-
-
- - Tenant - -
-
- - - - - - -
-
-
- Owner -
-
-
-
- - Owner - -
-
- - - - -
-
-
- Tenant Admin -
-
-
-
- - Tenant Ad... - -
-
- - - - -
-
-
- Subscription -
-
-
-
- - Subscription - -
-
- - - - - - -
-
-
- Owner -
-
-
-
- - Owner - -
-
- - - - - -
-
-
- Assign -
- Roles -
-
-
-
- - Assign... - -
-
- - - - - -
-
-
- Deploy -
- Resources -
-
-
-
- - Deploy... - -
-
- - - - -
-
-
- Deployment -
- Service Principal -
-
-
-
- - Deploymen... - -
-
- - - - -
-
-
- Tenant -
-
-
-
- - Tenant - -
-
- - - - - - -
-
-
- Authorise -
- Operations -
-
-
-
- - Authorise... - -
-
- - - - -
-
-
- Security Admin -
-
-
-
- - Security... - -
-
- - - - -
-
-
- Resource -
- Guard -
-
-
-
- - Resource... - -
-
- - - - -
-
-
- - Resource Group - -
-
-
-
- - Resource Group - -
-
- - - - - -
-
-
- - Resource Group - -
-
-
-
- - Resource Group - -
-
- - - - -
-
-
- Backup Vault -
-
-
-
- - Backup Vault - -
-
- - - - - - -
-
-
- Write -
-
-
-
- - Write - -
-
- - - - - -
-
-
- Read -
-
-
-
- - Read - -
-
- - - - -
-
-
- Managed -
- Identity -
-
-
-
- - Managed... - -
-
- - - - - -
-
-
- - Restore -
- Backups -
-
-
-
-
- - Restore... - -
-
- - - - - -
-
-
- Monitor -
- Backups -
-
-
-
- - Monitor... - -
-
- - - - -
-
-
- Backup -
- Admin -
-
-
-
- - Backup... - -
-
- - - - -
-
-
- Storage -
- Account -
-
-
-
- - Storage... - -
-
- - - - -
-
-
- Managed -
- Disk -
-
-
-
- - Managed... - -
-
- - - - -
-
-
- - - Other -
- Resources -
-
-
-
-
-
-
- - Other... - -
-
- - - - - -
-
-
- Owner -
-
-
-
- - Owner - -
-
- - - - -
-
-
- Subscription -
- Admin -
-
-
-
- - Subscript... - -
-
- - - - - -
-
-
- Authorise -
- Operations -
-
-
-
- - Authorise... - -
-
- - - - -
-
-
- 1 -
-
-
-
- - 1 - -
-
- - - - -
-
-
- 2 -
-
-
-
- - 2 - -
-
- - - - -
-
-
- 3 -
-
-
-
- - 3 - -
-
- - - - -
-
-
- 4 -
-
-
-
- - 4 - -
-
- - - - -
-
-
- 6 -
-
-
-
- - 6 - -
-
- - - - -
-
-
- 5 -
-
-
-
- - 5 - -
-
- - - - -
-
-
- Azure Monitor -
-
-
-
- - Azure Moni... - -
-
-
- - - - - Text is not SVG - cannot display - - - + + + + + + + +
+
+
+ Tenant +
+
+
+
+ + Tenant + +
+
+ + + + + + +
+
+
+ Owner +
+
+
+
+ + Owner + +
+
+ + + + +
+
+
+ Tenant Admin +
+
+
+
+ + Tenant Ad... + +
+
+ + + + +
+
+
+ Subscription +
+
+
+
+ + Subscription + +
+
+ + + + + + +
+
+
+ Owner +
+
+
+
+ + Owner + +
+
+ + + + + +
+
+
+ Assign +
+ Roles +
+
+
+
+ + Assign... + +
+
+ + + + + +
+
+
+ Deploy +
+ Resources +
+
+
+
+ + Deploy... + +
+
+ + + + +
+
+
+ Deployment +
+ Service Principal +
+
+
+
+ + Deploymen... + +
+
+ + + + +
+
+
+ Tenant +
+
+
+
+ + Tenant + +
+
+ + + + + + +
+
+
+ Authorise +
+ Operations +
+
+
+
+ + Authorise... + +
+
+ + + + +
+
+
+ Security Admin +
+
+
+
+ + Security... + +
+
+ + + + +
+
+
+ Resource +
+ Guard +
+
+
+
+ + Resource... + +
+
+ + + + +
+
+
+ + Resource Group + +
+
+
+
+ + Resource Group + +
+
+ + + + + +
+
+
+ + Resource Group + +
+
+
+
+ + Resource Group + +
+
+ + + + +
+
+
+ Backup Vault +
+
+
+
+ + Backup Vault + +
+
+ + + + + + +
+
+
+ Write +
+
+
+
+ + Write + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + +
+
+
+ Managed +
+ Identity +
+
+
+
+ + Managed... + +
+
+ + + + + +
+
+
+ + Restore +
+ Backups +
+
+
+
+
+ + Restore... + +
+
+ + + + + +
+
+
+ Monitor +
+ Backups +
+
+
+
+ + Monitor... + +
+
+ + + + +
+
+
+ Backup +
+ Admin +
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ Storage +
+ Account +
+
+
+
+ + Storage... + +
+
+ + + + +
+
+
+ Managed +
+ Disk +
+
+
+
+ + Managed... + +
+
+ + + + +
+
+
+ + + Other +
+ Resources +
+
+
+
+
+
+
+ + Other... + +
+
+ + + + + +
+
+
+ Owner +
+
+
+
+ + Owner + +
+
+ + + + +
+
+
+ Subscription +
+ Admin +
+
+
+
+ + Subscript... + +
+
+ + + + + +
+
+
+ Authorise +
+ Operations +
+
+
+
+ + Authorise... + +
+
+ + + + +
+
+
+ 1 +
+
+
+
+ + 1 + +
+
+ + + + +
+
+
+ 2 +
+
+
+
+ + 2 + +
+
+ + + + +
+
+
+ 3 +
+
+
+
+ + 3 + +
+
+ + + + +
+
+
+ 4 +
+
+
+
+ + 4 + +
+
+ + + + +
+
+
+ 7 +
+
+
+
+ + 7 + +
+
+ + + + +
+
+
+ 6 +
+
+
+
+ + 6 + +
+
+ + + + +
+
+
+ Azure Monitor +
+
+
+
+ + Azure Moni... + +
+
+ + + + +
+
+
+ Backup +
+ Monitor +
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ 5 +
+
+
+
+ + 5 + +
+
+ + + + + +
+
+
+ Monitor +
+ Backups +
+
+
+
+ + Monitor... + +
+
+
+ + + + + Text is not SVG - cannot display + + +
\ No newline at end of file diff --git a/docs/assets/terraform-design.drawio.svg b/docs/assets/terraform-design.drawio.svg new file mode 100644 index 0000000..dfdfc93 --- /dev/null +++ b/docs/assets/terraform-design.drawio.svg @@ -0,0 +1,625 @@ + + + + + + + +
+
+
+ Az-Backup Module +
+
+
+
+ + Az-Backup Module + +
+
+ + + + +
+
+
+ + Backup Modules + +
+
+
+
+ + Backup Modules + +
+
+ + + + +
+
+
+ + Storage Account Module + +
+
+
+
+ + Storage Account Module + +
+
+ + + + +
+
+
+ 1 +
+
+
+
+ + 1 + +
+
+ + + + +
+
+
+ + Backup +
+ Policy +
+
+
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ + Resource Group + +
+
+
+
+ + Resource Gr... + +
+
+ + + + + + +
+
+
+ + Backup Vault + +
+
+
+
+ + Backup Vault + +
+
+ + + + + + + + +
+
+
+ + Backup Modules + +
+
+
+
+ + Backup Modu... + +
+
+ + + + +
+
+
+ + Backup Instance +
+
+
+
+
+
+ + Backup Instanc... + +
+
+ + + + +
+
+
+ Consuming Application +
+
+
+
+
+ + Consuming Application + +
+
+ + + + + + +
+
+
+ + Role Assignment + +
+
+
+
+ + Role Assign... + +
+
+ + + + +
+
+
+ + + Resource A + + +
+
+
+
+ + Resource A + +
+
+ + + + +
+
+
+ + + Resource B + + +
+
+
+
+ + Resource B + +
+
+ + + + +
+
+
+ + + Resource C + + +
+
+
+
+ + Resource C + +
+
+ + + + +
+
+
+ + Managed Disk Module + +
+
+
+
+ + Managed Disk Module + +
+
+ + + + +
+
+
+ + Backup +
+ Policy +
+
+
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ + Backup Instance +
+
+
+
+
+
+ + Backup Instanc... + +
+
+ + + + + + +
+
+
+ + PostgreSQL Module + +
+
+
+
+ + PostgreSQL Module + +
+
+ + + + +
+
+
+ + Backup +
+ Policy +
+
+
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ + Backup Instance +
+
+
+
+
+
+ + Backup Instanc... + +
+
+ + + + + + +
+
+
+ + Azure Kubernetes Module + +
+
+
+
+ + Azure Kubernetes Module + +
+
+ + + + +
+
+
+ + Backup +
+ Policy +
+
+
+
+
+
+ + Backup... + +
+
+ + + + +
+
+
+ + Backup Instance +
+
+
+
+
+
+ + Backup Instanc... + +
+
+ + + + + + + +
+
+
+ +
+
+
+
+
+
+ +
+
+ + + + +
+
+
+ +
+
+
+
+
+
+ +
+
+ + + + +
+
+
+ +
+
+
+
+
+
+ +
+
+ + + + +
+
+
+ 3 +
+
+
+
+ + 3 + +
+
+ + + + +
+
+
+ 2 +
+
+
+
+ + 2 + +
+
+ + + + +
+
+
+ 4 +
+
+
+
+ + 4 + +
+
+ + + + +
+
+
+ 5 +
+
+
+
+ + 5 + +
+
+ + + + + + +
+
+
+ + Az-Backup + +
+
+
+
+ + Az-Backup + +
+
+ + + +
+ + + + + Text is not SVG - cannot display + + + +
\ No newline at end of file
diff --git a/docs/design.md b/docs/design.md
new file mode 100644
index 0000000..2167218
--- /dev/null
+++ b/docs/design.md
@@ -0,0 +1,168 @@
+
+
+# Design
+
+## Overview
+
+A solution which utilises the blueprint will consist of the following types of Azure resources:
+
+* Azure backup vault and backup policies/instances
+* Azure policy definitions and assignments
+* Azure monitor
+* Entra ID
+* Tfstate storage account
+* Resources that need to be backed up
+
+## Architecture
+
+The following diagram illustrates the high level architecture:
+
+![Azure Architecture](assets/azure-architecture.drawio.svg)
+
+### Description
+
+1. The **backup vault** stores the backups of a variety of different Azure resources. A number of **backup instances** are created in the vault, which have a policy applied that defines the configuration for a backup such as the retention period and schedule. The vault is configured as **immutable** and **locked** to enforce tamper proof backups. The **backup vault** resides in its own isolated **resource group**.
+
+1. **Backup instances** link the resources to be backed up with an associated **backup policy**, and once registered they trigger the backup process. The resources directly supported are Azure Blob Storage, Managed Disks, PostgreSQL (single server and flexible server) and AKS instances, although other resources are supported indirectly through Azure Storage (see **point 7** for more details). **Backup instances** are created based on the variables supplied to the module, which include configuration and details of the resources that need to be backed up.
+
+1. The **backup vault** accesses resources to be backed up through a **System Assigned Managed Identity** - a secure way of enabling communication between defined resources without managing a secret/password. The identity is assigned the necessary roles on the resources that require backup.
+
+1. **Backup administrators** are a group of identities that will have time limited read only access to the **backup vault** in order to access and restore backups as required. The **backup administrators** will also be responsible for monitoring and auditing backup activity via **Azure Monitor** (see **point 6** for more details), although this task may be delegated to service staff performing the role of **backup monitors**.
+
+1. The solution requires a user account with elevated subscription contributor permissions that can create the backup resources (such as the backup **resource group** and **backup vault**) and assign roles to the resources that require backup. This identity should be implemented as a **federated credential** of an **app registration**, which is like a passport that lets you access different services without needing to manage a separate password. The identity also needs writer access to a dedicated **Storage Account** in order to read and write the **terraform** infrastructure state. See the [deployment identity](usage.md#deployment-identity) section for more details.
+
+1. All backup telemetry will flow into **Azure Monitor** for monitoring and auditing purposes. This will provide access to data such as backup logs and metrics, and provide observability over the solution. Should the need arise, the telemetry could also be integrated into an external monitoring solution.
+
+1. 
Some resources such as Azure SQL and Azure Key Vault are not directly supported by Azure **backup vault**, but can be incorporated via a supplementary process that backs up the data to Azure Blob Storage first. In the case of Azure SQL, a typical scenario could be an Azure Logic App that takes a backup of Azure SQL on a regular basis and stores the data in Azure Blob Storage. It is the aspiration of this solution to provide guidance and tooling that teams can adopt to support these scenarios. + +## Security Design + +The following diagram illustrates the security design of the solution: + +![Security Design](assets/security-design.drawio.svg) + +See the following links for further details on some concepts relevant to the design: + +* [Azure Multi-user Authorisation (MUA) and Resource Guard](https://learn.microsoft.com/en-us/azure/backup/multi-user-authorization-concept) +* [Backup Operator Role](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles/storage#backup-operator) +* [Azure Privileged Identity Management (PIM)](https://learn.microsoft.com/en-us/entra/id-governance/privileged-identity-management) + +### Actors + +> NOTE: The roles listed below are not an exhaustive list, and are only those which are of relevance to the backup solution. + +1. Tenant Admin + + The tenant admin, aka the "global administrator", is typically a restricted group of technical specialists and/or senior engineering staff. They have full control over the Azure tenant including all subscriptions and identities. + + The actor holds the following roles: + + * Tenant Owner + * Tenant RBAC Administrator + + The following risks and mitigations should be considered: + + | Risks | Mitigations | + |-|-| + | Backup instance tampered with. | Use of PIM for temporary elevated privileges. | + | Backup policy tampered with. | Use of MUA for restricted backup operations. | + | Role based access tampered. | Dedicated admin accounts. | + | No other account able to override a malicious actor. | | + +1. Subscription Admin + + The subscription admin is typically a restricted group of team leads who are deploying their teams solutions to the subscription. They have full control over the subscription, including the backup vault and the backup resources. + + The actor holds the following roles: + + * Subscription Owner + * Subscription RBAC Administrator + + The following risks and mitigations should be considered: + + | Risks | Mitigations | + |-|-| + | Backup instance tampered with.                       | Use of PIM for temporary elevated privileges. | + | Backup policy tampered with. | Use of MUA for restricted backup operations. | + | Role based access tampered. | | + +1. Deployment Service Principal + + The deployment service principal is an unattended credential used to deploy the solution from an automated process such as a pipeline or workflow. It has the permission to deploy resources (such as the backup vault) and assign the roles required for the solution to operate. + + The actor holds the following roles: + + * Subscription Contributor + * Subscription RBAC Administrator limited to the roles required by the deployment + + The following risks and mitigations should be considered: + + | Risks | Mitigations | + |-|-| + | Backup instance tampered with.                       | Use of PIM for temporary elevated privileges. | + | Backup policy tampered with. | Use of MUA for restricted backup operations. | + | Role based access tampered. | Secret scanning in pipeline. | + | Poor secret management. 
| Robust secret management procedures. |
+
+1. Backup Admin
+
+    The backup admin is typically a group of team support engineers and/or technical specialists. They have the permission to monitor backup telemetry, and restore backups in order to recover services.
+
+    The actor holds the following roles:
+
+    * Subscription Backup Operator
+
+1. Backup Monitor
+
+    The backup monitor is typically a group of service staff. They have the permission to monitor backup telemetry in order to raise the alarm if any issues are found.
+
+    The actor holds the following roles:
+
+    * Monitoring Reader
+
+1. Security Admin
+
+    The security admin is typically a group of cyber security specialists that are isolated from the other actors, by being in a different tenant or a highly restricted subscription. They have permissions to manage Resource Guard, which provides multi user authorisation to perform restricted operations on the backup vault, such as changing policies or stopping a backup instance.
+
+    The actor holds the following roles:
+
+    * Subscription Backup MUA Administrator
+
+    | Risks | Mitigations |
+    |-|-|
+    | Elevated roles not revoked. | Use of PIM for temporary elevated privileges. |
+    | | Robust and well documented processes. |
+
+    **NOTE: MUA without PIM requires a manual revocation of elevated permissions.**
+
+1. Backup Vault Managed Identity
+
+    The backup vault managed identity is a "System Assigned" managed identity that performs backup vault operations. It is restricted to just the services defined at deployment, and cannot be compromised at runtime.
+
+    The actor holds the following roles:
+
+    * Backup Vault Resource Writer
+    * Reader role on resources that require backup
+
+## Terraform Design
+
+The following diagram illustrates the terraform design:
+
+![Terraform Design](assets/terraform-design.drawio.svg)
+
+### Description
+
+1. The **az-backup** module is essentially everything within the `./infrastructure` directory of this repository. It consists of the following resources:
+    * A **resource group** which will contain _most_ of the other resources in the module.
+    * A **backup vault** within which backup policies and instances are configured.
+    * A **role assignment** which provides read access to the vault.
+    * A number of **backup modules** which can back up a specific type of resource.
+
+1. **Backup modules** are created which define the policies that set up and trigger the backups once the module is deployed. The policies are configured via terraform variables.
+
+1. Each **backup module** deploys the resources that are required to back up a resource that contains source data (e.g. a storage account). It consists of a **backup policy** that is configured in the **backup vault** on deployment and defines the rules such as backup retention and schedule, and a **backup instance** that applies the policy and initiates the backups of a specified resource (a minimal sketch of this pairing is shown after this list).
+
+1. The **consuming application** is developed and maintained by the blueprint consumer. It will likely consist of a number of resources that make up an application or service, and contain resources that need to be backed up. The recommended way of using **az-backup** in the **consuming application** is to specify the blueprint repository as the remote source of a terraform module. [See the following link for more information.](https://developer.hashicorp.com/terraform/language/modules/sources)
+
+1. The **az-backup** module is configured by terraform variables which are applied at deployment time. The **consuming application** can control parameters such as the vault name, location and redundancy, as well as the backup policies and their retention period and schedule. See the [module variables](usage.md#module-variables) section for more details.
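+
+To make the policy and instance pairing in point 3 concrete, the following is a minimal, illustrative sketch of a blob storage backup module. It is a simplification of the real implementation under `./infrastructure/modules/backup/blob_storage`, and the exact policy argument names (for example the retention setting) depend on the azurerm provider version in use:
+
+```terraform
+# Illustrative only - see ./infrastructure/modules/backup/blob_storage for the real implementation.
+
+# The backup policy defines the rules, such as how long backups are retained.
+resource "azurerm_data_protection_backup_policy_blob_storage" "policy" {
+  name     = "bkpol-${var.vault_name}-blobstorage-${var.backup_name}"
+  vault_id = var.vault_id
+
+  # Retention argument name assumed for recent azurerm releases - check the provider docs.
+  operational_default_retention_duration = var.retention_period # e.g. "P7D"
+}
+
+# The backup instance applies the policy to a specific storage account and initiates backups.
+resource "azurerm_data_protection_backup_instance_blob_storage" "instance" {
+  name               = "bkinst-${var.vault_name}-blobstorage-${var.backup_name}"
+  vault_id           = var.vault_id
+  location           = var.vault_location
+  storage_account_id = var.storage_account_id
+  backup_policy_id   = azurerm_data_protection_backup_policy_blob_storage.policy.id
+}
+```
+
+The root module then creates one of these modules for each entry in the `blob_storage_backups` variable (see `infrastructure/backup_modules.tf`), so consumers supply configuration rather than writing these resources themselves.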
diff --git a/docs/developer-guide.md b/docs/developer-guide.md
new file mode 100644
index 0000000..9d6bc43
--- /dev/null
+++ b/docs/developer-guide.md
@@ -0,0 +1,233 @@
+# Developer Guide
+
+## Overview
+
+The following guide is for developers working on the blueprint solution - not for developers that are consuming the blueprint.
+
+## Environment Setup
+
+The following are prerequisites for working with the solution:
+
+* An Azure subscription for development purposes
+* An Azure identity which has been assigned the following roles at the subscription level:
+    * Contributor (required to create resources)
+    * Role Based Access Control Administrator (to assign roles to the backup vault managed identity) **with a condition that limits the roles which can be assigned to:**
+        * Storage Account Backup Contributor
+        * Disk Snapshot Contributor
+        * Disk Backup Reader
+* [Azure CLI installed](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli)
+* [Terraform installed](https://developer.hashicorp.com/terraform/install)
+* [Go installed (to run the end-to-end tests)](https://go.dev/dl/)
+
+Ensure all installed components have been added to the `%PATH%` - e.g. `az`, `terraform` and `go`.
+
+## Getting Started
+
+Take the following steps to get started in configuring and verifying the infrastructure for your development environment:
+
+1. Set up environment variables
+
+    Set the following environment variables in order to connect to Azure in the following steps:
+
+    ```pwsh
+    $env:ARM_TENANT_ID=""
+    $env:ARM_SUBSCRIPTION_ID=""
+    $env:ARM_CLIENT_ID=""
+    $env:ARM_CLIENT_SECRET=""
+    ```
+
+1. Create Backend
+
+    A backend (e.g. storage account) is required in order to store the tfstate and work with Terraform.
+
+    Run the following powershell script to create the backend with default settings: `./scripts/create-tf-backend.ps1`. This script will create a resource group called `rg-nhsbackup` containing a storage account called `satfstate`.
+
+    Make a note of the name of the storage account in the script output - it's generated with a random suffix, and you'll need it in the following steps to initialise the terraform.
+
+1. Prepare Terraform Variables (Optional)
+
+    If you want to override the Terraform variables, make a copy of `tfvars.template` and amend any default settings as required (a minimal example is shown after these steps).
+
+    In the next step add the following flag to the `terraform apply` command in order to use your variables:
+
+    ```pwsh
+    -var-file="<your copy>.tfvars"
+    ```
+
+1. Initialise Terraform
+
+    Change the working directory to `./infrastructure`.
+
+    Terraform can now be initialised by running the following command:
+
+    ````pwsh
+    terraform init -backend=true -backend-config="resource_group_name=rg-nhsbackup" -backend-config="storage_account_name=" -backend-config="container_name=tfstate" -backend-config="key=terraform.tfstate"
+    ````
+
+1. Apply Terraform
+
+    Apply the Terraform code to create the infrastructure.
+
+    The `-auto-approve` flag is used to automatically approve the plan; you can remove this flag to review the plan before applying.
+
+    ```pwsh
+    terraform apply -auto-approve
+    ```
+
+    Now review the deployed infrastructure in the Azure portal. You will find the resources deployed to a resource group called `rg-nhsbackup-myvault` (unless you specified a different vault name in the tfvars).
+
+    Should you want to, you can remove the infrastructure with the following command:
+
+    ```pwsh
+    terraform destroy -auto-approve
+    ```
+
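+As an illustration of step 3, a minimal variables file might look like the following. The variable names are those documented in the [usage guide](usage.md#module-variables); `tfvars.template` remains the authoritative list of settings and defaults.
+
+```terraform
+# dev.tfvars - an illustrative copy of tfvars.template with a couple of settings overridden.
+vault_name       = "mydevvault"
+vault_location   = "uksouth"
+vault_redundancy = "LocallyRedundant"
+```
+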
+## Integration Tests
+
+The test suite consists of a number of Terraform HCL integration tests that use a mock azurerm provider (an illustrative test file is sketched at the end of this section).
+
+[See this link for more information.](https://developer.hashicorp.com/terraform/language/tests)
+
+> TIP! Consider adopting the classic red-green-refactor approach using the integration test framework when adding or modifying the terraform code.
+
+Take the following steps to run the test suite:
+
+1. Initialise Terraform
+
+    Change the working directory to `./tests/integration-tests`.
+
+    Terraform can now be initialised by running the following command:
+
+    ````pwsh
+    terraform init -backend=false
+    ````
+
+    > NOTE: There's no need to initialise a backend for the purposes of running the tests.
+
+1. Run the tests
+
+    Run the tests with the following command:
+
+    ````pwsh
+    terraform test
+    ````
+
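+The following is an illustrative sketch of a test file, showing the mocked provider and the `run`/`assert` structure - it is not one of the repository's actual test files, and the resource addresses, variables and assertions will differ:
+
+```hcl
+# example.tftest.hcl (hypothetical file name and content - for illustration only)
+
+# Replace real Azure calls with a mocked azurerm provider so no credentials are needed.
+mock_provider "azurerm" {}
+
+run "creates_backup_vault" {
+  command = plan
+
+  variables {
+    vault_name = "testvault"
+  }
+
+  assert {
+    # Hypothetical assertion - the real tests assert against the module's actual resources.
+    condition     = azurerm_data_protection_backup_vault.backup_vault.redundancy == "LocallyRedundant"
+    error_message = "Expected the default vault redundancy to be LocallyRedundant"
+  }
+}
+```
+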
+## End to End Tests
+
+The end to end tests are written in go, and use the [terratest library](https://terratest.gruntwork.io/) and the [Azure SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main).
+
+The tests depend on a connection to Azure, so that an environment can be created for the tests to execute against - the environment is torn down once the test run has completed.
+
+See the following resources for docs and examples of terratest and the Azure SDK:
+
+* [Terratest docs](https://terratest.gruntwork.io/docs/)
+* [Terratest repository](https://github.com/gruntwork-io/terratest)
+* [Terratest test examples](https://github.com/gruntwork-io/terratest/tree/master/test)
+* [Azure SDK](https://github.com/Azure/azure-sdk-for-go/tree/main)
+* [Azure SDK Data Protection Module](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/resourcemanager/dataprotection/armdataprotection)
+
+To run the tests, take the following steps:
+
+1. Install go packages
+
+    You only need to do this once when setting up your environment.
+
+    Change the working directory to `./tests/end-to-end-tests`.
+
+    Run the following command:
+
+    ````pwsh
+    go mod tidy
+    ````
+
+1. Set up environment variables
+
+    The end-to-end test suite needs to log in to Azure in order to execute the tests and therefore the following environment variables must be set.
+
+    ```pwsh
+    $env:ARM_TENANT_ID=""
+    $env:ARM_SUBSCRIPTION_ID=""
+    $env:ARM_CLIENT_ID=""
+    $env:ARM_CLIENT_SECRET=""
+    $env:TF_STATE_RESOURCE_GROUP="rg-nhsbackup"
+    $env:TF_STATE_STORAGE_ACCOUNT=""
+    $env:TF_STATE_STORAGE_CONTAINER="tfstate"
+    ```
+
+    > For the storage account name, the TF state backend should have been created during the [getting started guide](#getting-started), at which point the storage account will have been created and the name generated.
+
+1. Run the tests
+
+    Run the tests with the following command:
+
+    ````pwsh
+    go test -v -timeout 10m
+    ````
+
+### Debugging
+
+To debug the tests in vscode, add the following configuration to launch settings and run the configuration:
+
+```json
+{
+    "configurations": [
+        {
+            "name": "Go Test",
+            "type": "go",
+            "request": "launch",
+            "mode": "test",
+            "program": "${workspaceFolder}/tests/end-to-end-tests",
+            "env": {
+                "ARM_TENANT_ID": "",
+                "ARM_SUBSCRIPTION_ID": "",
+                "ARM_CLIENT_ID": "",
+                "ARM_CLIENT_SECRET": "",
+                "TF_STATE_RESOURCE_GROUP": "rg-nhsbackup",
+                "TF_STATE_STORAGE_ACCOUNT": "",
+                "TF_STATE_STORAGE_CONTAINER": "tfstate"
+            }
+        }
+    ]
+}
+```
+
+> For the storage account name, the TF state backend should have been created during the [getting started guide](#getting-started), at which point the storage account will have been created and the name generated.
+
+## CI Pipeline
+
+The CI pipeline builds and verifies the solution and runs a number of static code analysis steps on the code base.
+
+Part of the build verification is end to end testing. This requires the pipeline to log in to Azure and deploy an environment on which to execute the tests. In order for the pipeline to log in to Azure the following GitHub Actions secrets must be created:
+
+* `AZURE_TENANT_ID`
+    The ID of an Azure tenant which can be used for the end to end test environment.
+
+* `AZURE_SUBSCRIPTION_ID`
+    The ID of an Azure subscription which can be used for the end to end test environment.
+
+* `AZURE_CLIENT_ID`
+    The client ID of an Azure service principal / app registration which can be used to authenticate with the end to end test environment.
+
+    The app registration must have contributor permissions on the subscription in order to create resources.
+
+* `AZURE_CLIENT_SECRET`
+    The client secret of an Azure app registration which can be used to authenticate with the end to end test environment.
+
+* `TF_STATE_RESOURCE_GROUP`
+    The resource group which contains the TF state storage account.
+
+* `TF_STATE_STORAGE_ACCOUNT`
+    The storage account used for TF state.
+
+* `TF_STATE_STORAGE_CONTAINER`
+    The storage container used for TF state.
+
+### Static Code Analysis
+
+The following static code analysis checks are executed:
+
+* [Terraform format](https://developer.hashicorp.com/terraform/cli/commands/fmt)
+* [Terraform lint](https://github.com/terraform-linters/tflint)
+* [Checkov scan](https://www.checkov.io/)
+* [Gitleaks scan](https://github.com/gitleaks/gitleaks)
+* [Trivy vulnerability scan](https://github.com/aquasecurity/trivy)
diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml
new file mode 100644
index 0000000..b211fa6
--- /dev/null
+++ b/docs/docker-compose.yml
@@ -0,0 +1,9 @@
+version: "3.8"
+services:
+  mkdocs:
+    image: squidfunk/mkdocs-material
+    ports:
+      - "8000:8000"
+    volumes:
+      - ../:/docs
+    command: ["serve", "-a", "0.0.0.0:8000"]
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..d42fca9
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,18 @@
+# Home
+
+## Introduction
+
+This repository is a blueprint accelerator solution that supports teams in implementing immutable backups in Azure.
+
+Its aim is to give developers tooling and templates that can be used to create, configure and manage immutable backups using Azure Backup Vault in a proven way that's consistent across the organisation. 
+
+## Technology Stack
+
+The following technologies have been used:
+
+* [Azure](https://azure.microsoft.com)
+* [Azure CLI](https://learn.microsoft.com/en-us/cli/azure)
+* [Terraform](https://developer.hashicorp.com/terraform)
+* [Go (used for end-to-end testing)](https://go.dev/dl/)
+* [Terratest](https://terratest.gruntwork.io/)
+* [Azure SDK for Go](https://github.com/Azure/azure-sdk-for-go)
diff --git a/docs/styles.css b/docs/styles.css
new file mode 100644
index 0000000..bd1d22e
--- /dev/null
+++ b/docs/styles.css
@@ -0,0 +1,16 @@
+[data-md-color-primary="nhs"] {
+    --md-primary-fg-color: #005EB8;
+}
+
+.md-header__button.md-logo {
+    padding: 0 !important;
+}
+
+.md-header__title {
+    margin-left: 0.5rem !important;
+}
+
+.md-header__button.md-logo img, .md-header__button.md-logo svg {
+    height: 2.5rem;
+    width: 2.5rem;
+}
\ No newline at end of file
diff --git a/docs/usage.md b/docs/usage.md
new file mode 100644
index 0000000..878dc80
--- /dev/null
+++ b/docs/usage.md
@@ -0,0 +1,85 @@
+# Usage
+
+## Overview
+
+To use the az-backup Terraform module, create a module block in your own code and set its source to the az-backup repository.
+
+[See the following link for more information about using GitHub as the source of a Terraform module.](https://developer.hashicorp.com/terraform/language/modules/sources#github)
+
+The az-backup module resides in the `./infrastructure` subdirectory of the repository, so you need to specify that in the module source by using the double-slash syntax [as explained in this guide](https://developer.hashicorp.com/terraform/language/modules/sources#modules-in-package-sub-directories).
+
+In the future, release tags will allow consumers to depend on a specific release of the module; however, this has not yet been implemented.
+
+## Example
+
+The following is an example of how the module should be used:
+
+```terraform
+module "my_backup" {
+  source           = "github.com/nhsdigital/az-backup//infrastructure"
+  vault_name       = "myvault"
+  vault_location   = "uksouth"
+  vault_redundancy = "LocallyRedundant"
+  blob_storage_backups = {
+    backup1 = {
+      backup_name        = "storage1"
+      retention_period   = "P7D"
+      storage_account_id = azurerm_storage_account.my_storage_account_1.id
+    }
+    backup2 = {
+      backup_name        = "storage2"
+      retention_period   = "P30D"
+      storage_account_id = azurerm_storage_account.my_storage_account_2.id
+    }
+  }
+  managed_disk_backups = {
+    backup1 = {
+      backup_name      = "disk1"
+      retention_period = "P7D"
+      backup_intervals = ["R/2024-01-01T00:00:00+00:00/P1D"]
+      managed_disk_id  = azurerm_managed_disk.my_managed_disk_1.id
+      managed_disk_resource_group = {
+        id   = azurerm_resource_group.my_resource_group.id
+        name = azurerm_resource_group.my_resource_group.name
+      }
+    }
+    backup2 = {
+      backup_name      = "disk2"
+      retention_period = "P30D"
+      backup_intervals = ["R/2024-01-01T00:00:00+00:00/P2D"]
+      managed_disk_id  = azurerm_managed_disk.my_managed_disk_2.id
+      managed_disk_resource_group = {
+        id   = azurerm_resource_group.my_resource_group.id
+        name = azurerm_resource_group.my_resource_group.name
+      }
+    }
+  }
+}
+```
+
+## Deployment Identity
+
+To deploy the module, an Azure identity (typically an app registration with a client secret) is required which has been assigned the following roles at the subscription level:
+
+* Contributor (required to create resources)
+* Role Based Access Control Administrator (to assign roles to the backup vault managed identity) **with a condition that limits the roles which can be assigned to:**
+    * Storage Account Backup Contributor
+    * Disk Snapshot Contributor
+    * Disk Backup Reader
+
+## Module Variables
+
+| Name | Description | Mandatory | Default |
+|------|-------------|-----------|---------|
+| `vault_name` | The name of the backup vault. The value supplied is used to name the vault and its resource group, which are automatically prefixed with `bvault-` and `rg-nhsbackup-` respectively. If more than one az-backup module is created, this value must be unique across them. | Yes | n/a |
+| `vault_location` | The location of the resource group that is created to contain the vault. | No | `uksouth` |
+| `vault_redundancy` | The redundancy of the vault, e.g. `GeoRedundant`. [See the following link for the possible values](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/data_protection_backup_vault#redundancy) | No | `LocallyRedundant` |
+| `blob_storage_backups` | A map of blob storage backups that should be created. For each backup the following values should be provided: `storage_account_id`, `backup_name` and `retention_period`. When no value is provided then no backups are created. | No | n/a |
+| `blob_storage_backups.storage_account_id` | The ID of the storage account that should be backed up. | Yes | n/a |
+| `blob_storage_backups.backup_name` | The name of the backup, which must be unique across blob storage backups. | Yes | n/a |
+| `blob_storage_backups.retention_period` | How long the backed up data will be retained for, which should be in `ISO 8601` duration format. [See the following link for the possible values](https://en.wikipedia.org/wiki/ISO_8601#Durations). | Yes | n/a |
+| `managed_disk_backups` | A map of managed disk backups that should be created. For each backup the following values should be provided: `managed_disk_id`, `backup_name` and `retention_period`. When no value is provided then no backups are created. | No | n/a |
+| `managed_disk_backups.managed_disk_id` | The ID of the managed disk that should be backed up. | Yes | n/a |
+| `managed_disk_backups.backup_name` | The name of the backup, which must be unique across managed disk backups. | Yes | n/a |
+| `managed_disk_backups.retention_period` | How long the backed up data will be retained for, which should be in `ISO 8601` duration format. [See the following link for the possible values](https://en.wikipedia.org/wiki/ISO_8601#Durations). | Yes | n/a |
+| `managed_disk_backups.backup_intervals` | A list of intervals at which backups should be taken, which should be in `ISO 8601` repeating time interval format. [See the following link for the possible values](https://en.wikipedia.org/wiki/ISO_8601#Time_intervals).
| Yes | n/a | diff --git a/infrastructure/backup_modules.tf b/infrastructure/backup_modules.tf new file mode 100644 index 0000000..1729669 --- /dev/null +++ b/infrastructure/backup_modules.tf @@ -0,0 +1,26 @@ +module "blob_storage_backup" { + for_each = var.blob_storage_backups + source = "./modules/backup/blob_storage" + vault_id = azurerm_data_protection_backup_vault.backup_vault.id + vault_name = var.vault_name + vault_location = var.vault_location + backup_name = each.value.backup_name + retention_period = each.value.retention_period + storage_account_id = each.value.storage_account_id + vault_principal_id = azurerm_data_protection_backup_vault.backup_vault.identity[0].principal_id +} + +module "managed_disk_backup" { + for_each = var.managed_disk_backups + source = "./modules/backup/managed_disk" + vault_id = azurerm_data_protection_backup_vault.backup_vault.id + vault_name = var.vault_name + vault_location = var.vault_location + backup_name = each.value.backup_name + retention_period = each.value.retention_period + backup_intervals = each.value.backup_intervals + managed_disk_id = each.value.managed_disk_id + managed_disk_resource_group = each.value.managed_disk_resource_group + vault_principal_id = azurerm_data_protection_backup_vault.backup_vault.identity[0].principal_id + assign_resource_group_level_roles = each.key == keys(var.managed_disk_backups)[0] ? true : false +} diff --git a/infrastructure/backup_policy.tf b/infrastructure/backup_policy.tf deleted file mode 100644 index a7d506e..0000000 --- a/infrastructure/backup_policy.tf +++ /dev/null @@ -1,16 +0,0 @@ -module "blob_storage_policy" { - source = "./modules/backup_policy/blob_storage" - policy_name = "bkpol-${var.vault_name}-blobstorage" - vault_id = azurerm_data_protection_backup_vault.backup_vault.id - retention_period = "P7D" # 7 days - # NOTE - this blob policy has been configured for operational backup - # only, which continuously backs up data and does not need a schedule -} - -module "managed_disk_policy" { - source = "./modules/backup_policy/managed_disk" - policy_name = "bkpol-${var.vault_name}-manageddisk" - vault_id = azurerm_data_protection_backup_vault.backup_vault.id - retention_period = "P7D" # 7 days - backup_intervals = ["R/2024-01-01T00:00:00+00:00/P1D"] # Once per day at 00:00 -} diff --git a/infrastructure/main.tf b/infrastructure/main.tf index a93ba04..d320d4c 100644 --- a/infrastructure/main.tf +++ b/infrastructure/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.114.0" + version = "4.3.0" } } @@ -11,4 +11,4 @@ terraform { provider "azurerm" { features {} -} \ No newline at end of file +} diff --git a/infrastructure/modules/backup_instance/blob_storage/main.tf b/infrastructure/modules/backup/blob_storage/backup_instance.tf similarity index 73% rename from infrastructure/modules/backup_instance/blob_storage/main.tf rename to infrastructure/modules/backup/blob_storage/backup_instance.tf index 3bb973d..f57350d 100644 --- a/infrastructure/modules/backup_instance/blob_storage/main.tf +++ b/infrastructure/modules/backup/blob_storage/backup_instance.tf @@ -5,13 +5,13 @@ resource "azurerm_role_assignment" "role_assignment" { } resource "azurerm_data_protection_backup_instance_blob_storage" "backup_instance" { - name = var.instance_name + name = "bkinst-${var.vault_name}-blobstorage-${var.backup_name}" vault_id = var.vault_id location = var.vault_location storage_account_id = var.storage_account_id - backup_policy_id = var.policy_id + 
backup_policy_id = azurerm_data_protection_backup_policy_blob_storage.backup_policy.id depends_on = [ azurerm_role_assignment.role_assignment ] -} +} \ No newline at end of file diff --git a/infrastructure/modules/backup_policy/blob_storage/main.tf b/infrastructure/modules/backup/blob_storage/backup_policy.tf similarity index 66% rename from infrastructure/modules/backup_policy/blob_storage/main.tf rename to infrastructure/modules/backup/blob_storage/backup_policy.tf index 643fd65..6c1ca8e 100644 --- a/infrastructure/modules/backup_policy/blob_storage/main.tf +++ b/infrastructure/modules/backup/blob_storage/backup_policy.tf @@ -1,5 +1,5 @@ resource "azurerm_data_protection_backup_policy_blob_storage" "backup_policy" { - name = var.policy_name + name = "bkpol-${var.vault_name}-blobstorage-${var.backup_name}" vault_id = var.vault_id operational_default_retention_duration = var.retention_period } diff --git a/infrastructure/modules/backup/blob_storage/output.tf b/infrastructure/modules/backup/blob_storage/output.tf new file mode 100644 index 0000000..2181ee7 --- /dev/null +++ b/infrastructure/modules/backup/blob_storage/output.tf @@ -0,0 +1,7 @@ +output "backup_policy" { + value = azurerm_data_protection_backup_policy_blob_storage.backup_policy +} + +output "backup_instance" { + value = azurerm_data_protection_backup_instance_blob_storage.backup_instance +} \ No newline at end of file diff --git a/infrastructure/modules/backup_instance/blob_storage/variables.tf b/infrastructure/modules/backup/blob_storage/variables.tf similarity index 69% rename from infrastructure/modules/backup_instance/blob_storage/variables.tf rename to infrastructure/modules/backup/blob_storage/variables.tf index d0a72f9..91ec6b4 100644 --- a/infrastructure/modules/backup_instance/blob_storage/variables.tf +++ b/infrastructure/modules/backup/blob_storage/variables.tf @@ -1,8 +1,8 @@ -variable "instance_name" { +variable "vault_id" { type = string } -variable "vault_id" { +variable "vault_name" { type = string } @@ -14,7 +14,11 @@ variable "vault_principal_id" { type = string } -variable "policy_id" { +variable "backup_name" { + type = string +} + +variable "retention_period" { type = string } diff --git a/infrastructure/modules/backup_instance/managed_disk/main.tf b/infrastructure/modules/backup/managed_disk/backup_instance.tf similarity index 77% rename from infrastructure/modules/backup_instance/managed_disk/main.tf rename to infrastructure/modules/backup/managed_disk/backup_instance.tf index 540c2ec..e1c5f35 100644 --- a/infrastructure/modules/backup_instance/managed_disk/main.tf +++ b/infrastructure/modules/backup/managed_disk/backup_instance.tf @@ -1,4 +1,5 @@ resource "azurerm_role_assignment" "role_assignment_snapshot_contributor" { + count = var.assign_resource_group_level_roles == true ? 
1 : 0 scope = var.managed_disk_resource_group.id role_definition_name = "Disk Snapshot Contributor" principal_id = var.vault_principal_id @@ -11,12 +12,12 @@ resource "azurerm_role_assignment" "role_assignment_backup_reader" { } resource "azurerm_data_protection_backup_instance_disk" "backup_instance" { - name = var.instance_name + name = "bkinst-${var.vault_name}-manageddisk-${var.backup_name}" vault_id = var.vault_id location = var.vault_location disk_id = var.managed_disk_id snapshot_resource_group_name = var.managed_disk_resource_group.name - backup_policy_id = var.policy_id + backup_policy_id = azurerm_data_protection_backup_policy_disk.backup_policy.id depends_on = [ azurerm_role_assignment.role_assignment_snapshot_contributor, diff --git a/infrastructure/modules/backup_policy/managed_disk/main.tf b/infrastructure/modules/backup/managed_disk/backup_policy.tf similarity index 71% rename from infrastructure/modules/backup_policy/managed_disk/main.tf rename to infrastructure/modules/backup/managed_disk/backup_policy.tf index 970e38a..d157216 100644 --- a/infrastructure/modules/backup_policy/managed_disk/main.tf +++ b/infrastructure/modules/backup/managed_disk/backup_policy.tf @@ -1,5 +1,5 @@ resource "azurerm_data_protection_backup_policy_disk" "backup_policy" { - name = var.policy_name + name = "bkpol-${var.vault_name}-manageddisk-${var.backup_name}" vault_id = var.vault_id default_retention_duration = var.retention_period backup_repeating_time_intervals = var.backup_intervals diff --git a/infrastructure/modules/backup/managed_disk/output.tf b/infrastructure/modules/backup/managed_disk/output.tf new file mode 100644 index 0000000..08a17c7 --- /dev/null +++ b/infrastructure/modules/backup/managed_disk/output.tf @@ -0,0 +1,7 @@ +output "backup_policy" { + value = azurerm_data_protection_backup_policy_disk.backup_policy +} + +output "backup_instance" { + value = azurerm_data_protection_backup_instance_disk.backup_instance +} \ No newline at end of file diff --git a/infrastructure/modules/backup_instance/managed_disk/variables.tf b/infrastructure/modules/backup/managed_disk/variables.tf similarity index 60% rename from infrastructure/modules/backup_instance/managed_disk/variables.tf rename to infrastructure/modules/backup/managed_disk/variables.tf index 10448d0..009dc88 100644 --- a/infrastructure/modules/backup_instance/managed_disk/variables.tf +++ b/infrastructure/modules/backup/managed_disk/variables.tf @@ -1,8 +1,8 @@ -variable "instance_name" { +variable "vault_id" { type = string } -variable "vault_id" { +variable "vault_name" { type = string } @@ -14,10 +14,18 @@ variable "vault_principal_id" { type = string } -variable "policy_id" { +variable "backup_name" { + type = string +} + +variable "retention_period" { type = string } +variable "backup_intervals" { + type = list(string) +} + variable "managed_disk_id" { type = string } @@ -28,3 +36,7 @@ variable "managed_disk_resource_group" { name = string }) } + +variable "assign_resource_group_level_roles" { + type = bool +} diff --git a/infrastructure/modules/backup_instance/kubernetes_cluster/main.tf b/infrastructure/modules/backup_instance/kubernetes_cluster/main.tf deleted file mode 100644 index a44581d..0000000 --- a/infrastructure/modules/backup_instance/kubernetes_cluster/main.tf +++ /dev/null @@ -1,70 +0,0 @@ -resource "azurerm_role_assignment" "extension_and_storage_account_permission" { - scope = var.storage_account_id - role_definition_name = "Storage Account Contributor" - principal_id = var.cluster_extension_principal_id -} 
- -resource "azurerm_role_assignment" "vault_msi_read_on_cluster" { - scope = var.cluster_id - role_definition_name = "Reader" - principal_id = var.vault_principal_id -} - -resource "azurerm_role_assignment" "vault_msi_read_on_snapshot_rg" { - scope = var.snapshot_resource_group_id - role_definition_name = "Reader" - principal_id = var.vault_principal_id -} - -resource "azurerm_role_assignment" "vault_msi_snapshot_contributor_on_snapshot_rg" { - scope = var.snapshot_resource_group_id - role_definition_name = "Disk Snapshot Contributor" - principal_id = var.vault_principal_id -} - -resource "azurerm_role_assignment" "vault_data_operator_on_snapshot_rg" { - scope = var.storage_account_id - role_definition_name = "Data Operator for Managed Disks" - principal_id = var.vault_principal_id -} - -resource "azurerm_role_assignment" "vault_data_contributor_on_storage" { - scope = var.snapshot_resource_group_id - role_definition_name = "Storage Blob Data Contributor" - principal_id = var.vault_principal_id -} - -resource "azurerm_role_assignment" "cluster_msi_contributor_on_snapshot_rg" { - scope = azurerm_resource_group.snap.id - role_definition_name = "Contributor" - principal_id = var.cluster_extension_principal_id -} - -resource "azurerm_data_protection_backup_instance_kubernetes_cluster" "example" { - name = var.instance_name - location = var.vault_location - vault_id = var.vault_id - kubernetes_cluster_id = var.cluster_id - snapshot_resource_group_name = var.snapshot_resource_group_name - backup_policy_id = var.policy_id - - backup_datasource_parameters { - excluded_namespaces = ["test-excluded-namespaces"] - excluded_resource_types = ["exvolumesnapshotcontents.snapshot.storage.k8s.io"] - cluster_scoped_resources_enabled = true - included_namespaces = ["test-included-namespaces"] - included_resource_types = ["involumesnapshotcontents.snapshot.storage.k8s.io"] - label_selectors = ["kubernetes.io/metadata.name:test"] - volume_snapshot_enabled = true - } - - depends_on = [ - azurerm_role_assignment.extension_and_storage_account_permission, - azurerm_role_assignment.vault_msi_read_on_cluster, - azurerm_role_assignment.vault_msi_read_on_snapshot_rg, - azurerm_role_assignment.cluster_msi_contributor_on_snapshot_rg, - azurerm_role_assignment.vault_msi_snapshot_contributor_on_snapshot_rg, - azurerm_role_assignment.vault_data_operator_on_snapshot_rg, - azurerm_role_assignment.vault_data_contributor_on_storage, - ] -} diff --git a/infrastructure/modules/backup_instance/kubernetes_cluster/variables.tf b/infrastructure/modules/backup_instance/kubernetes_cluster/variables.tf deleted file mode 100644 index 7e4135f..0000000 --- a/infrastructure/modules/backup_instance/kubernetes_cluster/variables.tf +++ /dev/null @@ -1,39 +0,0 @@ -variable "instance_name" { - type = string -} - -variable "vault_id" { - type = string -} - -variable "vault_location" { - type = string -} - -variable "cluster_id" { - type = string -} - -variable "snapshot_resource_group_name" { - type = string -} - -variable "snapshot_resource_group_id" { - type = string -} - -variable "policy_id" { - type = string -} - -variable "vault_principal_id" { - type = string -} - -variable "cluster_extension_principal_id" { - type = string -} - -variable "storage_account_id" { - type = string -} diff --git a/infrastructure/modules/backup_policy/blob_storage/output.tf b/infrastructure/modules/backup_policy/blob_storage/output.tf deleted file mode 100644 index b328c21..0000000 --- a/infrastructure/modules/backup_policy/blob_storage/output.tf +++ /dev/null 
@@ -1,15 +0,0 @@ -output "id" { - value = azurerm_data_protection_backup_policy_blob_storage.backup_policy.id -} - -output "name" { - value = azurerm_data_protection_backup_policy_blob_storage.backup_policy.name -} - -output "vault_id" { - value = azurerm_data_protection_backup_policy_blob_storage.backup_policy.vault_id -} - -output "retention_period" { - value = azurerm_data_protection_backup_policy_blob_storage.backup_policy.operational_default_retention_duration -} diff --git a/infrastructure/modules/backup_policy/blob_storage/variables.tf b/infrastructure/modules/backup_policy/blob_storage/variables.tf deleted file mode 100644 index d1f27f7..0000000 --- a/infrastructure/modules/backup_policy/blob_storage/variables.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "policy_name" { - type = string -} - -variable "vault_id" { - type = string -} - -variable "retention_period" { - type = string -} diff --git a/infrastructure/modules/backup_policy/kubernetes_cluster/main.tf b/infrastructure/modules/backup_policy/kubernetes_cluster/main.tf deleted file mode 100644 index 155c76e..0000000 --- a/infrastructure/modules/backup_policy/kubernetes_cluster/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "backup_policy" { - name = var.policy_name - vault_name = var.vault_name - resource_group_name = var.resource_group_name - backup_repeating_time_intervals = var.backup_intervals - default_retention_rule { - life_cycle { - duration = var.retention_period - data_store_type = "OperationalStore" - } - } -} diff --git a/infrastructure/modules/backup_policy/kubernetes_cluster/output.tf b/infrastructure/modules/backup_policy/kubernetes_cluster/output.tf deleted file mode 100644 index 40b2809..0000000 --- a/infrastructure/modules/backup_policy/kubernetes_cluster/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "id" { - value = azurerm_data_protection_backup_policy_kubernetes_cluster.backup_policy.id -} diff --git a/infrastructure/modules/backup_policy/kubernetes_cluster/variables.tf b/infrastructure/modules/backup_policy/kubernetes_cluster/variables.tf deleted file mode 100644 index a17f3cd..0000000 --- a/infrastructure/modules/backup_policy/kubernetes_cluster/variables.tf +++ /dev/null @@ -1,19 +0,0 @@ -variable "policy_name" { - type = string -} - -variable "vault_name" { - type = string -} - -variable "resource_group_name" { - type = string -} - -variable "retention_period" { - type = string -} - -variable "backup_intervals" { - type = list(string) -} diff --git a/infrastructure/modules/backup_policy/managed_disk/output.tf b/infrastructure/modules/backup_policy/managed_disk/output.tf deleted file mode 100644 index 986d7f8..0000000 --- a/infrastructure/modules/backup_policy/managed_disk/output.tf +++ /dev/null @@ -1,19 +0,0 @@ -output "id" { - value = azurerm_data_protection_backup_policy_disk.backup_policy.id -} - -output "name" { - value = azurerm_data_protection_backup_policy_disk.backup_policy.name -} - -output "vault_id" { - value = azurerm_data_protection_backup_policy_disk.backup_policy.vault_id -} - -output "retention_period" { - value = azurerm_data_protection_backup_policy_disk.backup_policy.default_retention_duration -} - -output "backup_intervals" { - value = azurerm_data_protection_backup_policy_disk.backup_policy.backup_repeating_time_intervals -} diff --git a/infrastructure/modules/backup_policy/managed_disk/variables.tf b/infrastructure/modules/backup_policy/managed_disk/variables.tf deleted file mode 100644 index 8107fb6..0000000 --- 
a/infrastructure/modules/backup_policy/managed_disk/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "policy_name" { - type = string -} - -variable "vault_id" { - type = string -} - -variable "retention_period" { - type = string -} - -variable "backup_intervals" { - type = list(string) -} diff --git a/infrastructure/modules/example/main.tf b/infrastructure/modules/example/main.tf deleted file mode 100644 index bfc15d6..0000000 --- a/infrastructure/modules/example/main.tf +++ /dev/null @@ -1,76 +0,0 @@ -# Create some example resources -########################################################################### - -module "example_storage_account_1" { - source = "./modules/example/storage_account" - location = var.vault_location - storage_account_name = "samystorage001" - resource_group = azurerm_resource_group.resource_group.name -} - -module "example_storage_account_2" { - source = "./modules/example/storage_account" - location = var.vault_location - storage_account_name = "samystorage002" - resource_group = azurerm_resource_group.resource_group.name -} - -module "example_managed_disk" { - source = "./modules/example/managed_disk" - location = var.vault_location - disk_name = "disk-mydisk" - resource_group = azurerm_resource_group.resource_group.name -} - - -# Create some backup instances -########################################################################### - -# NOTE - in future the backup instances will be created by Azure Policy -# and will not need to be explictly defined in Terraform. - -module "blob_storage_instance_1" { - source = "./modules/backup_instance/blob_storage" - instance_name = "bkinst-${var.vault_name}-mystorage001" - vault_id = azurerm_data_protection_backup_vault.backup_vault.id - vault_location = var.vault_location - vault_principal_id = azurerm_data_protection_backup_vault.backup_vault.identity[0].principal_id - policy_id = module.blob_storage_policy.id - storage_account_id = module.example_storage_account_1.id - - depends_on = [ - module.blob_storage_policy, - module.example_storage_account_1 - ] -} - -module "blob_storage_instance_2" { - source = "./modules/backup_instance/blob_storage" - instance_name = "bkinst-${var.vault_name}-mystorage002" - vault_id = azurerm_data_protection_backup_vault.backup_vault.id - vault_location = var.vault_location - vault_principal_id = azurerm_data_protection_backup_vault.backup_vault.identity[0].principal_id - policy_id = module.blob_storage_policy.id - storage_account_id = module.example_storage_account_2.id - - depends_on = [ - module.blob_storage_policy, - module.example_storage_account_2 - ] -} - -module "managed_disk_instance" { - source = "./modules/backup_instance/managed_disk" - instance_name = "bkinst-${var.vault_name}-mydisk" - vault_id = azurerm_data_protection_backup_vault.backup_vault.id - vault_location = var.vault_location - vault_principal_id = azurerm_data_protection_backup_vault.backup_vault.identity[0].principal_id - policy_id = module.managed_disk_policy.id - managed_disk_id = module.example_managed_disk.id - managed_disk_resource_group = azurerm_resource_group.resource_group - - depends_on = [ - module.managed_disk_policy, - module.example_managed_disk - ] -} diff --git a/infrastructure/modules/example/managed_disk/main.tf b/infrastructure/modules/example/managed_disk/main.tf deleted file mode 100644 index cc0f5bd..0000000 --- a/infrastructure/modules/example/managed_disk/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "azurerm_managed_disk" "managed_disk" { - name = var.disk_name - 
resource_group_name = var.resource_group - location = var.location - storage_account_type = "Standard_LRS" - create_option = "Empty" - disk_size_gb = "1" -} diff --git a/infrastructure/modules/example/managed_disk/output.tf b/infrastructure/modules/example/managed_disk/output.tf deleted file mode 100644 index 3908d3b..0000000 --- a/infrastructure/modules/example/managed_disk/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "id" { - value = azurerm_managed_disk.managed_disk.id -} diff --git a/infrastructure/modules/example/managed_disk/variables.tf b/infrastructure/modules/example/managed_disk/variables.tf deleted file mode 100644 index a69fe9c..0000000 --- a/infrastructure/modules/example/managed_disk/variables.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "disk_name" { - type = string -} - -variable "resource_group" { - type = string -} - -variable "location" { - type = string -} diff --git a/infrastructure/modules/example/storage_account/main.tf b/infrastructure/modules/example/storage_account/main.tf deleted file mode 100644 index 368cb95..0000000 --- a/infrastructure/modules/example/storage_account/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "azurerm_storage_account" "storage_account" { - name = var.storage_account_name - resource_group_name = var.resource_group - location = var.location - account_tier = "Standard" - account_replication_type = "LRS" -} diff --git a/infrastructure/modules/example/storage_account/output.tf b/infrastructure/modules/example/storage_account/output.tf deleted file mode 100644 index 10a1022..0000000 --- a/infrastructure/modules/example/storage_account/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "id" { - value = azurerm_storage_account.storage_account.id -} diff --git a/infrastructure/modules/example/storage_account/variables.tf b/infrastructure/modules/example/storage_account/variables.tf deleted file mode 100644 index 55dd6a5..0000000 --- a/infrastructure/modules/example/storage_account/variables.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "location" { - type = string -} - -variable "storage_account_name" { - type = string -} - -variable "resource_group" { - type = string -} diff --git a/infrastructure/output.tf b/infrastructure/output.tf index 3970884..8deb20f 100644 --- a/infrastructure/output.tf +++ b/infrastructure/output.tf @@ -1,11 +1,3 @@ -output "vault_name" { - value = azurerm_data_protection_backup_vault.backup_vault.name -} - -output "vault_location" { - value = azurerm_data_protection_backup_vault.backup_vault.location -} - -output "vault_redundancy" { - value = azurerm_data_protection_backup_vault.backup_vault.redundancy -} +output "backup_vault" { + value = azurerm_data_protection_backup_vault.backup_vault +} \ No newline at end of file diff --git a/infrastructure/tfvars.template b/infrastructure/tfvars.template deleted file mode 100644 index 9ba5553..0000000 --- a/infrastructure/tfvars.template +++ /dev/null @@ -1,3 +0,0 @@ -vault_name = "myvault" -vault_location = "UK South" -vault_redundancy = "LocallyRedundant" \ No newline at end of file diff --git a/infrastructure/variables.tf b/infrastructure/variables.tf index ba56da9..73ed877 100644 --- a/infrastructure/variables.tf +++ b/infrastructure/variables.tf @@ -1,14 +1,36 @@ variable "vault_name" { - type = string - default = "myvault" + type = string } variable "vault_location" { type = string - default = "UK South" + default = "uksouth" } variable "vault_redundancy" { type = string default = "LocallyRedundant" } + +variable "blob_storage_backups" { + type = map(object({ + backup_name = 
string + retention_period = string + storage_account_id = string + })) + default = {} +} + +variable "managed_disk_backups" { + type = map(object({ + backup_name = string + retention_period = string + backup_intervals = list(string) + managed_disk_id = string + managed_disk_resource_group = object({ + id = string + name = string + }) + })) + default = {} +} \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..7049ac8 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,33 @@ +site_name: Azure Immutable Backup +site_description: An [MkDocs](https://www.mkdocs.org/) site for the Azure Immutable Backup documentation. + +extra_css: + - styles.css + +theme: + name: material + palette: + - scheme: default + primary: nhs + logo: assets/nhs-england-logo.svg + favicon: assets/favicon.png + features: + - navigation.instant + - content.tabs.link + - content.code.copy +nav: + - Home: index.md + - Design: design.md + - Usage: usage.md + - Developer Guide: developer-guide.md + +markdown_extensions: + - pymdownx.superfences + +copyright: "© NHS England" + +extra: + generator: false + social: + - icon: fontawesome/brands/github + link: https://github.com/NHSDigital/az-backup \ No newline at end of file diff --git a/tests/end-to-end-tests/basic_deployment_test.go b/tests/end-to-end-tests/basic_deployment_test.go index 6cab76f..cbb84da 100644 --- a/tests/end-to-end-tests/basic_deployment_test.go +++ b/tests/end-to-end-tests/basic_deployment_test.go @@ -2,9 +2,9 @@ package e2e_tests import ( "fmt" - "os" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection" "github.com/gruntwork-io/terratest/modules/random" "github.com/gruntwork-io/terratest/modules/terraform" test_structure "github.com/gruntwork-io/terratest/modules/test-structure" @@ -12,32 +12,36 @@ import ( ) /* - * TestBasicDeployment tests a basic deployment of the infrastructure using Terraform using the TF output variables. + * TestBasicDeployment tests the basic deployment of the infrastructure using Terraform. */ func TestBasicDeployment(t *testing.T) { t.Parallel() - terraformFolder := test_structure.CopyTerraformFolderToTemp(t, "../../infrastructure", "") - terraformStateResourceGroup := os.Getenv("TF_STATE_RESOURCE_GROUP") - terraformStateStorageAccount := os.Getenv("TF_STATE_STORAGE_ACCOUNT") - terraformStateContainer := os.Getenv("TF_STATE_STORAGE_CONTAINER") - - if terraformStateResourceGroup == "" || terraformStateStorageAccount == "" || terraformStateContainer == "" { - t.Fatalf("One or more required environment variables (TF_STATE_RESOURCE_GROUP, TF_STATE_STORAGE_ACCOUNT, TF_STATE_STORAGE_CONTAINER) are not set.") - } + environment := GetEnvironmentConfiguration(t) + credential := GetAzureCredential(t, environment) vaultName := random.UniqueId() vaultLocation := "uksouth" vaultRedundancy := "LocallyRedundant" + resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s", vaultName) + backupVaultName := fmt.Sprintf("bvault-%s", vaultName) + + // Teardown stage + // ... + + defer test_structure.RunTestStage(t, "teardown", func() { + terraformOptions := test_structure.LoadTerraformOptions(t, environment.TerraformFolder) + + terraform.Destroy(t, terraformOptions) + }) // Setup stage // ... 
test_structure.RunTestStage(t, "setup", func() { terraformOptions := &terraform.Options{ - TerraformDir: terraformFolder, + TerraformDir: environment.TerraformFolder, - // Variables to pass to our Terraform code using -var options Vars: map[string]interface{}{ "vault_name": vaultName, "vault_location": vaultLocation, @@ -45,15 +49,14 @@ func TestBasicDeployment(t *testing.T) { }, BackendConfig: map[string]interface{}{ - "resource_group_name": terraformStateResourceGroup, - "storage_account_name": terraformStateStorageAccount, - "container_name": terraformStateContainer, + "resource_group_name": environment.TerraformStateResourceGroup, + "storage_account_name": environment.TerraformStateStorageAccount, + "container_name": environment.TerraformStateContainer, "key": vaultName + ".tfstate", }, } - // Save options for later test stages - test_structure.SaveTerraformOptions(t, terraformFolder, terraformOptions) + test_structure.SaveTerraformOptions(t, environment.TerraformFolder, terraformOptions) terraform.InitAndApply(t, terraformOptions) }) @@ -62,28 +65,20 @@ func TestBasicDeployment(t *testing.T) { // ... test_structure.RunTestStage(t, "validate", func() { - terraformOptions := test_structure.LoadTerraformOptions(t, terraformFolder) - - // Check if the vault name is as expected - expectedVaultName := fmt.Sprintf("bvault-%s", vaultName) - actualVaultName := terraform.Output(t, terraformOptions, "vault_name") - assert.Equal(t, expectedVaultName, actualVaultName) - - // Check if the vault location is as expected - actualVaultLocation := terraform.Output(t, terraformOptions, "vault_location") - assert.Equal(t, vaultLocation, actualVaultLocation) - - // Check if the vault redundancy is as expected - actualVaultRedundancy := terraform.Output(t, terraformOptions, "vault_redundancy") - assert.Equal(t, vaultRedundancy, actualVaultRedundancy) - }) - - // Teardown stage - // ... 
- - test_structure.RunTestStage(t, "teardown", func() { - terraformOptions := test_structure.LoadTerraformOptions(t, terraformFolder) - - terraform.Destroy(t, terraformOptions) + // Validate resource group + resourceGroup := GetResourceGroup(t, environment.SubscriptionID, credential, resourceGroupName) + assert.NotNil(t, resourceGroup, "Resource group does not exist") + assert.Equal(t, resourceGroupName, *resourceGroup.Name, "Resource group name does not match") + assert.Equal(t, vaultLocation, *resourceGroup.Location, "Resource group location does not match") + + // Validate backup vault + backupVault := GetBackupVault(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName) + assert.NotNil(t, backupVault, "Backup vault does not exist") + assert.Equal(t, backupVaultName, *backupVault.Name, "Backup vault name does not match") + assert.Equal(t, vaultLocation, *backupVault.Location, "Backup vault location does not match") + assert.NotNil(t, backupVault.Identity.PrincipalID, "Backup vault identity does not exist") + assert.Equal(t, "SystemAssigned", *backupVault.Identity.Type, "Backup vault identity type does not match") + assert.Equal(t, armdataprotection.StorageSettingTypesLocallyRedundant, *backupVault.Properties.StorageSettings[0].Type, "Backup vault redundancy does not match") + assert.Equal(t, armdataprotection.StorageSettingStoreTypesVaultStore, *backupVault.Properties.StorageSettings[0].DatastoreType, "Backup vault datastore type does not match") }) } diff --git a/tests/end-to-end-tests/blob_storage_backup_test.go b/tests/end-to-end-tests/blob_storage_backup_test.go new file mode 100644 index 0000000..14ce3c7 --- /dev/null +++ b/tests/end-to-end-tests/blob_storage_backup_test.go @@ -0,0 +1,158 @@ +package e2e_tests + +import ( + "fmt" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/gruntwork-io/terratest/modules/terraform" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/assert" +) + +type TestBlobStorageBackupExternalResources struct { + ResourceGroup armresources.ResourceGroup + StorageAccountOne armstorage.Account + StorageAccountTwo armstorage.Account +} + +/* + * Creates resources which are "external" to the az-backup module, and models + * what would be backed up in a real scenario. 
+ */ +func setupExternalResourcesForBlobStorageBackupTest(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, vault_name string, vault_location string) *TestBlobStorageBackupExternalResources { + resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s-external", vault_name) + resourceGroup := CreateResourceGroup(t, subscriptionID, credential, resourceGroupName, vault_location) + + storageAccountOneName := fmt.Sprintf("sa%sexternal1", strings.ToLower(vault_name)) + storageAccountOne := CreateStorageAccount(t, credential, subscriptionID, resourceGroupName, storageAccountOneName, vault_location) + + storageAccountTwoName := fmt.Sprintf("sa%sexternal2", strings.ToLower(vault_name)) + storageAccountTwo := CreateStorageAccount(t, credential, subscriptionID, resourceGroupName, storageAccountTwoName, vault_location) + + externalResources := &TestBlobStorageBackupExternalResources{ + ResourceGroup: resourceGroup, + StorageAccountOne: storageAccountOne, + StorageAccountTwo: storageAccountTwo, + } + + return externalResources +} + +/* + * TestBlobStorageBackup tests the deployment of a backup vault and backup policies for blob storage accounts. + */ +func TestBlobStorageBackup(t *testing.T) { + t.Parallel() + + environment := GetEnvironmentConfiguration(t) + credential := GetAzureCredential(t, environment) + + vaultName := random.UniqueId() + vaultLocation := "uksouth" + vaultRedundancy := "LocallyRedundant" + resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s", vaultName) + backupVaultName := fmt.Sprintf("bvault-%s", vaultName) + + externalResources := setupExternalResourcesForBlobStorageBackupTest(t, credential, environment.SubscriptionID, vaultName, vaultLocation) + + // A map of backups which we'll use to apply the TF module, and then validate the + // policies have been created correctly + blobStorageBackups := map[string]map[string]interface{}{ + "backup1": { + "backup_name": "blob1", + "retention_period": "P7D", + "storage_account_id": *externalResources.StorageAccountOne.ID, + }, + "backup2": { + "backup_name": "blob2", + "retention_period": "P30D", + "storage_account_id": *externalResources.StorageAccountTwo.ID, + }, + } + + // Teardown stage + // ... + + defer test_structure.RunTestStage(t, "teardown", func() { + terraformOptions := test_structure.LoadTerraformOptions(t, environment.TerraformFolder) + + terraform.Destroy(t, terraformOptions) + + DeleteResourceGroup(t, credential, environment.SubscriptionID, *externalResources.ResourceGroup.Name) + }) + + // Setup stage + // ... + + test_structure.RunTestStage(t, "setup", func() { + terraformOptions := &terraform.Options{ + TerraformDir: environment.TerraformFolder, + + Vars: map[string]interface{}{ + "vault_name": vaultName, + "vault_location": vaultLocation, + "vault_redundancy": vaultRedundancy, + "blob_storage_backups": blobStorageBackups, + }, + + BackendConfig: map[string]interface{}{ + "resource_group_name": environment.TerraformStateResourceGroup, + "storage_account_name": environment.TerraformStateStorageAccount, + "container_name": environment.TerraformStateContainer, + "key": vaultName + ".tfstate", + }, + } + + // Save options for later test stages + test_structure.SaveTerraformOptions(t, environment.TerraformFolder, terraformOptions) + + terraform.InitAndApply(t, terraformOptions) + }) + + // Validate stage + // ... 
+ + test_structure.RunTestStage(t, "validate", func() { + backupVault := GetBackupVault(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName) + backupPolicies := GetBackupPolicies(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName) + backupInstances := GetBackupInstances(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName) + + assert.Equal(t, len(blobStorageBackups), len(backupPolicies), "Expected to find %2 backup policies in vault", len(blobStorageBackups)) + assert.Equal(t, len(blobStorageBackups), len(backupInstances), "Expected to find %2 backup instances in vault", len(blobStorageBackups)) + + for _, backup := range blobStorageBackups { + backupName := backup["backup_name"].(string) + retentionPeriod := backup["retention_period"].(string) + storageAccountId := backup["storage_account_id"].(string) + + // Validate backup policy + backupPolicyName := fmt.Sprintf("bkpol-%s-blobstorage-%s", vaultName, backupName) + backupPolicy := GetBackupPolicyForName(backupPolicies, backupPolicyName) + assert.NotNil(t, backupPolicy, "Expected to find a backup policy called %s", backupPolicyName) + + // Validate retention period + backupPolicyProperties := backupPolicy.Properties.(*armdataprotection.BackupPolicy) + retentionRule := GetBackupPolicyRuleForName(backupPolicyProperties.PolicyRules, "Default").(*armdataprotection.AzureRetentionRule) + deleteOption := retentionRule.Lifecycles[0].DeleteAfter.(*armdataprotection.AbsoluteDeleteOption) + assert.Equal(t, retentionPeriod, *deleteOption.Duration, "Expected the backup policy retention period to be %s", retentionPeriod) + + // Validate backup instance + backupInstanceName := fmt.Sprintf("bkinst-%s-blobstorage-%s", vaultName, backupName) + backupInstance := GetBackupInstanceForName(backupInstances, backupInstanceName) + assert.NotNil(t, backupInstance, "Expected to find a backup policy called %s", backupInstanceName) + assert.Equal(t, storageAccountId, *backupInstance.Properties.DataSourceInfo.ResourceID, "Expected the backup instance source resource ID to be %s", storageAccountId) + assert.Equal(t, *backupPolicy.ID, *backupInstance.Properties.PolicyInfo.PolicyID, "Expected the backup instance policy ID to be %s", backupPolicy.ID) + + // Validate role assignment + backupContributorRoleDefinition := GetRoleDefinition(t, credential, "Storage Account Backup Contributor") + backupContributorRoleAssignment := GetRoleAssignment(t, credential, environment.SubscriptionID, *backupVault.Identity.PrincipalID, backupContributorRoleDefinition, storageAccountId) + assert.NotNil(t, backupContributorRoleAssignment, "Expected to find role assignment %s for principal %s on scope %s", backupContributorRoleDefinition.Name, *backupVault.Identity.PrincipalID, storageAccountId) + } + }) +} diff --git a/tests/end-to-end-tests/full_deployment_test.go b/tests/end-to-end-tests/full_deployment_test.go deleted file mode 100644 index 3b5d047..0000000 --- a/tests/end-to-end-tests/full_deployment_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package e2e_tests - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" - "github.com/gruntwork-io/terratest/modules/random" - "github.com/gruntwork-io/terratest/modules/terraform" - test_structure 
"github.com/gruntwork-io/terratest/modules/test-structure" - "github.com/stretchr/testify/assert" -) - -/* - * TestFullDeployment tests the full deployment of the infrastructure using Terraform. - */ -func TestFullDeployment(t *testing.T) { - t.Parallel() - - terraformFolder := test_structure.CopyTerraformFolderToTemp(t, "../../infrastructure", "") - terraformStateResourceGroup := os.Getenv("TF_STATE_RESOURCE_GROUP") - terraformStateStorageAccount := os.Getenv("TF_STATE_STORAGE_ACCOUNT") - terraformStateContainer := os.Getenv("TF_STATE_STORAGE_CONTAINER") - - if terraformStateResourceGroup == "" || terraformStateStorageAccount == "" || terraformStateContainer == "" { - t.Fatalf("One or more required environment variables (TF_STATE_RESOURCE_GROUP, TF_STATE_STORAGE_ACCOUNT, TF_STATE_STORAGE_CONTAINER) are not set.") - } - - vaultName := random.UniqueId() - vaultLocation := "uksouth" - vaultRedundancy := "LocallyRedundant" - - // Setup stage - // ... - - test_structure.RunTestStage(t, "setup", func() { - terraformOptions := &terraform.Options{ - TerraformDir: terraformFolder, - - // Variables to pass to our Terraform code using -var options - Vars: map[string]interface{}{ - "vault_name": vaultName, - "vault_location": vaultLocation, - "vault_redundancy": vaultRedundancy, - }, - - BackendConfig: map[string]interface{}{ - "resource_group_name": terraformStateResourceGroup, - "storage_account_name": terraformStateStorageAccount, - "container_name": terraformStateContainer, - "key": vaultName + ".tfstate", - }, - } - - // Save options for later test stages - test_structure.SaveTerraformOptions(t, terraformFolder, terraformOptions) - - terraform.InitAndApply(t, terraformOptions) - }) - - // Validate stage - // ... - - test_structure.RunTestStage(t, "validate", func() { - resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s", vaultName) - fullVaultName := fmt.Sprintf("bvault-%s", vaultName) - - // Get credentials from environment variables - tenantID := os.Getenv("ARM_TENANT_ID") - subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") - clientID := os.Getenv("ARM_CLIENT_ID") - clientSecret := os.Getenv("ARM_CLIENT_SECRET") - - if tenantID == "" || subscriptionID == "" || clientID == "" || clientSecret == "" { - t.Fatalf("One or more required environment variables (ARM_TENANT_ID, ARM_SUBSCRIPTION_ID, ARM_CLIENT_ID, ARM_CLIENT_SECRET) are not set.") - } - - // Create a credential to authenticate with Azure Resource Manager - cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, clientSecret, nil) - assert.NoError(t, err, "Failed to obtain a credential: %v", err) - - ValidateResourceGroup(t, subscriptionID, cred, resourceGroupName, vaultLocation) - ValidateBackupVault(t, subscriptionID, cred, resourceGroupName, fullVaultName, vaultLocation) - ValidateBackupPolicies(t, subscriptionID, cred, resourceGroupName, fullVaultName, vaultName) - }) - - // Teardown stage - // ... 
- - test_structure.RunTestStage(t, "teardown", func() { - terraformOptions := test_structure.LoadTerraformOptions(t, terraformFolder) - - terraform.Destroy(t, terraformOptions) - }) -} - -/* - * Validates the resource group has been deployed correctly - */ -func ValidateResourceGroup(t *testing.T, subscriptionID string, - cred *azidentity.ClientSecretCredential, resourceGroupName string, vaultLocation string) { - // Create a new resource groups client - client, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) - assert.NoError(t, err, "Failed to create resource group client: %v", err) - assert.NoError(t, err) - - // Get the resource group - resp, err := client.Get(context.Background(), resourceGroupName, nil) - assert.NoError(t, err, "Failed to get resource group: %v", err) - - // Validate the resource group - assert.NotNil(t, resp.ResourceGroup, "Resource group does not exist") - assert.Equal(t, resourceGroupName, *resp.ResourceGroup.Name, "Resource group name does not match") - assert.Equal(t, vaultLocation, *resp.ResourceGroup.Location, "Resource group location does not match") -} - -/* - * Validates the backup vault has been deployed correctly - */ -func ValidateBackupVault(t *testing.T, subscriptionID string, cred *azidentity.ClientSecretCredential, resourceGroupName string, vaultName string, vaultLocation string) { - // Create a new Data Protection Backup Vaults client - client, err := armdataprotection.NewBackupVaultsClient(subscriptionID, cred, nil) - assert.NoError(t, err, "Failed to create data protection client: %v", err) - - // Get the backup vault - resp, err := client.Get(context.Background(), resourceGroupName, vaultName, nil) - assert.NoError(t, err, "Failed to get backup vault: %v", err) - - // Validate the backup vault - assert.NotNil(t, resp.BackupVaultResource, "Backup vault does not exist") - assert.Equal(t, vaultName, *resp.BackupVaultResource.Name, "Backup vault name does not match") - assert.Equal(t, vaultLocation, *resp.BackupVaultResource.Location, "Backup vault location does not match") - assert.NotNil(t, resp.BackupVaultResource.Identity.PrincipalID, "Backup vault identity does not exist") - assert.Equal(t, "SystemAssigned", *resp.BackupVaultResource.Identity.Type, "Backup vault identity type does not match") - assert.Equal(t, armdataprotection.StorageSettingTypesLocallyRedundant, *resp.BackupVaultResource.Properties.StorageSettings[0].Type, "Backup vault redundancy does not match") - assert.Equal(t, armdataprotection.StorageSettingStoreTypesVaultStore, *resp.BackupVaultResource.Properties.StorageSettings[0].DatastoreType, "Backup vault datastore type does not match") -} - -/* - * Validates the backup policies have been deployed correctly - */ -func ValidateBackupPolicies(t *testing.T, subscriptionID string, cred *azidentity.ClientSecretCredential, resourceGroupName string, fullVaultName string, vaultName string) { - ctx := context.Background() - - // Create a client to interact with Data Protection vault backup policies - client, err := armdataprotection.NewBackupPoliciesClient(subscriptionID, cred, nil) - assert.NoError(t, err, "Failed to create data protection client: %v", err) - - policyPager := client.NewListPager(resourceGroupName, fullVaultName, nil) - - // Fetch all backup policies from the vault - var policies []*armdataprotection.BaseBackupPolicyResource - - for policyPager.More() { - page, err := policyPager.NextPage(ctx) - assert.NoError(t, err, "Failed to get backup policies: %v", err) - - policies = append(policies, 
page.Value...) - } - - // Validate the policies - if len(policies) == 0 { - assert.Fail(t, "Expected to find at least one backup policy in vault %s", fullVaultName) - } else { - assert.Equal(t, len(policies), 2, "Expected to find two backup policies in vault %s", fullVaultName) - - ValidateManagedDiskPolicy(t, policies, vaultName) - ValidateBlobStoragePolicy(t, policies, vaultName) - } -} - -/* - * Validates the blob storage backup policy - */ -func ValidateBlobStoragePolicy(t *testing.T, policies []*armdataprotection.BaseBackupPolicyResource, vaultName string) { - blobStoragePolicyName := fmt.Sprintf("bkpol-%s-blobstorage", vaultName) - blobStoragePolicy := GetBackupPolicyForName(policies, blobStoragePolicyName) - assert.NotNil(t, blobStoragePolicy, "Expected to find a blob storage backup policy called %s", blobStoragePolicyName) - - blobStoragePolicyProperties, ok := blobStoragePolicy.Properties.(*armdataprotection.BackupPolicy) - assert.True(t, ok, "Failed to cast blob storage policy properties to BackupPolicy") - - // Validate the retention policy - retentionPeriodPolicyRule := GetBackupPolicyRuleForName(blobStoragePolicyProperties.PolicyRules, "Default") - assert.NotNil(t, retentionPeriodPolicyRule, "Expected to find a policy rule called Default in the blob storage backup policies") - - azureRetentionRule, ok := retentionPeriodPolicyRule.(*armdataprotection.AzureRetentionRule) - assert.True(t, ok, "Failed to cast retention period policy rule to AzureRetentionRule") - - deleteOption, ok := azureRetentionRule.Lifecycles[0].DeleteAfter.(*armdataprotection.AbsoluteDeleteOption) - assert.True(t, ok, "Failed to cast delete option to AbsoluteDeleteOption") - - assert.Equal(t, "P7D", *deleteOption.Duration, "Expected the blob storage retention period to be P7D") -} - -/* - * Validates the managed disk backup policy - */ -func ValidateManagedDiskPolicy(t *testing.T, policies []*armdataprotection.BaseBackupPolicyResource, vaultName string) { - managedDiskPolicyName := fmt.Sprintf("bkpol-%s-manageddisk", vaultName) - managedDiskPolicy := GetBackupPolicyForName(policies, managedDiskPolicyName) - assert.NotNil(t, managedDiskPolicy, "Expected to find a managed disk backup policy called %s", managedDiskPolicyName) - - managedDiskPolicyProperties, ok := managedDiskPolicy.Properties.(*armdataprotection.BackupPolicy) - assert.True(t, ok, "Failed to cast managed disk policy properties to BackupPolicy") - - // Validate the repeating time intervals - backupIntervalsPolicyRule := GetBackupPolicyRuleForName(managedDiskPolicyProperties.PolicyRules, "BackupIntervals") - assert.NotNil(t, backupIntervalsPolicyRule, "Expected to find a policy rule called BackupIntervals in the managed disk backup policies") - - azureBackupRule, ok := backupIntervalsPolicyRule.(*armdataprotection.AzureBackupRule) - assert.True(t, ok, "Failed to cast backup intervals policy rule to AzureBackupRule") - - trigger, ok := azureBackupRule.Trigger.(*armdataprotection.ScheduleBasedTriggerContext) - assert.True(t, ok, "Failed to cast azure backup rule trigger to ScheduleBasedTriggerContext") - - assert.Equal(t, "R/2024-01-01T00:00:00+00:00/P1D", *trigger.Schedule.RepeatingTimeIntervals[0], - "Expected the managed disk backup policy repeating time intervals to be R/2024-01-01T00:00:00+00:00/P1D") - - // Validate the retention policy - retentionPeriodPolicyRule := GetBackupPolicyRuleForName(managedDiskPolicyProperties.PolicyRules, "Default") - assert.NotNil(t, retentionPeriodPolicyRule, "Expected to find a policy rule called Default in the 
managed disk backup policies") - - azureRetentionRule, ok := retentionPeriodPolicyRule.(*armdataprotection.AzureRetentionRule) - assert.True(t, ok, "Failed to cast retention period policy rule to AzureRetentionRule") - - deleteOption, ok := azureRetentionRule.Lifecycles[0].DeleteAfter.(*armdataprotection.AbsoluteDeleteOption) - assert.True(t, ok, "Failed to cast delete option to AbsoluteDeleteOption") - - assert.Equal(t, "P7D", *deleteOption.Duration, "Expected the managed disk retention period to be P7D") -} - -/* - * Gets a backup policy from the provided list for the provided name - */ -func GetBackupPolicyForName(policies []*armdataprotection.BaseBackupPolicyResource, name string) *armdataprotection.BaseBackupPolicyResource { - for _, policy := range policies { - if *policy.Name == name { - return policy - } - } - - return nil -} - -/* - * Gets a backup policy rules from the provided list for the provided name - */ -func GetBackupPolicyRuleForName(policyRules []armdataprotection.BasePolicyRuleClassification, name string) armdataprotection.BasePolicyRuleClassification { - for _, policyRule := range policyRules { - if *policyRule.GetBasePolicyRule().Name == name { - return policyRule - } - } - - return nil -} diff --git a/tests/end-to-end-tests/go.mod b/tests/end-to-end-tests/go.mod index cf942e2..cd16c07 100644 --- a/tests/end-to-end-tests/go.mod +++ b/tests/end-to-end-tests/go.mod @@ -5,9 +5,13 @@ go 1.21 toolchain go1.23.1 require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.0.0 github.com/gruntwork-io/terratest v0.47.1 github.com/stretchr/testify v1.9.0 ) @@ -18,7 +22,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.13.0 // indirect cloud.google.com/go/storage v1.29.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/agext/levenshtein v1.2.3 // indirect diff --git a/tests/end-to-end-tests/go.sum b/tests/end-to-end-tests/go.sum index 525ce22..9a5b691 100644 --- a/tests/end-to-end-tests/go.sum +++ b/tests/end-to-end-tests/go.sum @@ -187,16 +187,27 @@ cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuW cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go v51.0.0+incompatible h1:p7blnyJSjJqf5jflHbSGhIhEpXIgIFmYZNg5uwqweso= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0 h1:qtRcg5Y7jNJ4jEzPq4GpWLfTspHdNe2ZK6LjwGcjgmU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization v1.0.0/go.mod h1:lPneRe3TwsoDRKY4O6YDLXHhEWrD+TIRa8XrV/3/fqw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 h1:/Di3vB4sNeQ+7A8efjUVENvyB945Wruvstucqp7ZArg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0/go.mod h1:gM3K25LQlsET3QR+4V74zxCsFAy0r6xMNN9n80SZn+4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection v1.0.0 h1:VFqjVi532z3gdltbAkYrPl9Ez0czn3ZPM+bjmvLq6fk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection v1.0.0/go.mod h1:CmZQSRwBPP7KNjDA+PHaoR2m8wgOsbTd9ncqZgSzgHA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0 h1:lMW1lD/17LUA5z1XTURo7LcVG2ICBPlyMHjIUrcFZNQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0/go.mod h1:ceIuwmxDWptoW3eCqSXlnPsZFKh4X+R38dWPv7GS9Vs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.0.0 h1:TMEyRFKh1zaSPmoQh3kxK+xRAYVq8guCI/7SMO0F3KY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.0.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= diff --git a/tests/end-to-end-tests/helpers.go b/tests/end-to-end-tests/helpers.go new file mode 100644 index 0000000..1ab4b0c --- /dev/null +++ b/tests/end-to-end-tests/helpers.go @@ -0,0 +1,377 @@ +package e2e_tests + +import ( + "context" + "fmt" + "log" + "os" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/assert" +) + +type Config struct { + TerraformFolder string + TenantID string + SubscriptionID string + ClientID string + ClientSecret string + TerraformStateResourceGroup string + TerraformStateStorageAccount string + TerraformStateContainer string +} + +/* + * GetEnvironmentConfiguration gets the environment config that is required to execute a test. + */ +func GetEnvironmentConfiguration(t *testing.T) *Config { + terraformFolder := test_structure.CopyTerraformFolderToTemp(t, "../../infrastructure", "") + + tenantID := os.Getenv("ARM_TENANT_ID") + if tenantID == "" { + t.Fatalf("ARM_TENANT_ID must be set") + } + + subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Fatalf("ARM_SUBSCRIPTION_ID must be set") + } + + clientID := os.Getenv("ARM_CLIENT_ID") + if clientID == "" { + t.Fatalf("ARM_CLIENT_ID must be set") + } + + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + if clientSecret == "" { + t.Fatalf("ARM_CLIENT_SECRET must be set") + } + + terraformStateResourceGroup := os.Getenv("TF_STATE_RESOURCE_GROUP") + if terraformStateResourceGroup == "" { + t.Fatalf("TF_STATE_RESOURCE_GROUP must be set") + } + + terraformStateStorageAccount := os.Getenv("TF_STATE_STORAGE_ACCOUNT") + if terraformStateStorageAccount == "" { + t.Fatalf("TF_STATE_STORAGE_ACCOUNT must be set") + } + + terraformStateContainer := os.Getenv("TF_STATE_STORAGE_CONTAINER") + if terraformStateContainer == "" { + t.Fatalf("TF_STATE_STORAGE_CONTAINER must be set") + } + + config := &Config{ + TerraformFolder: terraformFolder, + TenantID: tenantID, + SubscriptionID: subscriptionID, + ClientID: clientID, + ClientSecret: clientSecret, + TerraformStateResourceGroup: terraformStateResourceGroup, + TerraformStateStorageAccount: terraformStateStorageAccount, + TerraformStateContainer: terraformStateContainer, + } + + return config +} + +/* + * Gets a credential for authenticating with Azure Resource Manager. + */ +func GetAzureCredential(t *testing.T, environment *Config) *azidentity.ClientSecretCredential { + credential, err := azidentity.NewClientSecretCredential(environment.TenantID, environment.ClientID, environment.ClientSecret, nil) + assert.NoError(t, err, "Failed to obtain a credential: %v", err) + + return credential +} + +/* + * Gets a resource group for the provided name. + */ +func GetResourceGroup(t *testing.T, subscriptionID string, + credential *azidentity.ClientSecretCredential, name string) armresources.ResourceGroup { + // Create a new resource groups client + client, err := armresources.NewResourceGroupsClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create resource group client: %v", err) + + // Get the resource group + resp, err := client.Get(context.Background(), name, nil) + assert.NoError(t, err, "Failed to get resource group: %v", err) + + return resp.ResourceGroup +} + +/* + * Gets a role definition for the provided role name. 
+ */ +func GetRoleDefinition(t *testing.T, credential *azidentity.ClientSecretCredential, roleName string) *armauthorization.RoleDefinition { + roleDefinitionsClient, err := armauthorization.NewRoleDefinitionsClient(credential, nil) + assert.NoError(t, err, "Failed to create role definition client: %v", err) + + // Create a pager to list role definitions + filter := fmt.Sprintf("roleName eq '%s'", roleName) + pager := roleDefinitionsClient.NewListPager("", &armauthorization.RoleDefinitionsClientListOptions{Filter: &filter}) + + for pager.More() { + page, err := pager.NextPage(context.Background()) + assert.NoError(t, err, "Failed to list role definitions") + + for _, roleDefinition := range page.RoleDefinitionListResult.Value { + if *roleDefinition.Properties.RoleName == roleName { + return roleDefinition + } + } + } + + return nil +} + +/* + * Gets a role assignment in the provided scope for the provided role definition, + * that's been assigned to the provided principal id. + */ +func GetRoleAssignment(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, + principalId string, roleDefinition *armauthorization.RoleDefinition, scope string) *armauthorization.RoleAssignment { + roleAssignmentsClient, err := armauthorization.NewRoleAssignmentsClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create role assignments client: %v", err) + + // List role assignments for the given scope + filter := fmt.Sprintf("principalId eq '%s'", principalId) + pager := roleAssignmentsClient.NewListForScopePager(scope, &armauthorization.RoleAssignmentsClientListForScopeOptions{Filter: &filter}) + + // Find the role assignment for the given definition + for pager.More() { + page, err := pager.NextPage(context.Background()) + assert.NoError(t, err, "Failed to list role assignments") + + // Check if the role definition is among the assigned roles + for _, roleAssignment := range page.RoleAssignmentListResult.Value { + // Use string.contains, as the role definition ID on a role assignment + // is a longer URI which includes the subscription scope + if strings.Contains(*roleAssignment.Properties.RoleDefinitionID, *roleDefinition.ID) { + return roleAssignment + } + } + } + + return nil +} + +/* + * Gets a backup vault for the provided name. + */ +func GetBackupVault(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, resourceGroupName string, backupVaultName string) armdataprotection.BackupVaultResource { + client, err := armdataprotection.NewBackupVaultsClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create data protection client: %v", err) + + // Get the backup vault + resp, err := client.Get(context.Background(), resourceGroupName, backupVaultName, nil) + assert.NoError(t, err, "Failed to get backup vault: %v", err) + + return resp.BackupVaultResource +} + +/* + * Gets the backup policies for the provided backup vault. 
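+ * Pages through all results and returns every backup policy in the vault.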
+ */
+func GetBackupPolicies(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, resourceGroupName string, backupVaultName string) []*armdataprotection.BaseBackupPolicyResource {
+	client, err := armdataprotection.NewBackupPoliciesClient(subscriptionID, credential, nil)
+	assert.NoError(t, err, "Failed to create data protection client: %v", err)
+
+	policyPager := client.NewListPager(resourceGroupName, backupVaultName, nil)
+
+	var policies []*armdataprotection.BaseBackupPolicyResource
+
+	for policyPager.More() {
+		page, err := policyPager.NextPage(context.Background())
+		assert.NoError(t, err, "Failed to get backup policies: %v", err)
+
+		policies = append(policies, page.Value...)
+	}
+
+	return policies
+}
+
+/*
+ * Gets the backup instances for the provided backup vault.
+ */
+func GetBackupInstances(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, resourceGroupName string, backupVaultName string) []*armdataprotection.BackupInstanceResource {
+	client, err := armdataprotection.NewBackupInstancesClient(subscriptionID, credential, nil)
+	assert.NoError(t, err, "Failed to create data protection client: %v", err)
+
+	instancePager := client.NewListPager(resourceGroupName, backupVaultName, nil)
+
+	var instances []*armdataprotection.BackupInstanceResource
+
+	for instancePager.More() {
+		page, err := instancePager.NextPage(context.Background())
+		assert.NoError(t, err, "Failed to get backup instances: %v", err)
+
+		instances = append(instances, page.Value...)
+	}
+
+	return instances
+}
+
+/*
+ * Gets a backup policy from the provided list for the provided name.
+ */
+func GetBackupPolicyForName(policies []*armdataprotection.BaseBackupPolicyResource, name string) *armdataprotection.BaseBackupPolicyResource {
+	for _, policy := range policies {
+		if *policy.Name == name {
+			return policy
+		}
+	}
+
+	return nil
+}
+
+/*
+ * Gets a backup policy rule from the provided list for the provided name.
+ */
+func GetBackupPolicyRuleForName(policyRules []armdataprotection.BasePolicyRuleClassification, name string) armdataprotection.BasePolicyRuleClassification {
+	for _, policyRule := range policyRules {
+		if *policyRule.GetBasePolicyRule().Name == name {
+			return policyRule
+		}
+	}
+
+	return nil
+}
+
+/*
+ * Gets a backup instance from the provided list for the provided name.
+ */
+func GetBackupInstanceForName(instances []*armdataprotection.BackupInstanceResource, name string) *armdataprotection.BackupInstanceResource {
+	for _, instance := range instances {
+		if *instance.Name == name {
+			return instance
+		}
+	}
+
+	return nil
+}
+
+/*
+ * Creates a resource group that can be used for testing purposes.
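+ * The create call is synchronous, so the resource group exists once this function returns.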
+ */ +func CreateResourceGroup(t *testing.T, subscriptionID string, credential *azidentity.ClientSecretCredential, resourceGroupName string, resourceGroupLocation string) armresources.ResourceGroup { + client, err := armresources.NewResourceGroupsClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create resource group client: %v", err) + + log.Printf("Creating resource group %s in location %s", resourceGroupName, resourceGroupLocation) + + resp, err := client.CreateOrUpdate( + context.Background(), + resourceGroupName, + armresources.ResourceGroup{ + Location: &resourceGroupLocation, + }, + nil, + ) + assert.NoError(t, err, "Failed to create resource group: %v", err) + + log.Printf("Resource group %s created successfully", resourceGroupName) + + return resp.ResourceGroup +} + +/* + * Creates a storage account that can be used for testing purposes. + */ +func CreateStorageAccount(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, + resourceGroupName string, storageAccountName string, storageAccountLocation string) armstorage.Account { + client, err := armstorage.NewAccountsClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create storage account client: %v", err) + + log.Printf("Creating storage account %s in location %s", storageAccountName, storageAccountLocation) + + pollerResp, err := client.BeginCreate( + context.Background(), + resourceGroupName, + storageAccountName, + armstorage.AccountCreateParameters{ + SKU: &armstorage.SKU{ + Name: to.Ptr(armstorage.SKUNameStandardLRS), + }, + Kind: to.Ptr(armstorage.KindStorageV2), + Location: &storageAccountLocation, + }, + nil, + ) + assert.NoError(t, err, "Failed to begin creating storage account: %v", err) + + // Wait for the creation to complete + resp, err := pollerResp.PollUntilDone(context.Background(), nil) + assert.NoError(t, err, "Failed to create storage account: %v", err) + + log.Printf("Storage account %s created successfully", storageAccountName) + + return resp.Account +} + +/* + * Creates a managed disk that can be used for testing purposes. + */ +func CreateManagedDisk(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, + resourceGroupName string, diskName string, diskLocation string, diskSizeGB int32) armcompute.Disk { + client, err := armcompute.NewDisksClient(subscriptionID, credential, nil) + assert.NoError(t, err, "Failed to create disks client: %v", err) + + log.Printf("Creating managed disk %s in location %s", diskName, diskLocation) + + pollerResp, err := client.BeginCreateOrUpdate( + context.Background(), + resourceGroupName, + diskName, + armcompute.Disk{ + Location: &diskLocation, + SKU: &armcompute.DiskSKU{ + Name: to.Ptr(armcompute.DiskStorageAccountTypesStandardLRS), + }, + Properties: &armcompute.DiskProperties{ + DiskSizeGB: &diskSizeGB, + CreationData: &armcompute.CreationData{CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty)}, + }, + }, + nil, + ) + assert.NoError(t, err, "Failed to begin creating managed disk: %v", err) + + // Wait for the creation to complete + resp, err := pollerResp.PollUntilDone(context.Background(), nil) + assert.NoError(t, err, "Failed to create managed disk: %v", err) + + log.Printf("Managed disk %s created successfully", diskName) + + return resp.Disk +} + +/* + * Deletes a resource group. 
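+ * Blocks until the delete operation has completed.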
+ */
+func DeleteResourceGroup(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, resourceGroupName string) {
+	client, err := armresources.NewResourceGroupsClient(subscriptionID, credential, nil)
+	assert.NoError(t, err, "Failed to create resource group client: %v", err)
+
+	log.Printf("Deleting resource group %s", resourceGroupName)
+
+	pollerResp, err := client.BeginDelete(context.Background(), resourceGroupName, nil)
+	assert.NoError(t, err, "Failed to begin deleting resource group: %v", err)
+
+	// Wait for the deletion to complete
+	_, err = pollerResp.PollUntilDone(context.Background(), nil)
+	assert.NoError(t, err, "Failed to delete resource group: %v", err)
+
+	log.Printf("Resource group %s deleted successfully", resourceGroupName)
+}
diff --git a/tests/end-to-end-tests/managed_disk_backup_test.go b/tests/end-to-end-tests/managed_disk_backup_test.go
new file mode 100644
index 0000000..73bfc28
--- /dev/null
+++ b/tests/end-to-end-tests/managed_disk_backup_test.go
@@ -0,0 +1,182 @@
+package e2e_tests
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dataprotection/armdataprotection"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+	"github.com/gruntwork-io/terratest/modules/random"
+	"github.com/gruntwork-io/terratest/modules/terraform"
+	test_structure "github.com/gruntwork-io/terratest/modules/test-structure"
+	"github.com/stretchr/testify/assert"
+)
+
+type TestManagedDiskBackupExternalResources struct {
+	ResourceGroup  armresources.ResourceGroup
+	ManagedDiskOne armcompute.Disk
+	ManagedDiskTwo armcompute.Disk
+}
+
+/*
+ * Creates resources which are "external" to the az-backup module, and models
+ * what would be backed up in a real scenario.
+ */
+func setupExternalResourcesForManagedDiskBackupTest(t *testing.T, credential *azidentity.ClientSecretCredential, subscriptionID string, vault_name string, vault_location string) *TestManagedDiskBackupExternalResources {
+	resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s-external", vault_name)
+	resourceGroup := CreateResourceGroup(t, subscriptionID, credential, resourceGroupName, vault_location)
+
+	managedDiskOneName := fmt.Sprintf("disk-%s-external-1", strings.ToLower(vault_name))
+	managedDiskOne := CreateManagedDisk(t, credential, subscriptionID, resourceGroupName, managedDiskOneName, vault_location, int32(1))
+
+	managedDiskTwoName := fmt.Sprintf("disk-%s-external-2", strings.ToLower(vault_name))
+	managedDiskTwo := CreateManagedDisk(t, credential, subscriptionID, resourceGroupName, managedDiskTwoName, vault_location, int32(1))
+
+	externalResources := &TestManagedDiskBackupExternalResources{
+		ResourceGroup:  resourceGroup,
+		ManagedDiskOne: managedDiskOne,
+		ManagedDiskTwo: managedDiskTwo,
+	}
+
+	return externalResources
+}
+
+/*
+ * TestManagedDiskBackup tests the deployment of a backup vault and backup policies for managed disks.
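+ * External managed disks are created first, the Terraform module is applied against them, and the
+ * resulting policies, instances and role assignments are then validated.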
+ */ +func TestManagedDiskBackup(t *testing.T) { + t.Parallel() + + environment := GetEnvironmentConfiguration(t) + credential := GetAzureCredential(t, environment) + + vaultName := random.UniqueId() + vaultLocation := "uksouth" + vaultRedundancy := "LocallyRedundant" + resourceGroupName := fmt.Sprintf("rg-nhsbackup-%s", vaultName) + backupVaultName := fmt.Sprintf("bvault-%s", vaultName) + + externalResources := setupExternalResourcesForManagedDiskBackupTest(t, credential, environment.SubscriptionID, vaultName, vaultLocation) + + // A map of backups which we'll use to apply the TF module, and then validate the + // policies have been created correctly + managedDiskBackups := map[string]map[string]interface{}{ + "backup1": { + "backup_name": "disk1", + "retention_period": "P7D", + "backup_intervals": []string{"R/2024-01-01T00:00:00+00:00/P1D"}, + "managed_disk_id": *externalResources.ManagedDiskOne.ID, + "managed_disk_resource_group": map[string]interface{}{ + "id": *externalResources.ResourceGroup.ID, + "name": *externalResources.ResourceGroup.Name, + }, + }, + "backup2": { + "backup_name": "disk2", + "retention_period": "P30D", + "backup_intervals": []string{"R/2024-01-01T00:00:00+00:00/P2D"}, + "managed_disk_id": *externalResources.ManagedDiskTwo.ID, + "managed_disk_resource_group": map[string]interface{}{ + "id": *externalResources.ResourceGroup.ID, + "name": *externalResources.ResourceGroup.Name, + }, + }, + } + + // Teardown stage + // ... + + defer test_structure.RunTestStage(t, "teardown", func() { + terraformOptions := test_structure.LoadTerraformOptions(t, environment.TerraformFolder) + + terraform.Destroy(t, terraformOptions) + + DeleteResourceGroup(t, credential, environment.SubscriptionID, *externalResources.ResourceGroup.Name) + }) + + // Setup stage + // ... + + test_structure.RunTestStage(t, "setup", func() { + terraformOptions := &terraform.Options{ + TerraformDir: environment.TerraformFolder, + + Vars: map[string]interface{}{ + "vault_name": vaultName, + "vault_location": vaultLocation, + "vault_redundancy": vaultRedundancy, + "managed_disk_backups": managedDiskBackups, + }, + + BackendConfig: map[string]interface{}{ + "resource_group_name": environment.TerraformStateResourceGroup, + "storage_account_name": environment.TerraformStateStorageAccount, + "container_name": environment.TerraformStateContainer, + "key": vaultName + ".tfstate", + }, + } + + // Save options for later test stages + test_structure.SaveTerraformOptions(t, environment.TerraformFolder, terraformOptions) + + terraform.InitAndApply(t, terraformOptions) + }) + + // Validate stage + // ... 
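+	// Query Azure directly to confirm the vault, backup policies, backup instances and role assignments were created as expected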
+
+	test_structure.RunTestStage(t, "validate", func() {
+		backupVault := GetBackupVault(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName)
+		backupPolicies := GetBackupPolicies(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName)
+		backupInstances := GetBackupInstances(t, credential, environment.SubscriptionID, resourceGroupName, backupVaultName)
+
+		assert.Equal(t, len(managedDiskBackups), len(backupPolicies), "Expected to find %d backup policies in vault", len(managedDiskBackups))
+		assert.Equal(t, len(managedDiskBackups), len(backupInstances), "Expected to find %d backup instances in vault", len(managedDiskBackups))
+
+		for _, backup := range managedDiskBackups {
+			backupName := backup["backup_name"].(string)
+			retentionPeriod := backup["retention_period"].(string)
+			backupIntervals := backup["backup_intervals"].([]string)
+			managedDiskId := backup["managed_disk_id"].(string)
+			managedDiskResourceGroup := backup["managed_disk_resource_group"].(map[string]interface{})
+			managedDiskResourceGroupId := managedDiskResourceGroup["id"].(string)
+
+			// Validate backup policy
+			backupPolicyName := fmt.Sprintf("bkpol-%s-manageddisk-%s", vaultName, backupName)
+			backupPolicy := GetBackupPolicyForName(backupPolicies, backupPolicyName)
+			assert.NotNil(t, backupPolicy, "Expected to find a backup policy called %s", backupPolicyName)
+
+			// Validate retention period
+			backupPolicyProperties := backupPolicy.Properties.(*armdataprotection.BackupPolicy)
+			retentionRule := GetBackupPolicyRuleForName(backupPolicyProperties.PolicyRules, "Default").(*armdataprotection.AzureRetentionRule)
+			deleteOption := retentionRule.Lifecycles[0].DeleteAfter.(*armdataprotection.AbsoluteDeleteOption)
+			assert.Equal(t, retentionPeriod, *deleteOption.Duration, "Expected the backup policy retention period to be %s", retentionPeriod)
+
+			// Validate backup intervals
+			backupRule := GetBackupPolicyRuleForName(backupPolicyProperties.PolicyRules, "BackupIntervals").(*armdataprotection.AzureBackupRule)
+			schedule := backupRule.Trigger.(*armdataprotection.ScheduleBasedTriggerContext).Schedule
+			for index, interval := range schedule.RepeatingTimeIntervals {
+				assert.Equal(t, backupIntervals[index], *interval, "Expected backup policy repeating interval %d to be %s", index, backupIntervals[index])
+			}
+
+			// Validate backup instance
+			backupInstanceName := fmt.Sprintf("bkinst-%s-manageddisk-%s", vaultName, backupName)
+			backupInstance := GetBackupInstanceForName(backupInstances, backupInstanceName)
+			assert.NotNil(t, backupInstance, "Expected to find a backup instance called %s", backupInstanceName)
+			assert.Equal(t, managedDiskId, *backupInstance.Properties.DataSourceInfo.ResourceID, "Expected the backup instance source resource ID to be %s", managedDiskId)
+			assert.Equal(t, *backupPolicy.ID, *backupInstance.Properties.PolicyInfo.PolicyID, "Expected the backup instance policy ID to be %s", *backupPolicy.ID)
+
+			// Validate role assignments
+			snapshotContributorRoleDefinition := GetRoleDefinition(t, credential, "Disk Snapshot Contributor")
+			snapshotContributorRoleAssignment := GetRoleAssignment(t, credential, environment.SubscriptionID, *backupVault.Identity.PrincipalID, snapshotContributorRoleDefinition, managedDiskResourceGroupId)
+			assert.NotNil(t, snapshotContributorRoleAssignment, "Expected to find role assignment %s for principal %s on scope %s", *snapshotContributorRoleDefinition.Properties.RoleName, *backupVault.Identity.PrincipalID, managedDiskResourceGroupId)
+
+			backupReaderRoleDefinition := GetRoleDefinition(t, credential, "Disk Backup Reader")
+			backupReaderRoleAssignment := GetRoleAssignment(t, credential, environment.SubscriptionID, *backupVault.Identity.PrincipalID, backupReaderRoleDefinition, managedDiskId)
+			assert.NotNil(t, backupReaderRoleAssignment, "Expected to find role assignment %s for principal %s on scope %s", *backupReaderRoleDefinition.Properties.RoleName, *backupVault.Identity.PrincipalID, managedDiskId)
+		}
+	})
+}
diff --git a/tests/end-to-end-tests/terraform_output_test.go b/tests/end-to-end-tests/terraform_output_test.go
new file mode 100644
index 0000000..47c5ab1
--- /dev/null
+++ b/tests/end-to-end-tests/terraform_output_test.go
@@ -0,0 +1,76 @@
+package e2e_tests
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/gruntwork-io/terratest/modules/random"
+	"github.com/gruntwork-io/terratest/modules/terraform"
+	test_structure "github.com/gruntwork-io/terratest/modules/test-structure"
+	"github.com/stretchr/testify/assert"
+)
+
+/*
+ * TestTerraformOutput tests the output variables of the Terraform deployment.
+ */
+func TestTerraformOutput(t *testing.T) {
+	t.Parallel()
+
+	environment := GetEnvironmentConfiguration(t)
+
+	vaultName := random.UniqueId()
+	vaultLocation := "uksouth"
+	vaultRedundancy := "LocallyRedundant"
+
+	// Teardown stage
+	// ...
+
+	defer test_structure.RunTestStage(t, "teardown", func() {
+		terraformOptions := test_structure.LoadTerraformOptions(t, environment.TerraformFolder)
+
+		terraform.Destroy(t, terraformOptions)
+	})
+
+	// Setup stage
+	// ...
+
+	test_structure.RunTestStage(t, "setup", func() {
+		terraformOptions := &terraform.Options{
+			TerraformDir: environment.TerraformFolder,
+
+			Vars: map[string]interface{}{
+				"vault_name":       vaultName,
+				"vault_location":   vaultLocation,
+				"vault_redundancy": vaultRedundancy,
+			},
+
+			BackendConfig: map[string]interface{}{
+				"resource_group_name":  environment.TerraformStateResourceGroup,
+				"storage_account_name": environment.TerraformStateStorageAccount,
+				"container_name":       environment.TerraformStateContainer,
+				"key":                  vaultName + ".tfstate",
+			},
+		}
+
+		test_structure.SaveTerraformOptions(t, environment.TerraformFolder, terraformOptions)
+
+		terraform.InitAndApply(t, terraformOptions)
+	})
+
+	// Validate stage
+	// ...
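+	// Read the module outputs back from the Terraform state and compare them to the input variables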
+ + test_structure.RunTestStage(t, "validate", func() { + terraformOptions := test_structure.LoadTerraformOptions(t, environment.TerraformFolder) + + expectedVaultName := fmt.Sprintf("bvault-%s", vaultName) + actualVaultName := terraform.OutputMap(t, terraformOptions, "backup_vault")["name"] + assert.Equal(t, expectedVaultName, actualVaultName) + + actualVaultLocation := terraform.OutputMap(t, terraformOptions, "backup_vault")["location"] + assert.Equal(t, vaultLocation, actualVaultLocation) + + actualVaultRedundancy := terraform.OutputMap(t, terraformOptions, "backup_vault")["redundancy"] + assert.Equal(t, vaultRedundancy, actualVaultRedundancy) + }) +} diff --git a/tests/integration-tests/azurerm/data.tfmock.hcl b/tests/integration-tests/azurerm/data.tfmock.hcl index 85641aa..b9681bb 100644 --- a/tests/integration-tests/azurerm/data.tfmock.hcl +++ b/tests/integration-tests/azurerm/data.tfmock.hcl @@ -1,5 +1,23 @@ +mock_resource "azurerm_resource_group" { + defaults = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group" + } +} + mock_resource "azurerm_data_protection_backup_vault" { defaults = { id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataProtection/backupVaults/bvault-testvault" } -} \ No newline at end of file +} + +mock_resource "azurerm_data_protection_backup_policy_blob_storage" { + defaults = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataProtection/backupVaults/bvault-testvault/backupPolicies/bkpol-testvault-testpolicy" + } +} + +mock_resource "azurerm_data_protection_backup_policy_disk" { + defaults = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataProtection/backupVaults/bvault-testvault/backupPolicies/bkpol-testvault-testpolicy" + } +} diff --git a/tests/integration-tests/backup_modules_blob_storage.tftest.hcl b/tests/integration-tests/backup_modules_blob_storage.tftest.hcl new file mode 100644 index 0000000..4f3f87f --- /dev/null +++ b/tests/integration-tests/backup_modules_blob_storage.tftest.hcl @@ -0,0 +1,139 @@ +mock_provider "azurerm" { + source = "./azurerm" +} + +run "setup_tests" { + module { + source = "./setup" + } +} + +run "create_blob_storage_backup" { + command = apply + + module { + source = "../../infrastructure" + } + + variables { + vault_name = run.setup_tests.vault_name + vault_location = "uksouth" + blob_storage_backups = { + backup1 = { + backup_name = "storage1" + retention_period = "P7D" + storage_account_id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Storage/storageAccounts/sastorage1" + } + backup2 = { + backup_name = "storage2" + retention_period = "P30D" + storage_account_id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Storage/storageAccounts/sastorage2" + } + } + } + + assert { + condition = length(module.blob_storage_backup) == 2 + error_message = "Number of backup modules not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup1"].backup_policy.id) > 0 + error_message = "Blob storage backup policy id not as expected." 
+ } + + assert { + condition = module.blob_storage_backup["backup1"].backup_policy.name == "bkpol-${var.vault_name}-blobstorage-storage1" + error_message = "Blob storage backup policy name not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Blob storage backup policy vault id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_policy.operational_default_retention_duration == "P7D" + error_message = "Blob storage backup policy retention period not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup1"].backup_instance.id) > 0 + error_message = "Blob storage backup instance id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_instance.name == "bkinst-${var.vault_name}-blobstorage-storage1" + error_message = "Blob storage backup instance name not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_instance.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Blob storage backup instance vault id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_instance.location == azurerm_data_protection_backup_vault.backup_vault.location + error_message = "Blob storage backup instance location not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup1"].backup_instance.storage_account_id) > 0 + error_message = "Blob storage backup instance storage account id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup1"].backup_instance.backup_policy_id == module.blob_storage_backup["backup1"].backup_policy.id + error_message = "Blob storage backup instance backup policy id not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup2"].backup_policy.id) > 0 + error_message = "Blob storage backup policy id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_policy.name == "bkpol-${var.vault_name}-blobstorage-storage2" + error_message = "Blob storage backup policy name not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Blob storage backup policy vault id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_policy.operational_default_retention_duration == "P30D" + error_message = "Blob storage backup policy retention period not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup2"].backup_instance.id) > 0 + error_message = "Blob storage backup instance id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_instance.name == "bkinst-${var.vault_name}-blobstorage-storage2" + error_message = "Blob storage backup instance name not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_instance.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Blob storage backup instance vault id not as expected." 
+ } + + assert { + condition = module.blob_storage_backup["backup2"].backup_instance.location == azurerm_data_protection_backup_vault.backup_vault.location + error_message = "Blob storage backup instance location not as expected." + } + + assert { + condition = length(module.blob_storage_backup["backup2"].backup_instance.storage_account_id) > 0 + error_message = "Blob storage backup instance storage account id not as expected." + } + + assert { + condition = module.blob_storage_backup["backup2"].backup_instance.backup_policy_id == module.blob_storage_backup["backup2"].backup_policy.id + error_message = "Blob storage backup instance backup policy id not as expected." + } +} \ No newline at end of file diff --git a/tests/integration-tests/backup_modules_managed_disk.tftest.hcl b/tests/integration-tests/backup_modules_managed_disk.tftest.hcl new file mode 100644 index 0000000..08fdb6f --- /dev/null +++ b/tests/integration-tests/backup_modules_managed_disk.tftest.hcl @@ -0,0 +1,169 @@ +mock_provider "azurerm" { + source = "./azurerm" +} + +run "setup_tests" { + module { + source = "./setup" + } +} + +run "create_managed_disk_backup" { + command = apply + + module { + source = "../../infrastructure" + } + + variables { + vault_name = run.setup_tests.vault_name + vault_location = "uksouth" + managed_disk_backups = { + backup1 = { + backup_name = "disk1" + retention_period = "P7D" + backup_intervals = ["R/2024-01-01T00:00:00+00:00/P1D"] + managed_disk_id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Compute/disks/disk-1" + managed_disk_resource_group = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group1" + name = "example-resource-group1" + } + } + backup2 = { + backup_name = "disk2" + retention_period = "P30D" + backup_intervals = ["R/2024-01-01T00:00:00+00:00/P2D"] + managed_disk_id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Compute/disks/disk-2" + managed_disk_resource_group = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group2" + name = "example-resource-group2" + } + } + } + } + + assert { + condition = length(module.managed_disk_backup) == 2 + error_message = "Number of backup modules not as expected." + } + + assert { + condition = length(module.managed_disk_backup["backup1"].backup_policy.id) > 0 + error_message = "Managed disk backup policy id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_policy.name == "bkpol-${var.vault_name}-manageddisk-disk1" + error_message = "Managed disk backup policy name not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Managed disk backup policy vault id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_policy.default_retention_duration == "P7D" + error_message = "Managed disk backup policy retention period not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_policy.backup_repeating_time_intervals[0] == "R/2024-01-01T00:00:00+00:00/P1D" + error_message = "Managed disk backup policy backup intervals not as expected." 
+ } + + assert { + condition = length(module.managed_disk_backup["backup1"].backup_instance.id) > 0 + error_message = "Managed disk backup instance id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_instance.name == "bkinst-${var.vault_name}-manageddisk-disk1" + error_message = "Managed disk backup instance name not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_instance.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Managed disk backup instance vault id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_instance.location == azurerm_data_protection_backup_vault.backup_vault.location + error_message = "Managed disk backup instance location not as expected." + } + + assert { + condition = length(module.managed_disk_backup["backup1"].backup_instance.disk_id) > 0 + error_message = "Managed disk backup instance managed disk id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_instance.snapshot_resource_group_name == "example-resource-group1" + error_message = "Managed disk backup instance snapshot resource group not as expected." + } + + assert { + condition = module.managed_disk_backup["backup1"].backup_instance.backup_policy_id == module.managed_disk_backup["backup1"].backup_policy.id + error_message = "Managed disk backup instance backup policy id not as expected." + } + + assert { + condition = length(module.managed_disk_backup["backup2"].backup_policy.id) > 0 + error_message = "Managed disk backup policy id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_policy.name == "bkpol-${var.vault_name}-manageddisk-disk2" + error_message = "Managed disk backup policy name not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Managed disk backup policy vault id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_policy.default_retention_duration == "P30D" + error_message = "Managed disk backup policy retention period not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_policy.backup_repeating_time_intervals[0] == "R/2024-01-01T00:00:00+00:00/P2D" + error_message = "Managed disk backup policy backup intervals not as expected." + } + + assert { + condition = length(module.managed_disk_backup["backup2"].backup_instance.id) > 0 + error_message = "Managed disk backup instance id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_instance.name == "bkinst-${var.vault_name}-manageddisk-disk2" + error_message = "Managed disk backup instance name not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_instance.vault_id == azurerm_data_protection_backup_vault.backup_vault.id + error_message = "Managed disk backup instance vault id not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_instance.location == azurerm_data_protection_backup_vault.backup_vault.location + error_message = "Managed disk backup instance location not as expected." + } + + assert { + condition = length(module.managed_disk_backup["backup2"].backup_instance.disk_id) > 0 + error_message = "Managed disk backup instance managed disk id not as expected." 
+ } + + assert { + condition = module.managed_disk_backup["backup2"].backup_instance.snapshot_resource_group_name == "example-resource-group2" + error_message = "Managed disk backup instance snapshot resource group not as expected." + } + + assert { + condition = module.managed_disk_backup["backup2"].backup_instance.backup_policy_id == module.managed_disk_backup["backup2"].backup_policy.id + error_message = "Managed disk backup instance backup policy id not as expected." + } +} \ No newline at end of file diff --git a/tests/integration-tests/backup_policy.tftest.hcl b/tests/integration-tests/backup_policy.tftest.hcl deleted file mode 100644 index d8a1875..0000000 --- a/tests/integration-tests/backup_policy.tftest.hcl +++ /dev/null @@ -1,78 +0,0 @@ -mock_provider "azurerm" { - source = "./azurerm" -} - -run "setup_tests" { - module { - source = "./setup" - } -} - -run "create_blob_storage_policy" { - command = apply - - module { - source = "../../infrastructure" - } - - variables { - vault_name = run.setup_tests.vault_name - } - - assert { - condition = length(module.blob_storage_policy.id) > 0 - error_message = "Blob storage policy id not as expected." - } - - assert { - condition = module.blob_storage_policy.name == "bkpol-${var.vault_name}-blobstorage" - error_message = "Blob storage policy name not as expected." - } - - assert { - condition = module.blob_storage_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id - error_message = "Blob storage policy vault id not as expected." - } - - assert { - condition = module.blob_storage_policy.retention_period == "P7D" - error_message = "Blob storage policy retention period not as expected." - } -} - -run "create_managed_disk_policy" { - command = apply - - module { - source = "../../infrastructure" - } - - variables { - vault_name = run.setup_tests.vault_name - } - - assert { - condition = length(module.managed_disk_policy.id) > 0 - error_message = "Managed disk policy id not as expected." - } - - assert { - condition = module.managed_disk_policy.name == "bkpol-${var.vault_name}-manageddisk" - error_message = "Managed disk policy name not as expected." - } - - assert { - condition = module.managed_disk_policy.vault_id == azurerm_data_protection_backup_vault.backup_vault.id - error_message = "Managed disk policy vault id not as expected." - } - - assert { - condition = module.managed_disk_policy.retention_period == "P7D" - error_message = "Managed disk policy retention period not as expected." - } - - assert { - condition = can(module.managed_disk_policy.backup_intervals) && length(module.managed_disk_policy.backup_intervals) == 1 && module.managed_disk_policy.backup_intervals[0] == "R/2024-01-01T00:00:00+00:00/P1D" - error_message = "Managed disk policy backup intervals not as expected." - } -} \ No newline at end of file diff --git a/tests/integration-tests/main.tf b/tests/integration-tests/main.tf index 82f3445..e309c32 100644 --- a/tests/integration-tests/main.tf +++ b/tests/integration-tests/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.114.0" + version = "4.3.0" } } }