diff --git a/Makefile b/Makefile
index 631c680a..2347bf31 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ OAPI_CODEGEN ?= $(ENVTEST_ASSETS_DIR)/oapi-codegen
 # Run acceptance tests
 .PHONY: testacc
 testacc:
-	TF_ACC=1 go test ./... -v -run TestAcc $(TESTARGS) -timeout 120m
+	TF_ACC=1 go test ./... -v -run TestAcc $(TESTARGS) -timeout 180m
 
 # Run unit tests
 .PHONY: test
diff --git a/README.md b/README.md
index 5e0e87e8..b0746268 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ make build
 ## Adding Dependencies
 
 This provider uses [Go modules](https://github.com/golang/go/wiki/Modules).
-Please see the Go documentation for the most up to date information about using Go modules.
+Please see the Go documentation for the most up-to-date information about using Go modules.
 
 To add a new dependency `github.com/author/dependency` to your Terraform provider:
 
@@ -90,7 +90,6 @@ terraform {
 # provider configuration
 provider "astro" {
   organization_id = ""
-  host            = "https://api.astronomer-dev.io"
 }
 
 # get information on an existing workspace
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index fabc6dfe..8c6f4a78 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -3,12 +3,12 @@ page_title: "astro_cluster Resource - astro"
 subcategory: ""
 description: |-
-  Cluster resource
+  Cluster resource. If creating multiple clusters, add a delay between each cluster creation to avoid cluster creation limiting errors.
 ---
 
 # astro_cluster (Resource)
 
-Cluster resource
+Cluster resource. If creating multiple clusters, add a delay between each cluster creation to avoid cluster creation limiting errors.
 
 ## Example Usage
diff --git a/docs/resources/deployment.md b/docs/resources/deployment.md
index 8d9d6b81..77293dbe 100644
--- a/docs/resources/deployment.md
+++ b/docs/resources/deployment.md
@@ -14,22 +14,23 @@ Deployment resource
 
 ```terraform
 resource "astro_deployment" "dedicated" {
-  name                    = "my dedicated deployment"
-  description             = "an example deployment"
-  type                    = "DEDICATED"
-  cluster_id              = "clv17vgft000801kkydsws63x"
-  contact_emails          = ["preview@astronomer.test"]
-  default_task_pod_cpu    = "0.25"
-  default_task_pod_memory = "0.5Gi"
-  executor                = "KUBERNETES"
-  is_cicd_enforced        = true
-  is_dag_deploy_enabled   = true
-  is_development_mode     = false
-  is_high_availability    = true
-  resource_quota_cpu      = "10"
-  resource_quota_memory   = "20Gi"
-  scheduler_size          = "SMALL"
-  workspace_id            = "clnp86ly5000401ndaga21g81"
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my dedicated deployment"
+  description                    = "an example deployment"
+  type                           = "DEDICATED"
+  cluster_id                     = "clv17vgft000801kkydsws63x"
+  contact_emails                 = ["preview@astronomer.test"]
+  default_task_pod_cpu           = "0.25"
+  default_task_pod_memory        = "0.5Gi"
+  executor                       = "KUBERNETES"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  is_development_mode            = false
+  is_high_availability           = true
+  resource_quota_cpu             = "10"
+  resource_quota_memory          = "20Gi"
+  scheduler_size                 = "SMALL"
+  workspace_id                   = "clnp86ly5000401ndaga21g81"
   environment_variables = [{
     key       = "key1"
     value     = "value1"
@@ -38,24 +39,25 @@ resource "astro_deployment" "dedicated" {
 }
 
 resource "astro_deployment" "standard" {
-  name                    = "my standard deployment"
-  description             = "an example deployment"
-  type                    = "STANDARD"
-  cloud_provider          = "AWS"
-  region                  = "us-east-1"
-  contact_emails          = []
-  default_task_pod_cpu    = "0.25"
-  default_task_pod_memory = "0.5Gi"
-  executor                = "CELERY"
-  is_cicd_enforced        = true
-  is_dag_deploy_enabled   = true
-  is_development_mode     = false
-  is_high_availability    = false
-  resource_quota_cpu      = "10"
-  resource_quota_memory   = "20Gi"
-  scheduler_size          = "SMALL"
-  workspace_id            = "clnp86ly500a401ndaga20g81"
-  environment_variables   = []
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my standard deployment"
+  description                    = "an example deployment"
+  type                           = "STANDARD"
+  cloud_provider                 = "AWS"
+  region                         = "us-east-1"
+  contact_emails                 = []
+  default_task_pod_cpu           = "0.25"
+  default_task_pod_memory        = "0.5Gi"
+  executor                       = "CELERY"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  is_development_mode            = false
+  is_high_availability           = false
+  resource_quota_cpu             = "10"
+  resource_quota_memory          = "20Gi"
+  scheduler_size                 = "SMALL"
+  workspace_id                   = "clnp86ly500a401ndaga20g81"
+  environment_variables          = []
   worker_queues = [{
     name       = "default"
     is_default = true
@@ -67,18 +69,19 @@ resource "astro_deployment" "standard" {
 }
 
 resource "astro_deployment" "hybrid" {
-  name                  = "my hybrid deployment"
-  description           = "an example deployment"
-  type                  = "HYBRID"
-  cluster_id            = "clnp86ly5000401ndagu20g81"
-  task_pod_node_pool_id = "clnp86ly5000301ndzfxz895w"
-  contact_emails        = ["example@astronomer.io"]
-  executor              = "KUBERNETES"
-  is_cicd_enforced      = true
-  is_dag_deploy_enabled = true
-  scheduler_replicas    = 1
-  scheduler_au          = 5
-  workspace_id          = "clnp86ly5000401ndaga20g81"
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my hybrid deployment"
+  description                    = "an example deployment"
+  type                           = "HYBRID"
+  cluster_id                     = "clnp86ly5000401ndagu20g81"
+  task_pod_node_pool_id          = "clnp86ly5000301ndzfxz895w"
+  contact_emails                 = ["example@astronomer.io"]
+  executor                       = "KUBERNETES"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  scheduler_replicas             = 1
+  scheduler_au                   = 5
+  workspace_id                   = "clnp86ly5000401ndaga20g81"
   environment_variables = [{
     key       = "key1"
     value     = "value1"
@@ -110,6 +113,7 @@ resource "astro_deployment" "hybrid" {
 - `default_task_pod_memory` (String) Deployment default task pod memory - required for 'STANDARD' and 'DEDICATED' deployments
 - `is_development_mode` (Boolean) Deployment development mode - required for 'STANDARD' and 'DEDICATED' deployments. If changing from 'False' to 'True', the deployment will be recreated
 - `is_high_availability` (Boolean) Deployment high availability - required for 'STANDARD' and 'DEDICATED' deployments
+- `original_astro_runtime_version` (String) Deployment's original Astro Runtime version. The Terraform provider will use this provided Astro runtime version to create the Deployment. The Astro runtime version can be updated with your Astro project Dockerfile, but if this value is changed, the Deployment will be recreated with this new Astro runtime version.
 - `region` (String) Deployment region - required for 'STANDARD' deployments. If changing this value, the deployment will be recreated in the new region
 - `resource_quota_cpu` (String) Deployment resource quota CPU - required for 'STANDARD' and 'DEDICATED' deployments
 - `resource_quota_memory` (String) Deployment resource quota memory - required for 'STANDARD' and 'DEDICATED' deployments
@@ -123,7 +127,7 @@ resource "astro_deployment" "hybrid" {
 ### Read-Only
 
 - `airflow_version` (String) Deployment Airflow version
-- `astro_runtime_version` (String) Deployment Astro Runtime version. The terraform provider will use the latest Astro runtime version for the Deployment. The Astro runtime version can be updated with your Astro project Dockerfile
+- `astro_runtime_version` (String) Deployment's current Astro Runtime version
 - `created_at` (String) Deployment creation timestamp
 - `created_by` (Attributes) Deployment creator (see [below for nested schema](#nestedatt--created_by))
 - `dag_tarball_version` (String) Deployment DAG tarball version
diff --git a/examples/resources/astro_deployment/resource.tf b/examples/resources/astro_deployment/resource.tf
index 2972a034..237757b3 100644
--- a/examples/resources/astro_deployment/resource.tf
+++ b/examples/resources/astro_deployment/resource.tf
@@ -1,20 +1,21 @@
 resource "astro_deployment" "dedicated" {
-  name                    = "my dedicated deployment"
-  description             = "an example deployment"
-  type                    = "DEDICATED"
-  cluster_id              = "clv17vgft000801kkydsws63x"
-  contact_emails          = ["preview@astronomer.test"]
-  default_task_pod_cpu    = "0.25"
-  default_task_pod_memory = "0.5Gi"
-  executor                = "KUBERNETES"
-  is_cicd_enforced        = true
-  is_dag_deploy_enabled   = true
-  is_development_mode     = false
-  is_high_availability    = true
-  resource_quota_cpu      = "10"
-  resource_quota_memory   = "20Gi"
-  scheduler_size          = "SMALL"
-  workspace_id            = "clnp86ly5000401ndaga21g81"
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my dedicated deployment"
+  description                    = "an example deployment"
+  type                           = "DEDICATED"
+  cluster_id                     = "clv17vgft000801kkydsws63x"
+  contact_emails                 = ["preview@astronomer.test"]
+  default_task_pod_cpu           = "0.25"
+  default_task_pod_memory        = "0.5Gi"
+  executor                       = "KUBERNETES"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  is_development_mode            = false
+  is_high_availability           = true
+  resource_quota_cpu             = "10"
+  resource_quota_memory          = "20Gi"
+  scheduler_size                 = "SMALL"
+  workspace_id                   = "clnp86ly5000401ndaga21g81"
   environment_variables = [{
     key       = "key1"
     value     = "value1"
@@ -23,24 +24,25 @@ resource "astro_deployment" "dedicated" {
 }
 
 resource "astro_deployment" "standard" {
-  name                    = "my standard deployment"
-  description             = "an example deployment"
-  type                    = "STANDARD"
-  cloud_provider          = "AWS"
-  region                  = "us-east-1"
-  contact_emails          = []
-  default_task_pod_cpu    = "0.25"
-  default_task_pod_memory = "0.5Gi"
-  executor                = "CELERY"
-  is_cicd_enforced        = true
-  is_dag_deploy_enabled   = true
-  is_development_mode     = false
-  is_high_availability    = false
-  resource_quota_cpu      = "10"
-  resource_quota_memory   = "20Gi"
-  scheduler_size          = "SMALL"
-  workspace_id            = "clnp86ly500a401ndaga20g81"
-  environment_variables   = []
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my standard deployment"
+  description                    = "an example deployment"
+  type                           = "STANDARD"
+  cloud_provider                 = "AWS"
+  region                         = "us-east-1"
+  contact_emails                 = []
+  default_task_pod_cpu           = "0.25"
+  default_task_pod_memory        = "0.5Gi"
+  executor                       = "CELERY"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  is_development_mode            = false
+  is_high_availability           = false
+  resource_quota_cpu             = "10"
+  resource_quota_memory          = "20Gi"
+  scheduler_size                 = "SMALL"
+  workspace_id                   = "clnp86ly500a401ndaga20g81"
+  environment_variables          = []
   worker_queues = [{
     name       = "default"
     is_default = true
@@ -52,18 +54,19 @@ resource "astro_deployment" "standard" {
 }
 
 resource "astro_deployment" "hybrid" {
-  name                  = "my hybrid deployment"
-  description           = "an example deployment"
-  type                  = "HYBRID"
-  cluster_id            = "clnp86ly5000401ndagu20g81"
-  task_pod_node_pool_id = "clnp86ly5000301ndzfxz895w"
-  contact_emails        = ["example@astronomer.io"]
-  executor              = "KUBERNETES"
-  is_cicd_enforced      = true
-  is_dag_deploy_enabled = true
-  scheduler_replicas    = 1
-  scheduler_au          = 5
-  workspace_id          = "clnp86ly5000401ndaga20g81"
+  original_astro_runtime_version = "11.3.0"
+  name                           = "my hybrid deployment"
+  description                    = "an example deployment"
+  type                           = "HYBRID"
+  cluster_id                     = "clnp86ly5000401ndagu20g81"
+  task_pod_node_pool_id          = "clnp86ly5000301ndzfxz895w"
+  contact_emails                 = ["example@astronomer.io"]
+  executor                       = "KUBERNETES"
+  is_cicd_enforced               = true
+  is_dag_deploy_enabled          = true
+  scheduler_replicas             = 1
+  scheduler_au                   = 5
+  workspace_id                   = "clnp86ly5000401ndaga20g81"
   environment_variables = [{
     key       = "key1"
     value     = "value1"
diff --git a/internal/provider/datasources/data_source_deployment.go b/internal/provider/datasources/data_source_deployment.go
index 675bf97d..34bca801 100644
--- a/internal/provider/datasources/data_source_deployment.go
+++ b/internal/provider/datasources/data_source_deployment.go
@@ -73,7 +73,7 @@ func (d *deploymentDataSource) Read(
 	req datasource.ReadRequest,
 	resp *datasource.ReadResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentDataSource
 
 	// Read Terraform configuration data into the model
 	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
@@ -106,7 +106,7 @@ func (d *deploymentDataSource) Read(
 	}
 
 	// Populate the model with the response data
-	diags := data.ReadFromResponse(ctx, deployment.JSON200, false)
+	diags := data.ReadFromResponse(ctx, deployment.JSON200)
 	if diags.HasError() {
 		resp.Diagnostics.Append(diags...)
 		return
diff --git a/internal/provider/datasources/data_source_deployments_test.go b/internal/provider/datasources/data_source_deployments_test.go
index c880c8a5..8ec5adbb 100644
--- a/internal/provider/datasources/data_source_deployments_test.go
+++ b/internal/provider/datasources/data_source_deployments_test.go
@@ -117,7 +117,10 @@ resource "astro_workspace" "test_workspace" {
 	cicd_enforced_default = true
 }
 
+data "astro_deployment_options" "deployment_options" {}
+
 resource "astro_deployment" "test_deployment_kubernetes" {
+	original_astro_runtime_version = tolist(data.astro_deployment_options.deployment_options.runtime_releases)[0].version
 	name        = "%v-1"
 	description = "%v"
 	type        = "STANDARD"
@@ -143,6 +146,7 @@ resource "astro_deployment" "test_deployment_kubernetes" {
 }
 
 resource "astro_deployment" "test_deployment_celery" {
+	original_astro_runtime_version = tolist(data.astro_deployment_options.deployment_options.runtime_releases)[0].version
 	name        = "%v-2"
 	description = "%v"
 	type        = "STANDARD"
diff --git a/internal/provider/models/deployment.go b/internal/provider/models/deployment.go
index 84af8a59..19fbf97b 100644
--- a/internal/provider/models/deployment.go
+++ b/internal/provider/models/deployment.go
@@ -11,7 +11,66 @@ import (
 	"github.com/hashicorp/terraform-plugin-framework/types"
 )
 
-type Deployment struct {
+type DeploymentResource struct {
+	// Common fields
+	Id                          types.String `tfsdk:"id"`
+	Name                        types.String `tfsdk:"name"`
+	Description                 types.String `tfsdk:"description"`
+	CreatedAt                   types.String `tfsdk:"created_at"`
+	UpdatedAt                   types.String `tfsdk:"updated_at"`
+	CreatedBy                   types.Object `tfsdk:"created_by"`
+	UpdatedBy                   types.Object `tfsdk:"updated_by"`
+	WorkspaceId                 types.String `tfsdk:"workspace_id"`
+	Type                        types.String `tfsdk:"type"`
+	Region                      types.String `tfsdk:"region"`
+	CloudProvider               types.String `tfsdk:"cloud_provider"`
+	OriginalAstroRuntimeVersion types.String `tfsdk:"original_astro_runtime_version"`
+	AstroRuntimeVersion         types.String `tfsdk:"astro_runtime_version"`
+	AirflowVersion              types.String `tfsdk:"airflow_version"`
+	Namespace                   types.String `tfsdk:"namespace"`
+	ContactEmails               types.Set    `tfsdk:"contact_emails"`
+	Executor                    types.String `tfsdk:"executor"`
+	SchedulerCpu                types.String `tfsdk:"scheduler_cpu"`
+	SchedulerMemory             types.String `tfsdk:"scheduler_memory"`
+	SchedulerAu                 types.Int64  `tfsdk:"scheduler_au"`
+	SchedulerReplicas           types.Int64  `tfsdk:"scheduler_replicas"`
+	ImageTag                    types.String `tfsdk:"image_tag"`
+	ImageRepository             types.String `tfsdk:"image_repository"`
+	ImageVersion                types.String `tfsdk:"image_version"`
+	EnvironmentVariables        types.Set    `tfsdk:"environment_variables"`
+	WebserverIngressHostname    types.String `tfsdk:"webserver_ingress_hostname"`
+	WebserverUrl                types.String `tfsdk:"webserver_url"`
+	WebserverAirflowApiUrl      types.String `tfsdk:"webserver_airflow_api_url"`
+	Status                      types.String `tfsdk:"status"`
+	StatusReason                types.String `tfsdk:"status_reason"`
+	DagTarballVersion           types.String `tfsdk:"dag_tarball_version"`
+	DesiredDagTarballVersion    types.String `tfsdk:"desired_dag_tarball_version"`
+	IsCicdEnforced              types.Bool   `tfsdk:"is_cicd_enforced"`
+	IsDagDeployEnabled          types.Bool   `tfsdk:"is_dag_deploy_enabled"`
+	WorkloadIdentity            types.String `tfsdk:"workload_identity"`
+	ExternalIps                 types.Set    `tfsdk:"external_ips"`
+	OidcIssuerUrl               types.String `tfsdk:"oidc_issuer_url"`
+	WorkerQueues                types.Set    `tfsdk:"worker_queues"`
+
+	// Hybrid and dedicated specific fields
+	ClusterId types.String `tfsdk:"cluster_id"`
+
+	// Hybrid deployment specific fields
+	TaskPodNodePoolId types.String `tfsdk:"task_pod_node_pool_id"`
+
+	// Hosted (standard and dedicated) deployment specific fields
+	ResourceQuotaCpu     types.String `tfsdk:"resource_quota_cpu"`
+	ResourceQuotaMemory  types.String `tfsdk:"resource_quota_memory"`
+	DefaultTaskPodCpu    types.String `tfsdk:"default_task_pod_cpu"`
+	DefaultTaskPodMemory types.String `tfsdk:"default_task_pod_memory"`
+	SchedulerSize        types.String `tfsdk:"scheduler_size"`
+	IsDevelopmentMode    types.Bool   `tfsdk:"is_development_mode"`
+	IsHighAvailability   types.Bool   `tfsdk:"is_high_availability"`
+	ScalingStatus        types.Object `tfsdk:"scaling_status"`
+	ScalingSpec          types.Object `tfsdk:"scaling_spec"`
+}
+
+type DeploymentDataSource struct {
 	// Common fields
 	Id   types.String `tfsdk:"id"`
 	Name types.String `tfsdk:"name"`
@@ -69,10 +128,10 @@ type Deployment struct {
 	IsHighAvailability types.Bool `tfsdk:"is_high_availability"`
 }
 
-func (data *Deployment) ReadFromResponse(
+func (data *DeploymentResource) ReadFromResponse(
 	ctx context.Context,
 	deployment *platform.Deployment,
-	isResource bool,
+	originalAstroRuntimeVersion *string,
 ) diag.Diagnostics {
 	// Read common fields
 	data.Id = types.StringValue(deployment.Id)
@@ -96,9 +155,14 @@
 		return diags
 	}
 	data.WorkspaceId = types.StringValue(deployment.WorkspaceId)
+	data.Type = types.StringPointerValue((*string)(deployment.Type))
 	data.Region = types.StringPointerValue(deployment.Region)
 	data.CloudProvider = types.StringPointerValue((*string)(deployment.CloudProvider))
+
+	// OriginalAstroRuntimeVersion is the version of the Astro runtime that was used to create the deployment
+	data.OriginalAstroRuntimeVersion = types.StringPointerValue(originalAstroRuntimeVersion)
 	data.AstroRuntimeVersion = types.StringValue(deployment.AstroRuntimeVersion)
+
 	data.AirflowVersion = types.StringValue(deployment.AirflowVersion)
 	data.Namespace = types.StringValue(deployment.Namespace)
 	data.ContactEmails, diags = utils.StringSet(deployment.ContactEmails)
 	if diags.HasError() {
 		return diags
 	}
 	data.Executor = types.StringPointerValue((*string)(deployment.Executor))
+	data.SchedulerCpu = types.StringValue(deployment.SchedulerCpu)
+	data.SchedulerMemory = types.StringValue(deployment.SchedulerMemory)
 	if deployment.SchedulerAu != nil {
 		deploymentSchedulerAu := int64(*deployment.SchedulerAu)
 		data.SchedulerAu = types.Int64Value(deploymentSchedulerAu)
 	}
-	data.SchedulerCpu = types.StringValue(deployment.SchedulerCpu)
-	data.SchedulerMemory = types.StringValue(deployment.SchedulerMemory)
 	data.SchedulerReplicas = types.Int64Value(int64(deployment.SchedulerReplicas))
 	data.ImageTag = types.StringValue(deployment.ImageTag)
 	data.ImageRepository = types.StringValue(deployment.ImageRepository)
@@ -125,29 +189,127 @@
 	data.WebserverAirflowApiUrl = types.StringValue(deployment.WebServerAirflowApiUrl)
 	data.Status = types.StringValue(string(deployment.Status))
 	data.StatusReason = types.StringPointerValue(deployment.StatusReason)
-	data.Type = types.StringPointerValue((*string)(deployment.Type))
 	data.DagTarballVersion = types.StringPointerValue(deployment.DagTarballVersion)
 	data.DesiredDagTarballVersion = types.StringPointerValue(deployment.DesiredDagTarballVersion)
-	if isResource {
-		data.WorkerQueues, diags = utils.ObjectSet(ctx, deployment.WorkerQueues, schemas.WorkerQueueResourceAttributeTypes(), WorkerQueueResourceTypesObject)
+	data.IsCicdEnforced = types.BoolValue(deployment.IsCicdEnforced)
+	data.IsDagDeployEnabled = types.BoolValue(deployment.IsDagDeployEnabled)
+	data.WorkloadIdentity = types.StringPointerValue(deployment.WorkloadIdentity)
+	data.ExternalIps, diags = utils.StringSet(deployment.ExternalIPs)
+	if diags.HasError() {
+		return diags
+	}
+	data.OidcIssuerUrl = types.StringPointerValue(deployment.OidcIssuerUrl)
+	data.WorkerQueues, diags = utils.ObjectSet(ctx, deployment.WorkerQueues, schemas.WorkerQueueResourceAttributeTypes(), WorkerQueueResourceTypesObject)
+	if diags.HasError() {
+		return diags
+	}
+
+	// Read hybrid and dedicated specific fields
+	data.ClusterId = types.StringPointerValue(deployment.ClusterId)
+
+	// Read hybrid deployment specific fields
+	data.TaskPodNodePoolId = types.StringPointerValue(deployment.TaskPodNodePoolId)
+
+	// Read hosted deployment specific fields
+	data.ResourceQuotaCpu = types.StringPointerValue(deployment.ResourceQuotaCpu)
+	data.ResourceQuotaMemory = types.StringPointerValue(deployment.ResourceQuotaMemory)
+	data.DefaultTaskPodCpu = types.StringPointerValue(deployment.DefaultTaskPodCpu)
+	data.DefaultTaskPodMemory = types.StringPointerValue(deployment.DefaultTaskPodMemory)
+	data.SchedulerSize = types.StringPointerValue((*string)(deployment.SchedulerSize))
+	data.IsDevelopmentMode = types.BoolPointerValue(deployment.IsDevelopmentMode)
+	data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)
+
+	// Currently, the scaling status and spec are only available in development mode
+	// However, there is a bug in the API where the scaling status and spec are returned even if the deployment is not in development mode for updated deployments
+	// This is a workaround to handle the bug until the API is fixed
+	// Issue here: https://github.com/astronomer/astro/issues/21073
+	if deployment.IsDevelopmentMode != nil && *deployment.IsDevelopmentMode {
+		data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
 		if diags.HasError() {
 			return diags
-		}
-	} else {
-		data.WorkerQueues, diags = utils.ObjectSet(ctx, deployment.WorkerQueues, schemas.WorkerQueueDataSourceAttributeTypes(), WorkerQueueDataSourceTypesObject)
+		data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
 		if diags.HasError() {
 			return diags
 		}
+	} else {
+		data.ScalingStatus = types.ObjectNull(schemas.ScalingStatusAttributeTypes())
+		data.ScalingSpec = types.ObjectNull(schemas.ScalingSpecAttributeTypes())
+	}
+
+	return nil
+}
+
+func (data *DeploymentDataSource) ReadFromResponse(
+	ctx context.Context,
+	deployment *platform.Deployment,
+) diag.Diagnostics {
+	// Read common fields
+	data.Id = types.StringValue(deployment.Id)
+	data.Name = types.StringValue(deployment.Name)
+	// If the description is nil, set it to an empty string since the terraform state/config for this resource
+	// cannot have a null value for a string.
+	if deployment.Description != nil {
+		data.Description = types.StringValue(*deployment.Description)
+	} else {
+		data.Description = types.StringValue("")
+	}
+	data.CreatedAt = types.StringValue(deployment.CreatedAt.String())
+	data.UpdatedAt = types.StringValue(deployment.UpdatedAt.String())
+	var diags diag.Diagnostics
+	data.CreatedBy, diags = SubjectProfileTypesObject(ctx, deployment.CreatedBy)
+	if diags.HasError() {
+		return diags
+	}
+	data.UpdatedBy, diags = SubjectProfileTypesObject(ctx, deployment.UpdatedBy)
+	if diags.HasError() {
+		return diags
+	}
+	data.WorkspaceId = types.StringValue(deployment.WorkspaceId)
+	data.Type = types.StringPointerValue((*string)(deployment.Type))
+	data.Region = types.StringPointerValue(deployment.Region)
+	data.CloudProvider = types.StringPointerValue((*string)(deployment.CloudProvider))
+	data.AstroRuntimeVersion = types.StringValue(deployment.AstroRuntimeVersion)
+	data.AirflowVersion = types.StringValue(deployment.AirflowVersion)
+	data.Namespace = types.StringValue(deployment.Namespace)
+	data.ContactEmails, diags = utils.StringSet(deployment.ContactEmails)
+	if diags.HasError() {
+		return diags
 	}
+	data.Executor = types.StringPointerValue((*string)(deployment.Executor))
+	if deployment.SchedulerAu != nil {
+		deploymentSchedulerAu := int64(*deployment.SchedulerAu)
+		data.SchedulerAu = types.Int64Value(deploymentSchedulerAu)
+	}
+	data.SchedulerCpu = types.StringValue(deployment.SchedulerCpu)
+	data.SchedulerMemory = types.StringValue(deployment.SchedulerMemory)
+	data.SchedulerReplicas = types.Int64Value(int64(deployment.SchedulerReplicas))
+	data.ImageTag = types.StringValue(deployment.ImageTag)
+	data.ImageRepository = types.StringValue(deployment.ImageRepository)
+	data.ImageVersion = types.StringPointerValue(deployment.ImageVersion)
+	data.EnvironmentVariables, diags = utils.ObjectSet(ctx, deployment.EnvironmentVariables, schemas.DeploymentEnvironmentVariableAttributeTypes(), DeploymentEnvironmentVariableTypesObject)
+	if diags.HasError() {
+		return diags
+	}
+	data.WebserverIngressHostname = types.StringValue(deployment.WebServerIngressHostname)
+	data.WebserverUrl = types.StringValue(deployment.WebServerUrl)
+	data.WebserverAirflowApiUrl = types.StringValue(deployment.WebServerAirflowApiUrl)
+	data.Status = types.StringValue(string(deployment.Status))
+	data.StatusReason = types.StringPointerValue(deployment.StatusReason)
+	data.DagTarballVersion = types.StringPointerValue(deployment.DagTarballVersion)
+	data.DesiredDagTarballVersion = types.StringPointerValue(deployment.DesiredDagTarballVersion)
 	data.IsCicdEnforced = types.BoolValue(deployment.IsCicdEnforced)
 	data.IsDagDeployEnabled = types.BoolValue(deployment.IsDagDeployEnabled)
-	data.WorkloadIdentity = types.StringPointerValue(deployment.WorkloadIdentity)
 	data.ExternalIps, diags = utils.StringSet(deployment.ExternalIPs)
 	if diags.HasError() {
 		return diags
 	}
 	data.OidcIssuerUrl = types.StringPointerValue(deployment.OidcIssuerUrl)
+	data.WorkerQueues, diags = utils.ObjectSet(ctx, deployment.WorkerQueues, schemas.WorkerQueueDataSourceAttributeTypes(), WorkerQueueDataSourceTypesObject)
+	if diags.HasError() {
+		return diags
+	}
 
 	// Read hybrid and dedicated specific fields
 	data.ClusterId = types.StringPointerValue(deployment.ClusterId)
@@ -161,8 +323,8 @@ func (data *Deployment) ReadFromResponse(
 	data.DefaultTaskPodCpu = types.StringPointerValue(deployment.DefaultTaskPodCpu)
 	data.DefaultTaskPodMemory = types.StringPointerValue(deployment.DefaultTaskPodMemory)
 	data.SchedulerSize = types.StringPointerValue((*string)(deployment.SchedulerSize))
-	data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)
 	data.IsDevelopmentMode = types.BoolPointerValue(deployment.IsDevelopmentMode)
+	data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)
 
 	// Currently, the scaling status and spec are only available in development mode
 	// However, there is a bug in the API where the scaling status and spec are returned even if the deployment is not in development mode for updated deployments
diff --git a/internal/provider/models/deployments.go b/internal/provider/models/deployments.go
index dfe16f64..fc051ad7 100644
--- a/internal/provider/models/deployments.go
+++ b/internal/provider/models/deployments.go
@@ -24,8 +24,8 @@ func (data *Deployments) ReadFromResponse(
 ) diag.Diagnostics {
 	values := make([]attr.Value, len(deployments))
 	for i, deployment := range deployments {
-		var singleDeploymentData Deployment
-		diags := singleDeploymentData.ReadFromResponse(ctx, &deployment, false)
+		var singleDeploymentData DeploymentDataSource
+		diags := singleDeploymentData.ReadFromResponse(ctx, &deployment)
 		if diags.HasError() {
 			return diags
 		}
diff --git a/internal/provider/resources/resource_cluster.go b/internal/provider/resources/resource_cluster.go
index 7dc03e21..cbd5e4c4 100644
--- a/internal/provider/resources/resource_cluster.go
+++ b/internal/provider/resources/resource_cluster.go
@@ -51,7 +51,7 @@ func (r *ClusterResource) Schema(
 ) {
 	resp.Schema = schema.Schema{
 		// This description is used by the documentation generator and the language server.
-		MarkdownDescription: "Cluster resource",
+		MarkdownDescription: "Cluster resource. If creating multiple clusters, add a delay between each cluster creation to avoid cluster creation limiting errors.",
 		Attributes:          schemas.ClusterResourceSchemaAttributes(ctx),
 	}
 }
diff --git a/internal/provider/resources/resource_cluster_test.go b/internal/provider/resources/resource_cluster_test.go
index 9e41ca7b..f9e33575 100644
--- a/internal/provider/resources/resource_cluster_test.go
+++ b/internal/provider/resources/resource_cluster_test.go
@@ -363,9 +363,9 @@ func TestAcc_ResourceClusterRemovedOutsideOfTerraform(t *testing.T) {
 	clusterResource := fmt.Sprintf("astro_cluster.%v", clusterName)
 	depInput := clusterInput{
 		Name:           clusterName,
-		Region:         "us-east-1",
-		CloudProvider:  "AWS",
-		DbInstanceType: "db.m6g.large",
+		Region:         "us-central1",
+		CloudProvider:  "GCP",
+		DbInstanceType: "Small General Purpose",
 	}
 	resource.ParallelTest(t, resource.TestCase{
 		ProtoV6ProviderFactories: astronomerprovider.TestAccProtoV6ProviderFactories,
@@ -414,7 +414,8 @@ type dedicatedDeploymentInput struct {
 }
 
 func dedicatedDeployment(input dedicatedDeploymentInput) string {
-	return fmt.Sprintf(`resource "astro_deployment" "%v" {
+	return fmt.Sprintf(`
+resource "astro_deployment" "%v" {
 	name        = "%s"
 	description = "%s"
 	type        = "DEDICATED"
diff --git a/internal/provider/resources/resource_deployment.go b/internal/provider/resources/resource_deployment.go
index 2b191be0..64cf4dbb 100644
--- a/internal/provider/resources/resource_deployment.go
+++ b/internal/provider/resources/resource_deployment.go
@@ -83,48 +83,32 @@ func (r *DeploymentResource) Create(
 	req resource.CreateRequest,
 	resp *resource.CreateResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentResource
 
 	// Read Terraform plan data into the model
 	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
-
 	if resp.Diagnostics.HasError() {
 		return
 	}
-	deploymentOptions, err := r.platformClient.GetDeploymentOptionsWithResponse(ctx, r.organizationId, &platform.GetDeploymentOptionsParams{
-		DeploymentType: lo.ToPtr(platform.GetDeploymentOptionsParamsDeploymentType(data.Type.ValueString())),
-		Executor:       lo.ToPtr(platform.GetDeploymentOptionsParamsExecutor(data.Executor.ValueString())),
-		CloudProvider:  lo.ToPtr(platform.GetDeploymentOptionsParamsCloudProvider(data.CloudProvider.ValueString())),
-	})
-	if err != nil {
-		tflog.Error(ctx, "failed to get deployment options", map[string]interface{}{"error": err})
-		resp.Diagnostics.AddError(
-			"Client Error",
-			fmt.Sprintf("Unable to get deployment options for deployment creation, got error: %s", err),
-		)
-		return
-	}
-	_, diagnostic := clients.NormalizeAPIError(ctx, deploymentOptions.HTTPResponse, deploymentOptions.Body)
-	if diagnostic != nil {
-		resp.Diagnostics.Append(diagnostic)
-		return
-	}
-	if deploymentOptions.JSON200 == nil || len(deploymentOptions.JSON200.RuntimeReleases) == 0 {
-		resp.Diagnostics.AddError(
-			"Client Error",
-			"Unable to get runtime releases for deployment creation, got empty runtime releases",
-		)
-		return
-	}
-
 	var diags diag.Diagnostics
 	var createDeploymentRequest platform.CreateDeploymentRequest
+	originalAstroRuntimeVersion := data.OriginalAstroRuntimeVersion.ValueString()
+	if len(originalAstroRuntimeVersion) == 0 {
+		var diagnostic diag.Diagnostic
+		originalAstroRuntimeVersion, diagnostic = r.GetLatestAstroRuntimeVersion(ctx, &data)
+		if diagnostic != nil {
+			resp.Diagnostics.Append(diagnostic)
+			return
+
+		}
+	}
+
 	switch data.Type.ValueString() {
 	case string(platform.DeploymentTypeSTANDARD):
 		createStandardDeploymentRequest := platform.CreateStandardDeploymentRequest{
-			AstroRuntimeVersion:  deploymentOptions.JSON200.RuntimeReleases[0].Version,
+			AstroRuntimeVersion:  originalAstroRuntimeVersion,
 			CloudProvider:        (*platform.CreateStandardDeploymentRequestCloudProvider)(data.CloudProvider.ValueStringPointer()),
 			DefaultTaskPodCpu:    data.DefaultTaskPodCpu.ValueString(),
 			DefaultTaskPodMemory: data.DefaultTaskPodMemory.ValueString(),
@@ -173,7 +157,7 @@ func (r *DeploymentResource) Create(
 			return
 		}
 
-		err = createDeploymentRequest.FromCreateStandardDeploymentRequest(createStandardDeploymentRequest)
+		err := createDeploymentRequest.FromCreateStandardDeploymentRequest(createStandardDeploymentRequest)
 		if err != nil {
 			tflog.Error(ctx, fmt.Sprintf("failed to create standard deployment error: %v", err))
 			resp.Diagnostics.AddError(
@@ -185,7 +169,7 @@
 
 	case string(platform.DeploymentTypeDEDICATED):
 		createDedicatedDeploymentRequest := platform.CreateDedicatedDeploymentRequest{
-			AstroRuntimeVersion:  deploymentOptions.JSON200.RuntimeReleases[0].Version,
+			AstroRuntimeVersion:  originalAstroRuntimeVersion,
 			ClusterId:            data.ClusterId.ValueString(),
 			DefaultTaskPodCpu:    data.DefaultTaskPodCpu.ValueString(),
 			DefaultTaskPodMemory: data.DefaultTaskPodMemory.ValueString(),
@@ -233,7 +217,7 @@
 			return
 		}
 
-		err = createDeploymentRequest.FromCreateDedicatedDeploymentRequest(createDedicatedDeploymentRequest)
+		err := createDeploymentRequest.FromCreateDedicatedDeploymentRequest(createDedicatedDeploymentRequest)
 		if err != nil {
 			tflog.Error(ctx, fmt.Sprintf("failed to create dedicated deployment error: %v", err))
 			resp.Diagnostics.AddError(
@@ -245,7 +229,7 @@
 
 	case string(platform.DeploymentTypeHYBRID):
 		createHybridDeploymentRequest := platform.CreateHybridDeploymentRequest{
-			AstroRuntimeVersion: deploymentOptions.JSON200.RuntimeReleases[0].Version,
+			AstroRuntimeVersion: originalAstroRuntimeVersion,
 			ClusterId:           data.ClusterId.ValueString(),
 			Description:         data.Description.ValueStringPointer(),
 			Executor:            platform.CreateHybridDeploymentRequestExecutor(data.Executor.ValueString()),
@@ -284,7 +268,7 @@
 			return
 		}
 
-		err = createDeploymentRequest.FromCreateHybridDeploymentRequest(createHybridDeploymentRequest)
+		err := createDeploymentRequest.FromCreateHybridDeploymentRequest(createHybridDeploymentRequest)
 		if err != nil {
 			tflog.Error(ctx, fmt.Sprintf("failed to create hybrid deployment error: %v", err))
 			resp.Diagnostics.AddError(
@@ -308,13 +292,13 @@
 		)
 		return
 	}
-	_, diagnostic = clients.NormalizeAPIError(ctx, deployment.HTTPResponse, deployment.Body)
+	_, diagnostic := clients.NormalizeAPIError(ctx, deployment.HTTPResponse, deployment.Body)
 	if diagnostic != nil {
 		resp.Diagnostics.Append(diagnostic)
 		return
 	}
 
-	diags = data.ReadFromResponse(ctx, deployment.JSON200, true)
+	diags = data.ReadFromResponse(ctx, deployment.JSON200, data.OriginalAstroRuntimeVersion.ValueStringPointer())
 	if diags.HasError() {
 		resp.Diagnostics.Append(diags...)
 		return
@@ -331,11 +315,10 @@ func (r *DeploymentResource) Read(
 	req resource.ReadRequest,
 	resp *resource.ReadResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentResource
 
 	// Read Terraform prior state data into the model
 	resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
-
 	if resp.Diagnostics.HasError() {
 		return
 	}
@@ -366,7 +349,7 @@
 		return
 	}
 
-	diags := data.ReadFromResponse(ctx, deployment.JSON200, true)
+	diags := data.ReadFromResponse(ctx, deployment.JSON200, data.OriginalAstroRuntimeVersion.ValueStringPointer())
 	if diags.HasError() {
 		resp.Diagnostics.Append(diags...)
 		return
@@ -383,7 +366,7 @@ func (r *DeploymentResource) Update(
 	req resource.UpdateRequest,
 	resp *resource.UpdateResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentResource
 
 	// Read Terraform plan data into the model
 	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
@@ -582,7 +565,7 @@
 		return
 	}
 
-	diags = data.ReadFromResponse(ctx, deployment.JSON200, true)
+	diags = data.ReadFromResponse(ctx, deployment.JSON200, data.OriginalAstroRuntimeVersion.ValueStringPointer())
 	if diags.HasError() {
 		resp.Diagnostics.Append(diags...)
 		return
@@ -599,7 +582,7 @@ func (r *DeploymentResource) Delete(
 	req resource.DeleteRequest,
 	resp *resource.DeleteResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentResource
 
 	// Read Terraform prior state data into the model
 	resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
@@ -647,7 +630,7 @@ func (r *DeploymentResource) ValidateConfig(
 	req resource.ValidateConfigRequest,
 	resp *resource.ValidateConfigResponse,
 ) {
-	var data models.Deployment
+	var data models.DeploymentResource
 
 	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
 
 	if resp.Diagnostics.HasError() {
@@ -682,7 +665,7 @@
 	}
 }
 
-func validateHybridConfig(ctx context.Context, data *models.Deployment) diag.Diagnostics {
+func validateHybridConfig(ctx context.Context, data *models.DeploymentResource) diag.Diagnostics {
 	var diags diag.Diagnostics
 	// Required hybrid values
 	if data.SchedulerAu.IsNull() {
@@ -778,7 +761,7 @@ func validateHybridConfig(ctx context.Context, data *models.Deployment) diag.Dia
 	return diags
 }
 
-func validateStandardConfig(ctx context.Context, data *models.Deployment) diag.Diagnostics {
+func validateStandardConfig(ctx context.Context, data *models.DeploymentResource) diag.Diagnostics {
 	var diags diag.Diagnostics
 	// Required standard values
 	if data.Region.IsNull() {
@@ -804,7 +787,7 @@ func validateStandardConfig(ctx context.Context, data *models.Deployment) diag.D
 	return diags
 }
 
-func validateHostedConfig(ctx context.Context, data *models.Deployment) diag.Diagnostics {
+func validateHostedConfig(ctx context.Context, data *models.DeploymentResource) diag.Diagnostics {
 	// Required hosted values
 	var diags diag.Diagnostics
 	if data.SchedulerSize.IsNull() {
@@ -909,7 +892,7 @@ func validateHostedConfig(ctx context.Context, data *models.Deployment) diag.Dia
 	return diags
 }
 
-func validateClusterIdConfig(ctx context.Context, data *models.Deployment) diag.Diagnostics {
+func validateClusterIdConfig(ctx context.Context, data *models.DeploymentResource) diag.Diagnostics {
 	var diags diag.Diagnostics
 	// Required clusterId value
 	if data.ClusterId.IsNull() {
@@ -1039,3 +1022,29 @@ func RequestDeploymentEnvironmentVariables(ctx context.Context, environmentVaria
 	})
 	return platformEnvVars, nil
 }
+
+func (r *DeploymentResource) GetLatestAstroRuntimeVersion(ctx context.Context, data *models.DeploymentResource) (string, diag.Diagnostic) {
+	deploymentOptions, err := r.platformClient.GetDeploymentOptionsWithResponse(ctx, r.organizationId, &platform.GetDeploymentOptionsParams{
+		DeploymentType: lo.ToPtr(platform.GetDeploymentOptionsParamsDeploymentType(data.Type.ValueString())),
+		Executor:       lo.ToPtr(platform.GetDeploymentOptionsParamsExecutor(data.Executor.ValueString())),
+		CloudProvider:  lo.ToPtr(platform.GetDeploymentOptionsParamsCloudProvider(data.CloudProvider.ValueString())),
+	})
+	if err != nil {
+		tflog.Error(ctx, "failed to get deployment options", map[string]interface{}{"error": err})
+		return "", diag.NewErrorDiagnostic(
+			"Client Error",
+			fmt.Sprintf("Unable to get deployment options for deployment creation, got error: %s", err),
+		)
+	}
+	_, diagnostic := clients.NormalizeAPIError(ctx, deploymentOptions.HTTPResponse, deploymentOptions.Body)
+	if diagnostic != nil {
+		return "", diagnostic
+	}
+	if deploymentOptions.JSON200 == nil || len(deploymentOptions.JSON200.RuntimeReleases) == 0 {
+		return "", diag.NewErrorDiagnostic(
+			"Client Error",
+			"Unable to get runtime releases for deployment creation, got empty runtime releases",
+		)
+	}
+	return deploymentOptions.JSON200.RuntimeReleases[0].Version, nil
+}
diff --git a/internal/provider/schemas/deployment.go b/internal/provider/schemas/deployment.go
index fc4da08d..85b053f9 100644
--- a/internal/provider/schemas/deployment.go
+++ b/internal/provider/schemas/deployment.go
@@ -71,8 +71,15 @@ func DeploymentResourceSchemaAttributes() map[string]resourceSchema.Attribute {
 				stringplanmodifier.RequiresReplaceIfConfigured(),
 			},
 		},
+		"original_astro_runtime_version": resourceSchema.StringAttribute{
+			MarkdownDescription: "Deployment's original Astro Runtime version. The Terraform provider will use this provided Astro runtime version to create the Deployment. The Astro runtime version can be updated with your Astro project Dockerfile, but if this value is changed, the Deployment will be recreated with this new Astro runtime version.",
+			Optional:            true,
+			PlanModifiers: []planmodifier.String{
+				stringplanmodifier.RequiresReplaceIfConfigured(),
+			},
+		},
 		"astro_runtime_version": resourceSchema.StringAttribute{
-			MarkdownDescription: "Deployment Astro Runtime version. The terraform provider will use the latest Astro runtime version for the Deployment. The Astro runtime version can be updated with your Astro project Dockerfile",
+			MarkdownDescription: "Deployment's current Astro Runtime version",
 			Computed:            true,
 		},
 		"airflow_version": resourceSchema.StringAttribute{