From 8998fe24464df405329c5325d6283730205a3aaa Mon Sep 17 00:00:00 2001
From: Vandy Liu <33995460+vandyliu@users.noreply.github.com>
Date: Fri, 19 Apr 2024 18:42:01 -0700
Subject: [PATCH] cluster and clusters data source (#22)
---
docs/data-sources/cluster.md | 82 +++++++
docs/data-sources/clusters.md | 100 +++++++++
.../astronomer_cluster/data-source.tf | 3 +
.../astronomer_clusters/data-source.tf | 9 +
.../datasources/data_source_cluster.go | 117 ++++++++++
.../datasources/data_source_cluster_test.go | 51 +++++
.../datasources/data_source_clusters.go | 147 +++++++++++++
.../datasources/data_source_clusters_test.go | 108 +++++++++
internal/provider/models/cluster.go | 153 +++++++++++++
internal/provider/models/clusters.go | 45 ++++
internal/provider/provider.go | 2 +
internal/provider/schemas/cluster.go | 208 ++++++++++++++++++
internal/provider/schemas/clusters.go | 76 +++++++
13 files changed, 1101 insertions(+)
create mode 100644 docs/data-sources/cluster.md
create mode 100644 docs/data-sources/clusters.md
create mode 100644 examples/data-sources/astronomer_cluster/data-source.tf
create mode 100644 examples/data-sources/astronomer_clusters/data-source.tf
create mode 100644 internal/provider/datasources/data_source_cluster.go
create mode 100644 internal/provider/datasources/data_source_cluster_test.go
create mode 100644 internal/provider/datasources/data_source_clusters.go
create mode 100644 internal/provider/datasources/data_source_clusters_test.go
create mode 100644 internal/provider/models/cluster.go
create mode 100644 internal/provider/models/clusters.go
create mode 100644 internal/provider/schemas/cluster.go
create mode 100644 internal/provider/schemas/clusters.go
diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md
new file mode 100644
index 00000000..1afb9d71
--- /dev/null
+++ b/docs/data-sources/cluster.md
@@ -0,0 +1,82 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "astronomer_cluster Data Source - astronomer"
+subcategory: ""
+description: |-
+ Cluster data source
+---
+
+# astronomer_cluster (Data Source)
+
+Cluster data source
+
+## Example Usage
+
+```terraform
+data "astronomer_cluster" "example" {
+ id = "clozc036j01to01jrlgvueo8t"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `id` (String) Cluster identifier
+
+### Read-Only
+
+- `cloud_provider` (String) Cluster cloud provider
+- `created_at` (String) Cluster creation timestamp
+- `db_instance_type` (String) Cluster database instance type
+- `is_limited` (Boolean) Whether the cluster is limited
+- `metadata` (Attributes) Cluster metadata (see [below for nested schema](#nestedatt--metadata))
+- `name` (String) Cluster name
+- `node_pools` (Attributes List) Cluster node pools (see [below for nested schema](#nestedatt--node_pools))
+- `pod_subnet_range` (String) Cluster pod subnet range
+- `provider_account` (String) Cluster provider account
+- `region` (String) Cluster region
+- `service_peering_range` (String) Cluster service peering range
+- `service_subnet_range` (String) Cluster service subnet range
+- `status` (String) Cluster status
+- `tags` (Attributes List) Cluster tags (see [below for nested schema](#nestedatt--tags))
+- `tenant_id` (String) Cluster tenant ID
+- `type` (String) Cluster type
+- `updated_at` (String) Cluster last updated timestamp
+- `vpc_subnet_range` (String) Cluster VPC subnet range
+- `workspace_ids` (List of String) Cluster workspace IDs
+
+
+### Nested Schema for `metadata`
+
+Read-Only:
+
+- `external_ips` (List of String) Cluster external IPs
+- `oidc_issuer_url` (String) Cluster OIDC issuer URL
+
+
+
+### Nested Schema for `node_pools`
+
+Read-Only:
+
+- `cloud_provider` (String) Node pool cloud provider
+- `cluster_id` (String) Node pool cluster identifier
+- `created_at` (String) Node pool creation timestamp
+- `id` (String) Node pool identifier
+- `is_default` (Boolean) Whether the node pool is the default node pool of the cluster
+- `max_node_count` (Number) Node pool maximum node count
+- `name` (String) Node pool name
+- `node_instance_type` (String) Node pool node instance type
+- `supported_astro_machines` (List of String) Node pool supported Astro machines
+- `updated_at` (String) Node pool last updated timestamp
+
+
+
+### Nested Schema for `tags`
+
+Read-Only:
+
+- `key` (String) Cluster tag key
+- `value` (String) Cluster tag value
diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md
new file mode 100644
index 00000000..44f0bd5a
--- /dev/null
+++ b/docs/data-sources/clusters.md
@@ -0,0 +1,100 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "astronomer_clusters Data Source - astronomer"
+subcategory: ""
+description: |-
+ Clusters data source
+---
+
+# astronomer_clusters (Data Source)
+
+Clusters data source
+
+## Example Usage
+
+```terraform
+data "astronomer_clusters" "example_clusters" {}
+
+data "astronomer_clusters" "example_clusters_filter_by_names" {
+ names = ["my cluster"]
+}
+
+data "astronomer_clusters" "example_clusters_filter_by_cloud_provider" {
+ cloud_provider = "AWS"
+}
+```
+
+
+## Schema
+
+### Optional
+
+- `cloud_provider` (String)
+- `names` (List of String)
+
+### Read-Only
+
+- `clusters` (Attributes List) (see [below for nested schema](#nestedatt--clusters))
+
+
+### Nested Schema for `clusters`
+
+Required:
+
+- `id` (String) Cluster identifier
+
+Read-Only:
+
+- `cloud_provider` (String) Cluster cloud provider
+- `created_at` (String) Cluster creation timestamp
+- `db_instance_type` (String) Cluster database instance type
+- `is_limited` (Boolean) Whether the cluster is limited
+- `metadata` (Attributes) Cluster metadata (see [below for nested schema](#nestedatt--clusters--metadata))
+- `name` (String) Cluster name
+- `node_pools` (Attributes List) Cluster node pools (see [below for nested schema](#nestedatt--clusters--node_pools))
+- `pod_subnet_range` (String) Cluster pod subnet range
+- `provider_account` (String) Cluster provider account
+- `region` (String) Cluster region
+- `service_peering_range` (String) Cluster service peering range
+- `service_subnet_range` (String) Cluster service subnet range
+- `status` (String) Cluster status
+- `tags` (Attributes List) Cluster tags (see [below for nested schema](#nestedatt--clusters--tags))
+- `tenant_id` (String) Cluster tenant ID
+- `type` (String) Cluster type
+- `updated_at` (String) Cluster last updated timestamp
+- `vpc_subnet_range` (String) Cluster VPC subnet range
+- `workspace_ids` (List of String) Cluster workspace IDs
+
+
+### Nested Schema for `clusters.metadata`
+
+Read-Only:
+
+- `external_ips` (List of String) Cluster external IPs
+- `oidc_issuer_url` (String) Cluster OIDC issuer URL
+
+
+
+### Nested Schema for `clusters.node_pools`
+
+Read-Only:
+
+- `cloud_provider` (String) Node pool cloud provider
+- `cluster_id` (String) Node pool cluster identifier
+- `created_at` (String) Node pool creation timestamp
+- `id` (String) Node pool identifier
+- `is_default` (Boolean) Whether the node pool is the default node pool of the cluster
+- `max_node_count` (Number) Node pool maximum node count
+- `name` (String) Node pool name
+- `node_instance_type` (String) Node pool node instance type
+- `supported_astro_machines` (List of String) Node pool supported Astro machines
+- `updated_at` (String) Node pool last updated timestamp
+
+
+
+### Nested Schema for `clusters.tags`
+
+Read-Only:
+
+- `key` (String) Cluster tag key
+- `value` (String) Cluster tag value
diff --git a/examples/data-sources/astronomer_cluster/data-source.tf b/examples/data-sources/astronomer_cluster/data-source.tf
new file mode 100644
index 00000000..831010c9
--- /dev/null
+++ b/examples/data-sources/astronomer_cluster/data-source.tf
@@ -0,0 +1,3 @@
+data "astronomer_cluster" "example" {
+ id = "clozc036j01to01jrlgvueo8t"
+}
diff --git a/examples/data-sources/astronomer_clusters/data-source.tf b/examples/data-sources/astronomer_clusters/data-source.tf
new file mode 100644
index 00000000..232ae891
--- /dev/null
+++ b/examples/data-sources/astronomer_clusters/data-source.tf
@@ -0,0 +1,9 @@
+data "astronomer_clusters" "example_clusters" {}
+
+data "astronomer_clusters" "example_clusters_filter_by_names" {
+ names = ["my cluster"]
+}
+
+data "astronomer_clusters" "example_clusters_filter_by_cloud_provider" {
+ cloud_provider = "AWS"
+}
\ No newline at end of file
diff --git a/internal/provider/datasources/data_source_cluster.go b/internal/provider/datasources/data_source_cluster.go
new file mode 100644
index 00000000..4d4b3712
--- /dev/null
+++ b/internal/provider/datasources/data_source_cluster.go
@@ -0,0 +1,117 @@
+package datasources
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/models"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &clusterDataSource{}
+var _ datasource.DataSourceWithConfigure = &clusterDataSource{}
+
+func NewClusterDataSource() datasource.DataSource {
+ return &clusterDataSource{}
+}
+
+// clusterDataSource defines the data source implementation.
+type clusterDataSource struct {
+ PlatformClient platform.ClientWithResponsesInterface
+ OrganizationId string
+}
+
+func (d *clusterDataSource) Metadata(
+ ctx context.Context,
+ req datasource.MetadataRequest,
+ resp *datasource.MetadataResponse,
+) {
+ resp.TypeName = req.ProviderTypeName + "_cluster"
+}
+
+func (d *clusterDataSource) Schema(
+ ctx context.Context,
+ req datasource.SchemaRequest,
+ resp *datasource.SchemaResponse,
+) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Cluster data source",
+ Attributes: schemas.ClusterDataSourceSchemaAttributes(),
+ }
+}
+
+func (d *clusterDataSource) Configure(
+ ctx context.Context,
+ req datasource.ConfigureRequest,
+ resp *datasource.ConfigureResponse,
+) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClients, ok := req.ProviderData.(models.ApiClientsModel)
+ if !ok {
+ utils.DataSourceApiClientConfigureError(ctx, req, resp)
+ return
+ }
+
+ d.PlatformClient = apiClients.PlatformClient
+ d.OrganizationId = apiClients.OrganizationId
+}
+
+func (d *clusterDataSource) Read(
+ ctx context.Context,
+ req datasource.ReadRequest,
+ resp *datasource.ReadResponse,
+) {
+ var data models.Cluster
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ cluster, err := d.PlatformClient.GetClusterWithResponse(
+ ctx,
+ d.OrganizationId,
+ data.Id.ValueString(),
+ )
+ if err != nil {
+ tflog.Error(ctx, "failed to get cluster", map[string]interface{}{"error": err})
+ resp.Diagnostics.AddError(
+ "Client Error",
+ fmt.Sprintf("Unable to read cluster, got error: %s", err),
+ )
+ return
+ }
+ _, diagnostic := clients.NormalizeAPIError(ctx, cluster.HTTPResponse, cluster.Body)
+ if diagnostic != nil {
+ resp.Diagnostics.Append(diagnostic)
+ return
+ }
+ if cluster.JSON200 == nil {
+ tflog.Error(ctx, "failed to get cluster", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read cluster, got nil response")
+ return
+ }
+
+ // Populate the model with the response data
+ diags := data.ReadFromResponse(ctx, cluster.JSON200)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
diff --git a/internal/provider/datasources/data_source_cluster_test.go b/internal/provider/datasources/data_source_cluster_test.go
new file mode 100644
index 00000000..c29c7337
--- /dev/null
+++ b/internal/provider/datasources/data_source_cluster_test.go
@@ -0,0 +1,51 @@
+package datasources_test
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ astronomerprovider "github.com/astronomer/astronomer-terraform-provider/internal/provider"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestAcc_DataSourceCluster(t *testing.T) {
+ hybridClusterId := os.Getenv("HYBRID_CLUSTER_ID")
+ resourceName := "test_data_cluster_hybrid"
+ resourceVar := fmt.Sprintf("data.astronomer_cluster.%v", resourceName)
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ astronomerprovider.TestAccPreCheck(t)
+ },
+ ProtoV6ProviderFactories: astronomerprovider.TestAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ // Check the data source for cluster for a hybrid organization
+ {
+ Config: astronomerprovider.ProviderConfig(t, false) + cluster(resourceName, hybridClusterId),
+ Check: resource.ComposeTestCheckFunc(
+ // These checks are for the cluster data source (singular)
+ resource.TestCheckResourceAttrSet(resourceVar, "id"),
+ resource.TestCheckResourceAttrSet(resourceVar, "name"),
+ resource.TestCheckResourceAttrSet(resourceVar, "cloud_provider"),
+ resource.TestCheckResourceAttrSet(resourceVar, "db_instance_type"),
+ resource.TestCheckResourceAttrSet(resourceVar, "region"),
+ resource.TestCheckResourceAttrSet(resourceVar, "vpc_subnet_range"),
+ resource.TestCheckResourceAttrSet(resourceVar, "created_at"),
+ resource.TestCheckResourceAttrSet(resourceVar, "updated_at"),
+ resource.TestCheckResourceAttr(resourceVar, "type", "HYBRID"),
+ resource.TestCheckResourceAttrSet(resourceVar, "provider_account"),
+ resource.TestCheckResourceAttrSet(resourceVar, "node_pools.0.id"),
+ resource.TestCheckResourceAttrSet(resourceVar, "node_pools.0.name"),
+ resource.TestCheckResourceAttrSet(resourceVar, "metadata.external_ips.0"),
+ ),
+ },
+ },
+ })
+}
+
+func cluster(resourceName, clusterId string) string {
+ return fmt.Sprintf(`
+data astronomer_cluster "%v" {
+ id = "%v"
+}`, resourceName, clusterId)
+}
diff --git a/internal/provider/datasources/data_source_clusters.go b/internal/provider/datasources/data_source_clusters.go
new file mode 100644
index 00000000..b37d776e
--- /dev/null
+++ b/internal/provider/datasources/data_source_clusters.go
@@ -0,0 +1,147 @@
+package datasources
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+
+ "github.com/samber/lo"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/models"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &clustersDataSource{}
+var _ datasource.DataSourceWithConfigure = &clustersDataSource{}
+
+func NewClustersDataSource() datasource.DataSource {
+ return &clustersDataSource{}
+}
+
+// clustersDataSource defines the data source implementation.
+type clustersDataSource struct {
+ PlatformClient platform.ClientWithResponsesInterface
+ OrganizationId string
+}
+
+func (d *clustersDataSource) Metadata(
+ ctx context.Context,
+ req datasource.MetadataRequest,
+ resp *datasource.MetadataResponse,
+) {
+ resp.TypeName = req.ProviderTypeName + "_clusters"
+}
+
+func (d *clustersDataSource) Schema(
+ ctx context.Context,
+ req datasource.SchemaRequest,
+ resp *datasource.SchemaResponse,
+) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Clusters data source",
+ Attributes: schemas.ClustersDataSourceSchemaAttributes(),
+ }
+}
+
+func (d *clustersDataSource) Configure(
+ ctx context.Context,
+ req datasource.ConfigureRequest,
+ resp *datasource.ConfigureResponse,
+) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClients, ok := req.ProviderData.(models.ApiClientsModel)
+ if !ok {
+ utils.DataSourceApiClientConfigureError(ctx, req, resp)
+ return
+ }
+
+ d.PlatformClient = apiClients.PlatformClient
+ d.OrganizationId = apiClients.OrganizationId
+}
+
+func (d *clustersDataSource) Read(
+ ctx context.Context,
+ req datasource.ReadRequest,
+ resp *datasource.ReadResponse,
+) {
+ var data models.ClustersDataSource
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ params := &platform.ListClustersParams{
+ Limit: lo.ToPtr(1000),
+ }
+ var diags diag.Diagnostics
+ if len(data.CloudProvider.ValueString()) > 0 {
+ params.Provider = (*platform.ListClustersParamsProvider)(data.CloudProvider.ValueStringPointer())
+ }
+ params.Names, diags = utils.TypesListToStringSlicePtr(ctx, data.Names)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ var clusters []platform.Cluster
+ offset := 0
+ for {
+ params.Offset = &offset
+ clustersResp, err := d.PlatformClient.ListClustersWithResponse(
+ ctx,
+ d.OrganizationId,
+ params,
+ )
+ if err != nil {
+ tflog.Error(ctx, "failed to list clusters", map[string]interface{}{"error": err})
+ resp.Diagnostics.AddError(
+ "Client Error",
+ fmt.Sprintf("Unable to read clusters, got error: %s", err),
+ )
+ return
+ }
+ _, diagnostic := clients.NormalizeAPIError(ctx, clustersResp.HTTPResponse, clustersResp.Body)
+ if diagnostic != nil {
+ resp.Diagnostics.Append(diagnostic)
+ return
+ }
+ if clustersResp.JSON200 == nil {
+ tflog.Error(ctx, "failed to list clusters", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read clusters, got nil response")
+ return
+ }
+
+ clusters = append(clusters, clustersResp.JSON200.Clusters...)
+
+ if offset+1000 >= clustersResp.JSON200.TotalCount {
+ break
+ }
+
+ offset += 1000
+ }
+
+ // Populate the model with the response data
+ diags = data.ReadFromResponse(ctx, clusters)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
diff --git a/internal/provider/datasources/data_source_clusters_test.go b/internal/provider/datasources/data_source_clusters_test.go
new file mode 100644
index 00000000..453b5f9d
--- /dev/null
+++ b/internal/provider/datasources/data_source_clusters_test.go
@@ -0,0 +1,108 @@
+package datasources_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+
+ astronomerprovider "github.com/astronomer/astronomer-terraform-provider/internal/provider"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestAcc_DataSourceClustersHybrid(t *testing.T) {
+ tfVarName := "test_data_clusters_hybrid"
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ astronomerprovider.TestAccPreCheck(t)
+ },
+ ProtoV6ProviderFactories: astronomerprovider.TestAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ // Check the data source for clusters for a hybrid organization
+ {
+ Config: astronomerprovider.ProviderConfig(t, false) + clusters(tfVarName),
+ Check: resource.ComposeTestCheckFunc(
+ checkClusters(tfVarName),
+ ),
+ },
+ },
+ })
+}
+
+func clusters(tfVarName string) string {
+ return fmt.Sprintf(`
+data astronomer_clusters "%v" {}`, tfVarName)
+}
+
+func checkClusters(tfVarName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ instanceState, numClusters, err := utils.GetDataSourcesLength(s, tfVarName, "clusters")
+ if err != nil {
+ return err
+ }
+ if numClusters == 0 {
+ return fmt.Errorf("expected clusters to be greater or equal to 1, got %s", instanceState.Attributes["clusters.#"])
+ }
+
+ // Check the first cluster
+ clustersIdx := 0
+
+ id := fmt.Sprintf("clusters.%d.id", clustersIdx)
+ if instanceState.Attributes[id] == "" {
+ return fmt.Errorf("expected 'id' to be set")
+ }
+ name := fmt.Sprintf("clusters.%d.name", clustersIdx)
+ if instanceState.Attributes[name] == "" {
+ return fmt.Errorf("expected 'name' to be set")
+ }
+ cloudProvider := fmt.Sprintf("clusters.%d.cloud_provider", clustersIdx)
+ if instanceState.Attributes[cloudProvider] == "" {
+ return fmt.Errorf("expected 'cloud_provider' to be set")
+ }
+ dbInstanceType := fmt.Sprintf("clusters.%d.db_instance_type", clustersIdx)
+ if instanceState.Attributes[dbInstanceType] == "" {
+ return fmt.Errorf("expected 'db_instance_type' to be set")
+ }
+ region := fmt.Sprintf("clusters.%d.region", clustersIdx)
+ if instanceState.Attributes[region] == "" {
+ return fmt.Errorf("expected 'region' to be set")
+ }
+ vpcSubnetRange := fmt.Sprintf("clusters.%d.vpc_subnet_range", clustersIdx)
+ if instanceState.Attributes[vpcSubnetRange] == "" {
+ return fmt.Errorf("expected 'vpc_subnet_range' to be set")
+ }
+ createdAt := fmt.Sprintf("clusters.%d.created_at", clustersIdx)
+ if instanceState.Attributes[createdAt] == "" {
+ return fmt.Errorf("expected 'created_at' to be set")
+ }
+ updatedAt := fmt.Sprintf("clusters.%d.updated_at", clustersIdx)
+ if instanceState.Attributes[updatedAt] == "" {
+ return fmt.Errorf("expected 'updated_at' to be set")
+ }
+ typ := fmt.Sprintf("clusters.%d.type", clustersIdx)
+ if instanceState.Attributes[typ] != string(platform.ClusterTypeHYBRID) {
+ return fmt.Errorf("expected 'type' to be set")
+ }
+ providerAccount := fmt.Sprintf("clusters.%d.provider_account", clustersIdx)
+ if instanceState.Attributes[providerAccount] == "" {
+ return fmt.Errorf("expected 'provider_account' to be set")
+ }
+ nodePoolsId := fmt.Sprintf("clusters.%d.node_pools.0.id", clustersIdx)
+ if instanceState.Attributes[nodePoolsId] == "" {
+ return fmt.Errorf("expected 'node_pools.0.id' to be set")
+ }
+ nodePoolsName := fmt.Sprintf("clusters.%d.node_pools.0.name", clustersIdx)
+ if instanceState.Attributes[nodePoolsName] == "" {
+ return fmt.Errorf("expected 'node_pools.0.name' to be set")
+ }
+ metadataExternalIps := fmt.Sprintf("clusters.%d.metadata.external_ips.0", clustersIdx)
+ if instanceState.Attributes[metadataExternalIps] == "" {
+ return fmt.Errorf("expected 'metadata.external_ips.0' to be set")
+ }
+
+ return nil
+ }
+}
diff --git a/internal/provider/models/cluster.go b/internal/provider/models/cluster.go
new file mode 100644
index 00000000..a9a451c7
--- /dev/null
+++ b/internal/provider/models/cluster.go
@@ -0,0 +1,153 @@
+package models
+
+import (
+ "context"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Cluster describes the resource data source data model.
+type Cluster struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ CloudProvider types.String `tfsdk:"cloud_provider"`
+ DbInstanceType types.String `tfsdk:"db_instance_type"`
+ Region types.String `tfsdk:"region"`
+ PodSubnetRange types.String `tfsdk:"pod_subnet_range"`
+ ServicePeeringRange types.String `tfsdk:"service_peering_range"`
+ ServiceSubnetRange types.String `tfsdk:"service_subnet_range"`
+ VpcSubnetRange types.String `tfsdk:"vpc_subnet_range"`
+ Metadata types.Object `tfsdk:"metadata"`
+ Status types.String `tfsdk:"status"`
+ CreatedAt types.String `tfsdk:"created_at"`
+ UpdatedAt types.String `tfsdk:"updated_at"`
+ Type types.String `tfsdk:"type"`
+ TenantId types.String `tfsdk:"tenant_id"`
+ ProviderAccount types.String `tfsdk:"provider_account"`
+ NodePools types.List `tfsdk:"node_pools"`
+ WorkspaceIds types.List `tfsdk:"workspace_ids"`
+ Tags types.List `tfsdk:"tags"`
+ IsLimited types.Bool `tfsdk:"is_limited"`
+}
+
+type ClusterTag struct {
+ Key types.String `tfsdk:"key"`
+ Value types.String `tfsdk:"value"`
+}
+
+type NodePool struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ ClusterId types.String `tfsdk:"cluster_id"`
+ CloudProvider types.String `tfsdk:"cloud_provider"`
+ MaxNodeCount types.Int64 `tfsdk:"max_node_count"`
+ NodeInstanceType types.String `tfsdk:"node_instance_type"`
+ IsDefault types.Bool `tfsdk:"is_default"`
+ SupportedAstroMachines types.List `tfsdk:"supported_astro_machines"`
+ CreatedAt types.String `tfsdk:"created_at"`
+ UpdatedAt types.String `tfsdk:"updated_at"`
+}
+
+func (data *Cluster) ReadFromResponse(
+ ctx context.Context,
+ cluster *platform.Cluster,
+) diag.Diagnostics {
+ data.Id = types.StringValue(cluster.Id)
+ data.Name = types.StringValue(cluster.Name)
+ data.CloudProvider = types.StringValue(string(cluster.CloudProvider))
+ data.DbInstanceType = types.StringValue(cluster.DbInstanceType)
+ data.Region = types.StringValue(cluster.Region)
+ data.PodSubnetRange = types.StringPointerValue(cluster.PodSubnetRange)
+ data.ServicePeeringRange = types.StringPointerValue(cluster.ServicePeeringRange)
+ data.ServiceSubnetRange = types.StringPointerValue(cluster.ServiceSubnetRange)
+ data.VpcSubnetRange = types.StringValue(cluster.VpcSubnetRange)
+ var diags diag.Diagnostics
+ data.Metadata, diags = ClusterMetadataTypesObject(ctx, cluster.Metadata)
+ if diags.HasError() {
+ return diags
+ }
+ data.Status = types.StringValue(string(cluster.Status))
+ data.CreatedAt = types.StringValue(cluster.CreatedAt.String())
+ data.UpdatedAt = types.StringValue(cluster.UpdatedAt.String())
+ data.Type = types.StringValue(string(cluster.Type))
+ data.TenantId = types.StringPointerValue(cluster.TenantId)
+ data.ProviderAccount = types.StringPointerValue(cluster.ProviderAccount)
+ data.NodePools, diags = utils.ObjectList(ctx, cluster.NodePools, schemas.NodePoolAttributeTypes(), NodePoolTypesObject)
+ if diags.HasError() {
+ return diags
+ }
+ data.WorkspaceIds, diags = utils.StringList(cluster.WorkspaceIds)
+ if diags.HasError() {
+ return diags
+ }
+ data.Tags, diags = utils.ObjectList(ctx, cluster.Tags, schemas.ClusterTagAttributeTypes(), ClusterTagTypesObject)
+ if diags.HasError() {
+ return diags
+ }
+ data.IsLimited = types.BoolPointerValue(cluster.IsLimited)
+
+ return nil
+}
+
+func ClusterTagTypesObject(
+ ctx context.Context,
+ tag platform.ClusterK8sTag,
+) (types.Object, diag.Diagnostics) {
+ obj := ClusterTag{
+ Key: types.StringPointerValue(tag.Key),
+ Value: types.StringPointerValue(tag.Value),
+ }
+
+ return types.ObjectValueFrom(ctx, schemas.ClusterTagAttributeTypes(), obj)
+}
+
+func NodePoolTypesObject(
+ ctx context.Context,
+ nodePool platform.NodePool,
+) (types.Object, diag.Diagnostics) {
+ supportedAstroMachines, diags := utils.StringList(nodePool.SupportedAstroMachines)
+ if diags.HasError() {
+ return types.ObjectNull(schemas.NodePoolAttributeTypes()), diags
+ }
+ obj := NodePool{
+ Id: types.StringValue(nodePool.Id),
+ Name: types.StringValue(nodePool.Name),
+ ClusterId: types.StringValue(nodePool.ClusterId),
+ CloudProvider: types.StringValue(string(nodePool.CloudProvider)),
+ MaxNodeCount: types.Int64Value(int64(nodePool.MaxNodeCount)),
+ NodeInstanceType: types.StringValue(nodePool.NodeInstanceType),
+ IsDefault: types.BoolValue(nodePool.IsDefault),
+ SupportedAstroMachines: supportedAstroMachines,
+ CreatedAt: types.StringValue(nodePool.CreatedAt.String()),
+ UpdatedAt: types.StringValue(nodePool.UpdatedAt.String()),
+ }
+
+ return types.ObjectValueFrom(ctx, schemas.NodePoolAttributeTypes(), obj)
+}
+
+type ClusterMetadata struct {
+ OidcIssuerUrl types.String `tfsdk:"oidc_issuer_url"`
+ ExternalIps types.List `tfsdk:"external_ips"`
+}
+
+func ClusterMetadataTypesObject(
+ ctx context.Context,
+ metadata *platform.ClusterMetadata,
+) (types.Object, diag.Diagnostics) {
+ if metadata != nil {
+ externalIps, diags := utils.StringList(metadata.ExternalIPs)
+ if diags.HasError() {
+ return types.ObjectNull(schemas.ClusterMetadataAttributeTypes()), diags
+ }
+ obj := ClusterMetadata{
+ OidcIssuerUrl: types.StringPointerValue(metadata.OidcIssuerUrl),
+ ExternalIps: externalIps,
+ }
+ return types.ObjectValueFrom(ctx, schemas.ClusterMetadataAttributeTypes(), obj)
+ }
+ return types.ObjectNull(schemas.ClusterMetadataAttributeTypes()), nil
+}
diff --git a/internal/provider/models/clusters.go b/internal/provider/models/clusters.go
new file mode 100644
index 00000000..ef8d59bf
--- /dev/null
+++ b/internal/provider/models/clusters.go
@@ -0,0 +1,45 @@
+package models
+
+import (
+ "context"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ClustersDataSource describes the data source data model.
+type ClustersDataSource struct {
+ Clusters types.List `tfsdk:"clusters"`
+ CloudProvider types.String `tfsdk:"cloud_provider"` // query parameter
+ Names types.List `tfsdk:"names"` // query parameter
+}
+
+func (data *ClustersDataSource) ReadFromResponse(
+ ctx context.Context,
+ clusters []platform.Cluster,
+) diag.Diagnostics {
+ values := make([]attr.Value, len(clusters))
+ for i, deployment := range clusters {
+ var singleClusterData Cluster
+ diags := singleClusterData.ReadFromResponse(ctx, &deployment)
+ if diags.HasError() {
+ return diags
+ }
+
+ objectValue, diags := types.ObjectValueFrom(ctx, schemas.ClustersElementAttributeTypes(), singleClusterData)
+ if diags.HasError() {
+ return diags
+ }
+ values[i] = objectValue
+ }
+ var diags diag.Diagnostics
+ data.Clusters, diags = types.ListValue(types.ObjectType{AttrTypes: schemas.ClustersElementAttributeTypes()}, values)
+ if diags.HasError() {
+ return diags
+ }
+
+ return nil
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 045b22b5..55eca576 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -132,6 +132,8 @@ func (p *AstronomerProvider) DataSources(ctx context.Context) []func() datasourc
datasources.NewDeploymentDataSource,
datasources.NewDeploymentsDataSource,
datasources.NewOrganizationDataSource,
+ datasources.NewClusterDataSource,
+ datasources.NewClustersDataSource,
}
}
diff --git a/internal/provider/schemas/cluster.go b/internal/provider/schemas/cluster.go
new file mode 100644
index 00000000..a9363304
--- /dev/null
+++ b/internal/provider/schemas/cluster.go
@@ -0,0 +1,208 @@
+package schemas
+
+import (
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/validators"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ datasourceSchema "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ClusterDataSourceSchemaAttributes returns the Terraform schema attributes
+// for the astronomer_cluster data source. Only "id" is required (and must be
+// a CUID); every other attribute is computed from the platform API response.
+func ClusterDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+	return map[string]datasourceSchema.Attribute{
+		"id": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster identifier",
+			Required:            true,
+			Validators:          []validator.String{validators.IsCuid()},
+		},
+		"name": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster name",
+			Computed:            true,
+		},
+		"cloud_provider": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster cloud provider",
+			Computed:            true,
+		},
+		"db_instance_type": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster database instance type",
+			Computed:            true,
+		},
+		"region": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster region",
+			Computed:            true,
+		},
+		"pod_subnet_range": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster pod subnet range",
+			Computed:            true,
+		},
+		"service_peering_range": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster service peering range",
+			Computed:            true,
+		},
+		"service_subnet_range": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster service subnet range",
+			Computed:            true,
+		},
+		"vpc_subnet_range": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster VPC subnet range",
+			Computed:            true,
+		},
+		"metadata": datasourceSchema.SingleNestedAttribute{
+			Attributes:          ClusterMetadataDataSourceAttributes(),
+			Computed:            true,
+			MarkdownDescription: "Cluster metadata",
+		},
+		"status": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster status",
+			Computed:            true,
+		},
+		"created_at": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster creation timestamp",
+			Computed:            true,
+		},
+		"updated_at": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster last updated timestamp",
+			Computed:            true,
+		},
+		"type": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster type",
+			Computed:            true,
+		},
+		"tenant_id": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster tenant ID",
+			Computed:            true,
+		},
+		"provider_account": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster provider account",
+			Computed:            true,
+		},
+		"node_pools": datasourceSchema.ListNestedAttribute{
+			NestedObject: datasourceSchema.NestedAttributeObject{
+				Attributes: NodePoolDataSourceSchemaAttributes(),
+			},
+			MarkdownDescription: "Cluster node pools",
+			Computed:            true,
+		},
+		"workspace_ids": datasourceSchema.ListAttribute{
+			ElementType:         types.StringType,
+			MarkdownDescription: "Cluster workspace IDs",
+			Computed:            true,
+		},
+		"tags": datasourceSchema.ListNestedAttribute{
+			NestedObject: datasourceSchema.NestedAttributeObject{
+				Attributes: ClusterTagDataSourceAttributes(),
+			},
+			MarkdownDescription: "Cluster tags",
+			Computed:            true,
+		},
+		"is_limited": datasourceSchema.BoolAttribute{
+			MarkdownDescription: "Whether the cluster is limited",
+			Computed:            true,
+		},
+	}
+}
+
+// ClusterMetadataAttributeTypes returns the attr.Type map for the cluster
+// "metadata" nested object; it must stay in sync with
+// ClusterMetadataDataSourceAttributes.
+func ClusterMetadataAttributeTypes() map[string]attr.Type {
+	return map[string]attr.Type{
+		"external_ips":    types.ListType{ElemType: types.StringType},
+		"oidc_issuer_url": types.StringType,
+	}
+}
+
+// ClusterMetadataDataSourceAttributes returns the data source schema
+// attributes for the cluster "metadata" nested object; all fields are
+// computed from the API response.
+func ClusterMetadataDataSourceAttributes() map[string]datasourceSchema.Attribute {
+	return map[string]datasourceSchema.Attribute{
+		"external_ips": datasourceSchema.ListAttribute{
+			ElementType:         types.StringType,
+			MarkdownDescription: "Cluster external IPs",
+			Computed:            true,
+		},
+		"oidc_issuer_url": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster OIDC issuer URL",
+			Computed:            true,
+		},
+	}
+}
+
+// ClusterTagAttributeTypes returns the attr.Type map for one cluster tag
+// object; it must stay in sync with ClusterTagDataSourceAttributes.
+func ClusterTagAttributeTypes() map[string]attr.Type {
+	return map[string]attr.Type{
+		"key":   types.StringType,
+		"value": types.StringType,
+	}
+}
+
+// ClusterTagDataSourceAttributes returns the data source schema attributes
+// for one cluster tag (key/value pair); both fields are computed.
+func ClusterTagDataSourceAttributes() map[string]datasourceSchema.Attribute {
+	return map[string]datasourceSchema.Attribute{
+		"key": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster tag key",
+			Computed:            true,
+		},
+		"value": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Cluster tag value",
+			Computed:            true,
+		},
+	}
+}
+
+// NodePoolAttributeTypes returns the attr.Type map for one node pool object;
+// it must stay in sync with NodePoolDataSourceSchemaAttributes.
+func NodePoolAttributeTypes() map[string]attr.Type {
+	return map[string]attr.Type{
+		"id":                 types.StringType,
+		"name":               types.StringType,
+		"cluster_id":         types.StringType,
+		"cloud_provider":     types.StringType,
+		"max_node_count":     types.Int64Type,
+		"node_instance_type": types.StringType,
+		"is_default":         types.BoolType,
+		"supported_astro_machines": types.ListType{
+			ElemType: types.StringType,
+		},
+		"created_at": types.StringType,
+		"updated_at": types.StringType,
+	}
+}
+
+// NodePoolDataSourceSchemaAttributes returns the data source schema
+// attributes for one cluster node pool; all fields are computed from the API
+// response.
+func NodePoolDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+	return map[string]datasourceSchema.Attribute{
+		"id": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool identifier",
+			Computed:            true,
+		},
+		"name": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool name",
+			Computed:            true,
+		},
+		"cluster_id": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool cluster identifier",
+			Computed:            true,
+		},
+		"cloud_provider": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool cloud provider",
+			Computed:            true,
+		},
+		"max_node_count": datasourceSchema.Int64Attribute{
+			MarkdownDescription: "Node pool maximum node count",
+			Computed:            true,
+		},
+		"node_instance_type": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool node instance type",
+			Computed:            true,
+		},
+		"is_default": datasourceSchema.BoolAttribute{
+			MarkdownDescription: "Whether the node pool is the default node pool of the cluster",
+			Computed:            true,
+		},
+		"supported_astro_machines": datasourceSchema.ListAttribute{
+			ElementType:         types.StringType,
+			MarkdownDescription: "Node pool supported Astro machines",
+			Computed:            true,
+		},
+		"created_at": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool creation timestamp",
+			Computed:            true,
+		},
+		"updated_at": datasourceSchema.StringAttribute{
+			MarkdownDescription: "Node pool last updated timestamp",
+			Computed:            true,
+		},
+	}
+}
diff --git a/internal/provider/schemas/clusters.go b/internal/provider/schemas/clusters.go
new file mode 100644
index 00000000..3e10e3cf
--- /dev/null
+++ b/internal/provider/schemas/clusters.go
@@ -0,0 +1,76 @@
+package schemas
+
+import (
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ClustersElementAttributeTypes returns the attr.Type map for one element of
+// the "clusters" list attribute. It is used by types.ObjectValueFrom when
+// converting the Cluster model, so it must stay in sync with both that
+// model's tfsdk tags and ClusterDataSourceSchemaAttributes.
+func ClustersElementAttributeTypes() map[string]attr.Type {
+	return map[string]attr.Type{
+		"id":                    types.StringType,
+		"name":                  types.StringType,
+		"cloud_provider":        types.StringType,
+		"db_instance_type":      types.StringType,
+		"region":                types.StringType,
+		"pod_subnet_range":      types.StringType,
+		"service_peering_range": types.StringType,
+		"service_subnet_range":  types.StringType,
+		"vpc_subnet_range":      types.StringType,
+		"metadata": types.ObjectType{
+			AttrTypes: ClusterMetadataAttributeTypes(),
+		},
+		"status":           types.StringType,
+		"created_at":       types.StringType,
+		"updated_at":       types.StringType,
+		"type":             types.StringType,
+		"tenant_id":        types.StringType,
+		"provider_account": types.StringType,
+		"node_pools": types.ListType{
+			ElemType: types.ObjectType{
+				AttrTypes: NodePoolAttributeTypes(),
+			},
+		},
+		"workspace_ids": types.ListType{
+			ElemType: types.StringType,
+		},
+		"tags": types.ListType{
+			ElemType: types.ObjectType{
+				AttrTypes: ClusterTagAttributeTypes(),
+			},
+		},
+		"is_limited": types.BoolType,
+	}
+}
+
+// ClustersDataSourceSchemaAttributes returns the Terraform schema attributes
+// for the astronomer_clusters data source: the computed list of clusters plus
+// the optional "cloud_provider" and "names" query-parameter filters.
+func ClustersDataSourceSchemaAttributes() map[string]schema.Attribute {
+	return map[string]schema.Attribute{
+		"clusters": schema.ListNestedAttribute{
+			NestedObject: schema.NestedAttributeObject{
+				Attributes: ClusterDataSourceSchemaAttributes(),
+			},
+			MarkdownDescription: "Clusters matching the query parameters",
+			Computed:            true,
+		},
+		"cloud_provider": schema.StringAttribute{
+			MarkdownDescription: "Filter clusters by cloud provider",
+			Optional:            true,
+			Validators: []validator.String{
+				stringvalidator.OneOf(
+					string(platform.ClusterCloudProviderAWS),
+					string(platform.ClusterCloudProviderGCP),
+					string(platform.ClusterCloudProviderAZURE),
+				),
+			},
+		},
+		"names": schema.ListAttribute{
+			ElementType:         types.StringType,
+			MarkdownDescription: "Filter clusters by names",
+			Optional:            true,
+			Validators: []validator.List{
+				listvalidator.ValueStringsAre(stringvalidator.LengthAtLeast(1)),
+			},
+		},
+	}
+}