diff --git a/docs/data-sources/cluster_options.md b/docs/data-sources/cluster_options.md
new file mode 100644
index 00000000..17e457aa
--- /dev/null
+++ b/docs/data-sources/cluster_options.md
@@ -0,0 +1,118 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "astronomer_cluster_options Data Source - astronomer"
+subcategory: ""
+description: |-
+ ClusterOptions data source
+---
+
+# astronomer_cluster_options (Data Source)
+
+ClusterOptions data source
+
+## Example Usage
+
+```terraform
+data "astronomer_cluster_options" "example_cluster_options" {
+ type = "HYBRID"
+}
+
+data "astronomer_cluster_options" "example_cluster_options_filter_by_provider" {
+ type = "HYBRID"
+ cloud_provider = "AWS"
+}
+```
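+
+The `cluster_options` attribute can be consumed like any other computed list. A minimal sketch (the data source name and output names are illustrative; the attribute paths follow the schema below):
+
+```terraform
+data "astronomer_cluster_options" "aws_hybrid" {
+ type = "HYBRID"
+ cloud_provider = "AWS"
+}
+
+output "default_node_instance_name" {
+ value = data.astronomer_cluster_options.aws_hybrid.cluster_options[0].default_node_instance.name
+}
+
+output "region_names" {
+ value = [for region in data.astronomer_cluster_options.aws_hybrid.cluster_options[0].regions : region.name]
+}
+```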
+
+
+## Schema
+
+### Required
+
+- `type` (String) ClusterOptions type
+
+### Optional
+
+- `cloud_provider` (String) ClusterOptions cloud provider
+
+### Read-Only
+
+- `cluster_options` (Attributes List) (see [below for nested schema](#nestedatt--cluster_options))
+
+
+### Nested Schema for `cluster_options`
+
+Read-Only:
+
+- `database_instances` (Attributes List) ClusterOption database instances (see [below for nested schema](#nestedatt--cluster_options--database_instances))
+- `default_database_instance` (Attributes) ClusterOption default database instance (see [below for nested schema](#nestedatt--cluster_options--default_database_instance))
+- `default_node_instance` (Attributes) ClusterOption default node instance (see [below for nested schema](#nestedatt--cluster_options--default_node_instance))
+- `default_pod_subnet_range` (String) ClusterOption default pod subnet range
+- `default_region` (Attributes) ClusterOption default region (see [below for nested schema](#nestedatt--cluster_options--default_region))
+- `default_service_peering_range` (String) ClusterOption default service peering range
+- `default_service_subnet_range` (String) ClusterOption default service subnet range
+- `default_vpc_subnet_range` (String) ClusterOption default vpc subnet range
+- `node_count_default` (Number) ClusterOption node count default
+- `node_count_max` (Number) ClusterOption node count max
+- `node_count_min` (Number) ClusterOption node count min
+- `node_instances` (Attributes List) ClusterOption node instances (see [below for nested schema](#nestedatt--cluster_options--node_instances))
+- `provider` (String) ClusterOption provider
+- `regions` (Attributes List) ClusterOption regions (see [below for nested schema](#nestedatt--cluster_options--regions))
+
+
+### Nested Schema for `cluster_options.database_instances`
+
+Read-Only:
+
+- `cpu` (Number) Provider instance cpu
+- `memory` (String) Provider instance memory
+- `name` (String) Provider instance name
+
+
+
+### Nested Schema for `cluster_options.default_database_instance`
+
+Read-Only:
+
+- `cpu` (Number) Provider instance cpu
+- `memory` (String) Provider instance memory
+- `name` (String) Provider instance name
+
+
+
+### Nested Schema for `cluster_options.default_node_instance`
+
+Read-Only:
+
+- `cpu` (Number) Provider instance cpu
+- `memory` (String) Provider instance memory
+- `name` (String) Provider instance name
+
+
+
+### Nested Schema for `cluster_options.default_region`
+
+Read-Only:
+
+- `banned_instances` (List of String) Region banned instances
+- `limited` (Boolean) Whether the region is limited
+- `name` (String) Region name
+
+
+
+### Nested Schema for `cluster_options.node_instances`
+
+Read-Only:
+
+- `cpu` (Number) Provider instance cpu
+- `memory` (String) Provider instance memory
+- `name` (String) Provider instance name
+
+
+
+### Nested Schema for `cluster_options.regions`
+
+Read-Only:
+
+- `banned_instances` (List of String) Region banned instances
+- `limited` (Boolean) Whether the region is limited
+- `name` (String) Region name
diff --git a/examples/data-sources/astronomer_cluster_options/data-source.tf b/examples/data-sources/astronomer_cluster_options/data-source.tf
new file mode 100644
index 00000000..888b62e8
--- /dev/null
+++ b/examples/data-sources/astronomer_cluster_options/data-source.tf
@@ -0,0 +1,8 @@
+data "astronomer_cluster_options" "example_cluster_options" {
+ type = "HYBRID"
+}
+
+data "astronomer_cluster_options" "example_cluster_options_filter_by_provider" {
+ type = "HYBRID"
+ cloud_provider = "AWS"
+}
diff --git a/internal/provider/datasources/data_source_cluster_options.go b/internal/provider/datasources/data_source_cluster_options.go
new file mode 100644
index 00000000..0d0a681d
--- /dev/null
+++ b/internal/provider/datasources/data_source_cluster_options.go
@@ -0,0 +1,125 @@
+package datasources
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/models"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &clusterOptionsDataSource{}
+var _ datasource.DataSourceWithConfigure = &clusterOptionsDataSource{}
+
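+// NewClusterOptionsDataSource returns a new cluster options data source.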
+func NewClusterOptionsDataSource() datasource.DataSource {
+ return &clusterOptionsDataSource{}
+}
+
+// clusterOptionsDataSource defines the data source implementation.
+type clusterOptionsDataSource struct {
+ PlatformClient platform.ClientWithResponsesInterface
+ OrganizationId string
+}
+
+func (d *clusterOptionsDataSource) Metadata(
+ ctx context.Context,
+ req datasource.MetadataRequest,
+ resp *datasource.MetadataResponse,
+) {
+ resp.TypeName = req.ProviderTypeName + "_cluster_options"
+}
+
+func (d *clusterOptionsDataSource) Schema(
+ ctx context.Context,
+ req datasource.SchemaRequest,
+ resp *datasource.SchemaResponse,
+) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "ClusterOptions data source",
+ Attributes: schemas.ClusterOptionsDataSourceSchemaAttributes(),
+ }
+}
+
+func (d *clusterOptionsDataSource) Configure(
+ ctx context.Context,
+ req datasource.ConfigureRequest,
+ resp *datasource.ConfigureResponse,
+) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClients, ok := req.ProviderData.(models.ApiClientsModel)
+ if !ok {
+ utils.DataSourceApiClientConfigureError(ctx, req, resp)
+ return
+ }
+
+ d.PlatformClient = apiClients.PlatformClient
+ d.OrganizationId = apiClients.OrganizationId
+}
+
+func (d *clusterOptionsDataSource) Read(
+ ctx context.Context,
+ req datasource.ReadRequest,
+ resp *datasource.ReadResponse,
+) {
+ var data models.ClusterOptionsDataSource
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ params := &platform.GetClusterOptionsParams{
+ Type: platform.GetClusterOptionsParamsType(data.Type.ValueString()),
+ }
+ // Only send the cloud provider filter if one was configured
+ if !data.CloudProvider.IsNull() {
+ provider := platform.GetClusterOptionsParamsProvider(data.CloudProvider.ValueString())
+ params.Provider = &provider
+ }
+
+ var clusterOptions []platform.ClusterOptions
+ clusterOptionsResp, err := d.PlatformClient.GetClusterOptionsWithResponse(
+ ctx,
+ d.OrganizationId,
+ params,
+ )
+
+ if err != nil {
+ tflog.Error(ctx, "failed to list clusterOptions", map[string]interface{}{"error": err})
+ resp.Diagnostics.AddError(
+ "Client Error",
+ fmt.Sprintf("Unable to read clusterOptions, got error: %s", err),
+ )
+ return
+ }
+ _, diagnostic := clients.NormalizeAPIError(ctx, clusterOptionsResp.HTTPResponse, clusterOptionsResp.Body)
+
+ if diagnostic != nil {
+ resp.Diagnostics.Append(diagnostic)
+ return
+ }
+ if clusterOptionsResp.JSON200 == nil {
+ tflog.Error(ctx, "failed to list clusterOptions", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read clusterOptions, got nil response")
+ return
+ }
+ clusterOptions = append(clusterOptions, *clusterOptionsResp.JSON200...)
+
+ // Populate the model with the response data
+ diags := data.ReadFromResponse(ctx, clusterOptions)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
diff --git a/internal/provider/datasources/data_source_cluster_options_test.go b/internal/provider/datasources/data_source_cluster_options_test.go
new file mode 100644
index 00000000..e5952725
--- /dev/null
+++ b/internal/provider/datasources/data_source_cluster_options_test.go
@@ -0,0 +1,136 @@
+package datasources_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+
+ astronomerprovider "github.com/astronomer/astronomer-terraform-provider/internal/provider"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+)
+
+func TestAcc_DataSourceClusterOptions(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ astronomerprovider.TestAccPreCheck(t)
+ },
+ ProtoV6ProviderFactories: astronomerprovider.TestAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: astronomerprovider.ProviderConfig(t, true) + clusterOptions("HYBRID", "AWS"),
+ Check: resource.ComposeTestCheckFunc(
+ checkClusterOptions("AWS"),
+ ),
+ },
+ {
+ Config: astronomerprovider.ProviderConfig(t, true) + clusterOptionsWithoutProviderFilter("HYBRID"),
+ Check: resource.ComposeTestCheckFunc(
+ checkClusterOptionsWithoutProviderFilter(),
+ ),
+ },
+ },
+ })
+}
+
+func clusterOptions(clusterType, provider string) string {
+ return fmt.Sprintf(`
+data astronomer_cluster_options "test_data_cluster_options" {
+ type = "%v"
+ cloud_provider = "%v"
+}`, clusterType, provider)
+}
+
+func clusterOptionsWithoutProviderFilter(clusterType string) string {
+ return fmt.Sprintf(`
+data astronomer_cluster_options "test_data_cluster_options" {
+ type = "%v"
+}`, clusterType)
+}
+
+func checkClusterOptions(provider string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ instanceState, numClusterOptions, err := utils.GetDataSourcesLength(s, "test_data_cluster_options", "cluster_options")
+ if err != nil {
+ return err
+ }
+ if numClusterOptions == 0 {
+ return fmt.Errorf("expected clusterOptions to be greater or equal to 1, got %s", instanceState.Attributes["cluster_options.#"])
+ }
+
+ clusterOptionIdx := -1
+ for i := 0; i < numClusterOptions; i++ {
+ idxProvider := fmt.Sprintf("cluster_options.%d.provider", i)
+ if instanceState.Attributes[idxProvider] == provider {
+ clusterOptionIdx = i
+ break
+ }
+ }
+ if clusterOptionIdx == -1 {
+ return fmt.Errorf("cluster option with provider %s not found", provider)
+ }
+ // resource.TestCheckResourceAttrSet only returns a check function, so inspect the
+ // instance state attributes directly instead of discarding those functions.
+ checkAttrSet := func(key string) error {
+ if instanceState.Attributes[key] == "" {
+ return fmt.Errorf("expected attribute %s to be set", key)
+ }
+ return nil
+ }
+
+ clusterOption := fmt.Sprintf("cluster_options.%d", clusterOptionIdx)
+ attrKeys := []string{
+ "database_instances.0.cpu",
+ "database_instances.0.memory",
+ "database_instances.0.name",
+ "default_database_instance.cpu",
+ "default_database_instance.memory",
+ "default_database_instance.name",
+ "node_instances.0.cpu",
+ "node_instances.0.memory",
+ "node_instances.0.name",
+ "default_node_instance.cpu",
+ "default_node_instance.memory",
+ "default_node_instance.name",
+ "regions.0.name",
+ "default_region.name",
+ "node_count_min",
+ "node_count_max",
+ "node_count_default",
+ "default_vpc_subnet_range",
+ "default_pod_subnet_range",
+ "default_service_subnet_range",
+ "default_service_peering_range",
+ }
+ for _, key := range attrKeys {
+ if err := checkAttrSet(clusterOption + "." + key); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
+func checkClusterOptionsWithoutProviderFilter() resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ instanceState, numClusterOptions, err := utils.GetDataSourcesLength(s, "test_data_cluster_options", "cluster_options")
+ if err != nil {
+ return err
+ }
+ if numClusterOptions <= 1 {
+ return fmt.Errorf("expected more than 1 cluster option, got %s", instanceState.Attributes["cluster_options.#"])
+ }
+ var providers []string
+ for i := 0; i < numClusterOptions; i++ {
+ idxProvider := fmt.Sprintf("cluster_options.%d.provider", i)
+ providers = append(providers, instanceState.Attributes[idxProvider])
+ }
+ if len(providers) == 0 {
+ return fmt.Errorf("expected providers to be greater than 0")
+ }
+
+ for _, provider := range providers {
+ if err := checkClusterOptions(provider)(s); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
diff --git a/internal/provider/models/cluster_options.go b/internal/provider/models/cluster_options.go
new file mode 100644
index 00000000..85e4b943
--- /dev/null
+++ b/internal/provider/models/cluster_options.go
@@ -0,0 +1,156 @@
+package models
+
+import (
+ "context"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ClusterOptionsDataSource describes the data source data model.
+type ClusterOptionsDataSource struct {
+ ClusterOptions types.List `tfsdk:"cluster_options"`
+ Type types.String `tfsdk:"type"`
+ CloudProvider types.String `tfsdk:"cloud_provider"`
+}
+
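+// ReadFromResponse maps the cluster options returned by the platform API onto the data source model.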
+func (data *ClusterOptionsDataSource) ReadFromResponse(
+ ctx context.Context,
+ clusterOptions []platform.ClusterOptions,
+) diag.Diagnostics {
+ if len(clusterOptions) == 0 {
+ data.ClusterOptions = types.ListNull(types.ObjectType{AttrTypes: schemas.ClusterOptionsElementAttributeTypes()})
+ return nil
+ }
+
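+ // Convert each cluster option from the API response into a Terraform object value.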
+ values := make([]attr.Value, len(clusterOptions))
+ for i, clusterOption := range clusterOptions {
+ var data ClusterOptionDataSource
+ diags := data.ReadFromResponse(ctx, &clusterOption)
+ if diags.HasError() {
+ return diags
+ }
+
+ objectValue, diags := types.ObjectValueFrom(ctx, schemas.ClusterOptionsElementAttributeTypes(), data)
+ if diags.HasError() {
+ return diags
+ }
+ values[i] = objectValue
+ }
+ var diags diag.Diagnostics
+ data.ClusterOptions, diags = types.ListValue(types.ObjectType{AttrTypes: schemas.ClusterOptionsElementAttributeTypes()}, values)
+ if diags.HasError() {
+ return diags
+ }
+ return nil
+}
+
+// ClusterOptionDataSource describes a single cluster option in the data source data model.
+type ClusterOptionDataSource struct {
+ Provider types.String `tfsdk:"provider"`
+ DefaultVpcSubnetRange types.String `tfsdk:"default_vpc_subnet_range"`
+ DefaultPodSubnetRange types.String `tfsdk:"default_pod_subnet_range"`
+ DefaultServiceSubnetRange types.String `tfsdk:"default_service_subnet_range"`
+ DefaultServicePeeringRange types.String `tfsdk:"default_service_peering_range"`
+ NodeCountMin types.Int64 `tfsdk:"node_count_min"`
+ NodeCountMax types.Int64 `tfsdk:"node_count_max"`
+ NodeCountDefault types.Int64 `tfsdk:"node_count_default"`
+ DefaultRegion types.Object `tfsdk:"default_region"`
+ Regions types.List `tfsdk:"regions"`
+ DefaultNodeInstance types.Object `tfsdk:"default_node_instance"`
+ NodeInstances types.List `tfsdk:"node_instances"`
+ DefaultDatabaseInstance types.Object `tfsdk:"default_database_instance"`
+ DatabaseInstances types.List `tfsdk:"database_instances"`
+}
+
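+// ReadFromResponse maps a single cluster option returned by the platform API onto the model.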
+func (data *ClusterOptionDataSource) ReadFromResponse(
+ ctx context.Context,
+ clusterOption *platform.ClusterOptions,
+) diag.Diagnostics {
+ data.Provider = types.StringValue(string(clusterOption.Provider))
+ data.DefaultVpcSubnetRange = types.StringValue(clusterOption.DefaultVpcSubnetRange)
+ data.DefaultPodSubnetRange = types.StringPointerValue(clusterOption.DefaultPodSubnetRange)
+ data.DefaultServiceSubnetRange = types.StringPointerValue(clusterOption.DefaultServiceSubnetRange)
+ data.DefaultServicePeeringRange = types.StringPointerValue(clusterOption.DefaultServicePeeringRange)
+ data.NodeCountMin = types.Int64Value(int64(clusterOption.NodeCountMin))
+ data.NodeCountMax = types.Int64Value(int64(clusterOption.NodeCountMax))
+ data.NodeCountDefault = types.Int64Value(int64(clusterOption.NodeCountDefault))
+ var diags diag.Diagnostics
+ data.DefaultRegion, diags = RegionTypesObject(ctx, clusterOption.DefaultRegion)
+ if diags.HasError() {
+ return diags
+ }
+
+ data.Regions, diags = utils.ObjectList(ctx, &clusterOption.Regions, schemas.RegionAttributeTypes(), RegionTypesObject)
+ if diags.HasError() {
+ return diags
+ }
+ data.DefaultNodeInstance, diags = ProviderInstanceObject(ctx, clusterOption.DefaultNodeInstance)
+ if diags.HasError() {
+ return diags
+ }
+
+ data.NodeInstances, diags = utils.ObjectList(ctx, &clusterOption.NodeInstances, schemas.ProviderInstanceAttributeTypes(), ProviderInstanceObject)
+ if diags.HasError() {
+ return diags
+ }
+
+ data.DefaultDatabaseInstance, diags = ProviderInstanceObject(ctx, clusterOption.DefaultDatabaseInstance)
+ if diags.HasError() {
+ return diags
+ }
+
+ data.DatabaseInstances, diags = utils.ObjectList(ctx, &clusterOption.DatabaseInstances, schemas.ProviderInstanceAttributeTypes(), ProviderInstanceObject)
+ if diags.HasError() {
+ return diags
+ }
+
+ return nil
+}
+
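+// Region describes a cloud provider region as returned in cluster options.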
+type Region struct {
+ Name types.String `tfsdk:"name"`
+ Limited types.Bool `tfsdk:"limited"`
+ BannedInstances types.List `tfsdk:"banned_instances"`
+}
+
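+// RegionTypesObject converts a platform.ProviderRegion into a Terraform object value.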
+func RegionTypesObject(
+ ctx context.Context,
+ regionInput platform.ProviderRegion,
+) (regionOutput types.Object, diags diag.Diagnostics) {
+ region := Region{
+ Name: types.StringValue(regionInput.Name),
+ }
+ region.BannedInstances, diags = utils.StringList(regionInput.BannedInstances)
+ if diags.HasError() {
+ return regionOutput, diags
+ }
+
+ if regionInput.Limited != nil {
+ val := types.BoolValue(*regionInput.Limited)
+ region.Limited = val
+ }
+ return types.ObjectValueFrom(ctx, schemas.RegionAttributeTypes(), region)
+}
+
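+// ProviderInstance describes a node or database instance type offered by a cloud provider.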
+type ProviderInstance struct {
+ Name types.String `tfsdk:"name"`
+ Memory types.String `tfsdk:"memory"`
+ Cpu types.Int64 `tfsdk:"cpu"`
+}
+
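+// ProviderInstanceObject converts a platform.ProviderInstanceType into a Terraform object value.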
+func ProviderInstanceObject(
+ ctx context.Context,
+ providerInstanceInput platform.ProviderInstanceType,
+) (types.Object, diag.Diagnostics) {
+ providerInstance := ProviderInstance{
+ Name: types.StringValue(providerInstanceInput.Name),
+ Memory: types.StringValue(providerInstanceInput.Memory),
+ Cpu: types.Int64Value(int64(providerInstanceInput.Cpu)),
+ }
+ return types.ObjectValueFrom(ctx, schemas.ProviderInstanceAttributeTypes(), providerInstance)
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 55eca576..3af780f6 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -132,6 +132,7 @@ func (p *AstronomerProvider) DataSources(ctx context.Context) []func() datasourc
datasources.NewDeploymentDataSource,
datasources.NewDeploymentsDataSource,
datasources.NewOrganizationDataSource,
+ datasources.NewClusterOptionsDataSource,
datasources.NewClusterDataSource,
datasources.NewClustersDataSource,
}
diff --git a/internal/provider/schemas/cluster_options.go b/internal/provider/schemas/cluster_options.go
new file mode 100644
index 00000000..eaf771f1
--- /dev/null
+++ b/internal/provider/schemas/cluster_options.go
@@ -0,0 +1,205 @@
+package schemas
+
+import (
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ datasourceSchema "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
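+// ClusterOptionsElementAttributeTypes returns the attribute types for a single cluster option object.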
+func ClusterOptionsElementAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "provider": types.StringType,
+ "default_vpc_subnet_range": types.StringType,
+ "default_pod_subnet_range": types.StringType,
+ "default_service_subnet_range": types.StringType,
+ "default_service_peering_range": types.StringType,
+ "node_count_min": types.Int64Type,
+ "node_count_max": types.Int64Type,
+ "node_count_default": types.Int64Type,
+ "default_region": types.ObjectType{
+ AttrTypes: RegionAttributeTypes(),
+ },
+ "regions": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: RegionAttributeTypes(),
+ },
+ },
+ "default_node_instance": types.ObjectType{
+ AttrTypes: ProviderInstanceAttributeTypes(),
+ },
+ "node_instances": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: ProviderInstanceAttributeTypes(),
+ },
+ },
+ "default_database_instance": types.ObjectType{
+ AttrTypes: ProviderInstanceAttributeTypes(),
+ },
+ "database_instances": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: ProviderInstanceAttributeTypes(),
+ },
+ },
+ }
+}
+
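+// RegionAttributeTypes returns the attribute types for a region object.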
+func RegionAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "name": types.StringType,
+ "limited": types.BoolType,
+ "banned_instances": types.ListType{
+ ElemType: types.StringType,
+ },
+ }
+}
+
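+// ProviderInstanceAttributeTypes returns the attribute types for a provider instance object.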
+func ProviderInstanceAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "name": types.StringType,
+ "memory": types.StringType,
+ "cpu": types.Int64Type,
+ }
+}
+
+func TemplateVersionAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "version": types.StringType,
+ "url": types.StringType,
+ }
+}
+
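+// ClusterOptionsDataSourceSchemaAttributes returns the schema attributes for the cluster_options data source.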
+func ClusterOptionsDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "cluster_options": datasourceSchema.ListNestedAttribute{
+ NestedObject: datasourceSchema.NestedAttributeObject{
+ Attributes: ClusterOptionDataSourceSchemaAttributes(),
+ },
+ Computed: true,
+ },
+ "type": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOptions type",
+ Required: true,
+ },
+ "cloud_provider": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOptions cloud provider",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ string(platform.ClusterCloudProviderAWS),
+ string(platform.ClusterCloudProviderGCP),
+ string(platform.ClusterCloudProviderAZURE),
+ ),
+ },
+ },
+ }
+}
+
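+// ClusterOptionDataSourceSchemaAttributes returns the schema attributes for a single cluster option.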
+func ClusterOptionDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "provider": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOption provider",
+ Computed: true,
+ },
+ "default_vpc_subnet_range": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOption default vps subnet range",
+ Computed: true,
+ },
+ "default_pod_subnet_range": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOption default pod subnet range",
+ Computed: true,
+ },
+ "default_service_subnet_range": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOption default service subnet range",
+ Computed: true,
+ },
+ "default_service_peering_range": datasourceSchema.StringAttribute{
+ MarkdownDescription: "ClusterOption default service peering range",
+ Computed: true,
+ },
+ "node_count_min": datasourceSchema.Int64Attribute{
+ MarkdownDescription: "ClusterOption node count min",
+ Computed: true,
+ },
+ "node_count_max": datasourceSchema.Int64Attribute{
+ MarkdownDescription: "ClusterOption node count max",
+ Computed: true,
+ },
+ "node_count_default": datasourceSchema.Int64Attribute{
+ MarkdownDescription: "ClusterOption node count default",
+ Computed: true,
+ },
+ "default_region": datasourceSchema.SingleNestedAttribute{
+ MarkdownDescription: "ClusterOption default region",
+ Computed: true,
+ Attributes: DatasourceRegionAttributes(),
+ },
+ "regions": datasourceSchema.ListNestedAttribute{
+ MarkdownDescription: "ClusterOption regions",
+ Computed: true,
+ NestedObject: datasourceSchema.NestedAttributeObject{
+ Attributes: DatasourceRegionAttributes(),
+ },
+ },
+ "default_node_instance": datasourceSchema.SingleNestedAttribute{
+ MarkdownDescription: "ClusterOption default node instance",
+ Computed: true,
+ Attributes: DatasourceProviderInstanceAttributes(),
+ },
+ "node_instances": datasourceSchema.ListNestedAttribute{
+ MarkdownDescription: "ClusterOption node instances",
+ Computed: true,
+ NestedObject: datasourceSchema.NestedAttributeObject{
+ Attributes: DatasourceProviderInstanceAttributes(),
+ },
+ },
+ "default_database_instance": datasourceSchema.SingleNestedAttribute{
+ MarkdownDescription: "ClusterOption default database instance",
+ Computed: true,
+ Attributes: DatasourceProviderInstanceAttributes(),
+ },
+ "database_instances": datasourceSchema.ListNestedAttribute{
+ MarkdownDescription: "ClusterOption database instances",
+ Computed: true,
+ NestedObject: datasourceSchema.NestedAttributeObject{
+ Attributes: DatasourceProviderInstanceAttributes(),
+ },
+ },
+ }
+}
+
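+// DatasourceRegionAttributes returns the data source schema attributes for a region.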
+func DatasourceRegionAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "name": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Region is limited bool",
+ },
+ "limited": datasourceSchema.BoolAttribute{
+ Computed: true,
+ MarkdownDescription: "Region is limited bool",
+ },
+ "banned_instances": datasourceSchema.ListAttribute{
+ ElementType: types.StringType,
+ MarkdownDescription: "Region banned instances",
+ Computed: true,
+ },
+ }
+}
+
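+// DatasourceProviderInstanceAttributes returns the data source schema attributes for a provider instance.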
+func DatasourceProviderInstanceAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "name": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Provider instance name",
+ },
+ "cpu": datasourceSchema.Int64Attribute{
+ Computed: true,
+ MarkdownDescription: "Provider instance cpu",
+ },
+ "memory": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Provider instance memory",
+ },
+ }
+}