Fix creating and updating hosted deployments with scaling specs (#62)
vandyliu authored May 17, 2024
1 parent 0ba1d5e commit 37b1be9
Showing 7 changed files with 415 additions and 136 deletions.
13 changes: 8 additions & 5 deletions docs/resources/deployment.md
@@ -171,24 +171,27 @@ Read-Only:
<a id="nestedatt--scaling_spec"></a>
### Nested Schema for `scaling_spec`

Optional:
Required:

- `hibernation_spec` (Attributes) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec))
- `hibernation_spec` (Attributes) Hibernation configuration for the deployment. The deployment will hibernate according to the schedules defined in this configuration. To remove the hibernation configuration, set scaling_spec to null. (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec))

<a id="nestedatt--scaling_spec--hibernation_spec"></a>
### Nested Schema for `scaling_spec.hibernation_spec`

Optional:

- `override` (Attributes) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--override))
- `schedules` (Attributes Set) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--schedules))
- `override` (Attributes) Hibernation override configuration. Set to null to remove the override. (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--override))
- `schedules` (Attributes Set) List of hibernation schedules. Set to null to remove all schedules. (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--schedules))

<a id="nestedatt--scaling_spec--hibernation_spec--override"></a>
### Nested Schema for `scaling_spec.hibernation_spec.override`

Optional:
Required:

- `is_hibernating` (Boolean)

Optional:

- `override_until` (String)

Read-Only:
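For orientation, the sketch below shows how nested attributes like the ones documented above are typically declared with terraform-plugin-framework. It is an illustrative assumption only: the provider's real schema lives in its schemas package and is not part of this diff. The required/optional flags mirror the documentation above; the attribute details for `schedules` are guesses.

```go
package schemas // placement is illustrative only

import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
)

// scalingSpecAttributeSketch is an illustrative sketch, not the provider's
// actual schema definition. Required/optional flags follow the docs above;
// the schedule attribute details are assumptions.
func scalingSpecAttributeSketch() schema.SingleNestedAttribute {
	return schema.SingleNestedAttribute{
		Optional: true,
		Attributes: map[string]schema.Attribute{
			"hibernation_spec": schema.SingleNestedAttribute{
				Required: true,
				Attributes: map[string]schema.Attribute{
					"override": schema.SingleNestedAttribute{
						Optional: true,
						Attributes: map[string]schema.Attribute{
							"is_hibernating": schema.BoolAttribute{Required: true},
							"override_until": schema.StringAttribute{Optional: true},
						},
					},
					"schedules": schema.SetNestedAttribute{
						Optional: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"description":       schema.StringAttribute{Optional: true},
								"hibernate_at_cron": schema.StringAttribute{Required: true},
								"wake_at_cron":      schema.StringAttribute{Required: true},
								"is_enabled":        schema.BoolAttribute{Required: true},
							},
						},
					},
				},
			},
		},
	}
}
```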
186 changes: 110 additions & 76 deletions internal/provider/models/deployment.go
@@ -244,23 +244,13 @@ func (data *DeploymentResource) ReadFromResponse(
data.SchedulerSize = types.StringPointerValue((*string)(deployment.SchedulerSize))
data.IsDevelopmentMode = types.BoolPointerValue(deployment.IsDevelopmentMode)
data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)

// Currently, the scaling status and spec are only available in development mode
// However, there is a bug in the API where the scaling status and spec are returned even if the deployment is not in development mode for updated deployments
// This is a workaround to handle the bug until the API is fixed
// Issue here: https://github.com/astronomer/astro/issues/21073
if deployment.IsDevelopmentMode != nil && *deployment.IsDevelopmentMode {
data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
if diags.HasError() {
return diags
}
data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
if diags.HasError() {
return diags
}
} else {
data.ScalingStatus = types.ObjectNull(schemas.ScalingStatusAttributeTypes())
data.ScalingSpec = types.ObjectNull(schemas.ScalingSpecAttributeTypes())
data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
if diags.HasError() {
return diags
}
data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
if diags.HasError() {
return diags
}

return nil
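As context for this simplification (a hedged sketch, not code from this commit): with the conversion helpers now handling nil API values themselves, a caller only needs the usual diagnostics-propagation pattern, roughly as below. Names and the exact `ReadFromResponse` signature are assumptions based on the hunks shown here, and imports are assumed from the surrounding resource package.

```go
// Hypothetical excerpt-style sketch, not code from this commit.
func saveDeploymentState(
	ctx context.Context,
	deployment *platform.Deployment,
	data *models.DeploymentResource,
	resp *resource.ReadResponse,
) {
	// ReadFromResponse now always maps scaling_status and scaling_spec; the
	// helpers return null objects when the API omits them, so no
	// development-mode guard is needed here.
	resp.Diagnostics.Append(data.ReadFromResponse(ctx, deployment)...)
	if resp.Diagnostics.HasError() {
		return
	}
	resp.Diagnostics.Append(resp.State.Set(ctx, data)...)
}
```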
@@ -351,23 +341,13 @@ func (data *DeploymentDataSource) ReadFromResponse(
data.SchedulerSize = types.StringPointerValue((*string)(deployment.SchedulerSize))
data.IsDevelopmentMode = types.BoolPointerValue(deployment.IsDevelopmentMode)
data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)

// Currently, the scaling status and spec are only available in development mode
// However, there is a bug in the API where the scaling status and spec are returned even if the deployment is not in development mode for updated deployments
// This is a workaround to handle the bug until the API is fixed
// Issue here: https://github.com/astronomer/astro/issues/21073
if deployment.IsDevelopmentMode != nil && *deployment.IsDevelopmentMode {
data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
if diags.HasError() {
return diags
}
data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
if diags.HasError() {
return diags
}
} else {
data.ScalingStatus = types.ObjectNull(schemas.ScalingStatusAttributeTypes())
data.ScalingSpec = types.ObjectNull(schemas.ScalingSpecAttributeTypes())
data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
if diags.HasError() {
return diags
}
data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
if diags.HasError() {
return diags
}

return nil
@@ -459,11 +439,11 @@ func WorkerQueueDataSourceTypesObject(
}

type DeploymentScalingSpec struct {
HibernationSpec HibernationSpec `tfsdk:"hibernation_spec"`
HibernationSpec types.Object `tfsdk:"hibernation_spec"`
}

type DeploymentStatus struct {
HibernationStatus HibernationStatus `tfsdk:"hibernation_status"`
HibernationStatus types.Object `tfsdk:"hibernation_status"`
}

type HibernationStatus struct {
@@ -474,8 +454,8 @@ type HibernationStatus struct {
}

type HibernationSpec struct {
Override HibernationSpecOverride `tfsdk:"override"`
Schedules []HibernationSchedule `tfsdk:"schedules"`
Override types.Object `tfsdk:"override"`
Schedules types.Set `tfsdk:"schedules"`
}

type HibernationSpecOverride struct {
@@ -491,54 +471,108 @@ type HibernationSchedule struct {
WakeAtCron types.String `tfsdk:"wake_at_cron"`
}

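The switch from nested model structs to types.Object / types.Set is what lets an absent nested block be represented explicitly. A minimal sketch (illustrative only, using names already defined in this file):

```go
// Illustrative only — not code from this commit. A types.Object field can
// hold an explicit null value for a deployment without a hibernation spec,
// which a plain nested struct cannot express.
func nullScalingSpecSketch() DeploymentScalingSpec {
	return DeploymentScalingSpec{
		HibernationSpec: types.ObjectNull(schemas.HibernationSpecAttributeTypes()),
	}
}
```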
func HibernationStatusTypesObject(
ctx context.Context,
hibernationStatus *platform.DeploymentHibernationStatus,
) (types.Object, diag.Diagnostics) {
if hibernationStatus == nil {
return types.ObjectNull(schemas.HibernationStatusAttributeTypes()), nil
}

obj := HibernationStatus{
IsHibernating: types.BoolValue(hibernationStatus.IsHibernating),
NextEventType: types.StringPointerValue((*string)(hibernationStatus.NextEventType)),
NextEventAt: types.StringPointerValue(hibernationStatus.NextEventAt),
Reason: types.StringPointerValue(hibernationStatus.Reason),
}
return types.ObjectValueFrom(ctx, schemas.HibernationStatusAttributeTypes(), obj)
}

func HibernationOverrideTypesObject(
ctx context.Context,
hibernationOverride *platform.DeploymentHibernationOverride,
) (types.Object, diag.Diagnostics) {
if hibernationOverride == nil {
return types.ObjectNull(schemas.HibernationOverrideAttributeTypes()), nil
}
obj := HibernationSpecOverride{
IsHibernating: types.BoolPointerValue(hibernationOverride.IsHibernating),
IsActive: types.BoolPointerValue(hibernationOverride.IsActive),
}
if hibernationOverride.OverrideUntil != nil {
obj.OverrideUntil = types.StringValue(hibernationOverride.OverrideUntil.Format(time.RFC3339))
}
return types.ObjectValueFrom(ctx, schemas.HibernationOverrideAttributeTypes(), obj)
}

func HibernationScheduleTypesObject(
ctx context.Context,
schedule platform.DeploymentHibernationSchedule,
) (types.Object, diag.Diagnostics) {
obj := HibernationSchedule{
Description: types.StringPointerValue(schedule.Description),
HibernateAtCron: types.StringValue(schedule.HibernateAtCron),
IsEnabled: types.BoolValue(schedule.IsEnabled),
WakeAtCron: types.StringValue(schedule.WakeAtCron),
}
return types.ObjectValueFrom(ctx, schemas.HibernationScheduleAttributeTypes(), obj)
}

func HibernationSpecTypesObject(
ctx context.Context,
hibernationSpec *platform.DeploymentHibernationSpec,
) (types.Object, diag.Diagnostics) {
if hibernationSpec == nil || (hibernationSpec.Override == nil && hibernationSpec.Schedules == nil) {
return types.ObjectNull(schemas.HibernationSpecAttributeTypes()), nil
}

override, diags := HibernationOverrideTypesObject(ctx, hibernationSpec.Override)
if diags.HasError() {
return types.ObjectNull(schemas.HibernationSpecAttributeTypes()), diags
}
schedules, diags := utils.ObjectSet(ctx, hibernationSpec.Schedules, schemas.HibernationScheduleAttributeTypes(), HibernationScheduleTypesObject)
if diags.HasError() {
return types.ObjectNull(schemas.HibernationSpecAttributeTypes()), diags
}
obj := HibernationSpec{
Override: override,
Schedules: schedules,
}
return types.ObjectValueFrom(ctx, schemas.HibernationSpecAttributeTypes(), obj)
}

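HibernationSpecTypesObject relies on a generic set-building helper, utils.ObjectSet, whose implementation is not part of this diff. A hedged sketch of what such a helper might look like — an assumption about its shape, using only framework APIs:

```go
package utils // illustrative sketch; the repository's real helper may differ

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// ObjectSet is a sketch of the kind of generic helper used above to build a
// types.Set from a slice of API models and a per-element converter. It is an
// assumption about the helper's shape, not the provider's actual code.
func ObjectSet[T any](
	ctx context.Context,
	values *[]T,
	attrTypes map[string]attr.Type,
	convert func(context.Context, T) (types.Object, diag.Diagnostics),
) (types.Set, diag.Diagnostics) {
	elemType := types.ObjectType{AttrTypes: attrTypes}
	if values == nil {
		// A nil slice from the API maps to a null set, mirroring how nil
		// sub-objects map to types.ObjectNull elsewhere in this file.
		return types.SetNull(elemType), nil
	}
	objs := make([]attr.Value, 0, len(*values))
	for _, v := range *values {
		obj, diags := convert(ctx, v)
		if diags.HasError() {
			return types.SetNull(elemType), diags
		}
		objs = append(objs, obj)
	}
	return types.SetValue(elemType, objs)
}
```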
func ScalingStatusTypesObject(
ctx context.Context,
scalingStatus *platform.DeploymentScalingStatus,
) (types.Object, diag.Diagnostics) {
if scalingStatus != nil && scalingStatus.HibernationStatus != nil {
obj := DeploymentStatus{
HibernationStatus: HibernationStatus{
IsHibernating: types.BoolValue(scalingStatus.HibernationStatus.IsHibernating),
NextEventType: types.StringPointerValue((*string)(scalingStatus.HibernationStatus.NextEventType)),
NextEventAt: types.StringPointerValue(scalingStatus.HibernationStatus.NextEventAt),
Reason: types.StringPointerValue(scalingStatus.HibernationStatus.Reason),
},
}
return types.ObjectValueFrom(ctx, schemas.ScalingStatusAttributeTypes(), obj)
if scalingStatus == nil {
return types.ObjectNull(schemas.ScalingStatusAttributeTypes()), nil
}

hibernationStatus, diags := HibernationStatusTypesObject(ctx, scalingStatus.HibernationStatus)
if diags.HasError() {
return types.ObjectNull(schemas.ScalingStatusAttributeTypes()), diags
}
return types.ObjectNull(schemas.ScalingStatusAttributeTypes()), nil
obj := DeploymentStatus{
HibernationStatus: hibernationStatus,
}
return types.ObjectValueFrom(ctx, schemas.ScalingStatusAttributeTypes(), obj)
}

func ScalingSpecTypesObject(
ctx context.Context,
scalingSpec *platform.DeploymentScalingSpec,
) (types.Object, diag.Diagnostics) {
if scalingSpec != nil && scalingSpec.HibernationSpec != nil && (scalingSpec.HibernationSpec.Override != nil || scalingSpec.HibernationSpec.Schedules != nil) {
obj := DeploymentScalingSpec{
HibernationSpec: HibernationSpec{},
}
if scalingSpec.HibernationSpec.Override != nil {
obj.HibernationSpec.Override = HibernationSpecOverride{
IsHibernating: types.BoolPointerValue(scalingSpec.HibernationSpec.Override.IsHibernating),
IsActive: types.BoolPointerValue(scalingSpec.HibernationSpec.Override.IsActive),
}
if scalingSpec.HibernationSpec.Override.OverrideUntil != nil {
obj.HibernationSpec.Override.OverrideUntil = types.StringValue(scalingSpec.HibernationSpec.Override.OverrideUntil.Format(time.RFC3339))
}
}
if scalingSpec.HibernationSpec.Schedules != nil {
schedules := make([]HibernationSchedule, 0, len(*scalingSpec.HibernationSpec.Schedules))
for _, schedule := range *scalingSpec.HibernationSpec.Schedules {
schedules = append(schedules, HibernationSchedule{
Description: types.StringPointerValue(schedule.Description),
HibernateAtCron: types.StringValue(schedule.HibernateAtCron),
IsEnabled: types.BoolValue(schedule.IsEnabled),
WakeAtCron: types.StringValue(schedule.WakeAtCron),
})
}
obj.HibernationSpec.Schedules = schedules
}
return types.ObjectValueFrom(ctx, schemas.ScalingSpecAttributeTypes(), obj)
if scalingSpec == nil {
return types.ObjectNull(schemas.ScalingSpecAttributeTypes()), nil
}

hibernationSpec, diags := HibernationSpecTypesObject(ctx, scalingSpec.HibernationSpec)
if diags.HasError() {
return types.ObjectNull(schemas.ScalingSpecAttributeTypes()), diags
}
obj := DeploymentScalingSpec{
HibernationSpec: hibernationSpec,
}
return types.ObjectNull(schemas.ScalingSpecAttributeTypes()), nil
return types.ObjectValueFrom(ctx, schemas.ScalingSpecAttributeTypes(), obj)
}
24 changes: 16 additions & 8 deletions internal/provider/resources/resource_cluster.go
@@ -89,7 +89,6 @@ func (r *ClusterResource) Create(
return
}

var diags diag.Diagnostics
var createClusterRequest platform.CreateClusterRequest

switch platform.ClusterCloudProvider(data.CloudProvider.ValueString()) {
@@ -221,7 +220,6 @@ func (r *ClusterResource) Create(
Refresh: r.resourceRefreshFunc(ctx, cluster.JSON200.Id),
Timeout: 3 * time.Hour,
MinTimeout: 1 * time.Minute,
Delay: 5 * time.Minute,
}

// readyCluster is the final state of the cluster after it has reached a target status
@@ -375,7 +373,6 @@ func (r *ClusterResource) Update(
Refresh: r.resourceRefreshFunc(ctx, cluster.JSON200.Id),
Timeout: 3 * time.Hour,
MinTimeout: 1 * time.Minute,
Delay: 5 * time.Minute,
}

// readyCluster is the final state of the cluster after it has reached a target status
@@ -447,7 +444,6 @@ func (r *ClusterResource) Delete(
Refresh: r.resourceRefreshFunc(ctx, data.Id.ValueString()),
Timeout: 1 * time.Hour,
MinTimeout: 30 * time.Second,
Delay: 1 * time.Minute,
}

_, err = stateConf.WaitForStateContext(ctx)
@@ -493,7 +489,7 @@ func (r *ClusterResource) ValidateConfig(
}

func validateAwsConfig(ctx context.Context, data *models.ClusterResource) diag.Diagnostics {
var diags diag.Diagnostics
diags := make(diag.Diagnostics, 0)

// Unallowed values
if !data.TenantId.IsNull() {
@@ -524,7 +520,7 @@ }
}

func validateAzureConfig(ctx context.Context, data *models.ClusterResource) diag.Diagnostics {
var diags diag.Diagnostics
diags := make(diag.Diagnostics, 0)

// Unallowed values
if !data.ServicePeeringRange.IsNull() {
@@ -549,7 +545,7 @@ }
}

func validateGcpConfig(ctx context.Context, data *models.ClusterResource) diag.Diagnostics {
var diags diag.Diagnostics
diags := make(diag.Diagnostics, 0)

// required values
if data.ServicePeeringRange.IsNull() {
@@ -600,6 +596,18 @@ func (r *ClusterResource) resourceRefreshFunc(ctx context.Context, clusterId str
if diagnostic != nil {
return nil, "", fmt.Errorf("error getting cluster %s", diagnostic.Detail())
}
return cluster.JSON200, string(cluster.JSON200.Status), nil
if cluster != nil && cluster.JSON200 != nil {
switch cluster.JSON200.Status {
case platform.ClusterStatusCREATED:
return cluster.JSON200, string(cluster.JSON200.Status), nil
case platform.ClusterStatusUPDATEFAILED, platform.ClusterStatusCREATEFAILED:
return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("cluster mutation failed for cluster '%v'", cluster.JSON200.Id)
case platform.ClusterStatusCREATING, platform.ClusterStatusUPDATING:
return cluster.JSON200, string(cluster.JSON200.Status), nil
default:
return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("unexpected cluster status '%v' for cluster '%v'", cluster.JSON200.Status, cluster.JSON200.Id)
}
}
return nil, "", fmt.Errorf("error getting cluster %s", clusterId)
}
}
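For context, a refresh function like this is consumed by the retry.StateChangeConf waiters configured in Create/Update/Delete above. A hedged sketch of that pairing follows; the Pending/Target lists and the *platform.Cluster assertion are assumptions, since they are not visible in these hunks.

```go
// Hedged sketch — not code from this commit. Field names match the
// StateChangeConf usage visible above.
func waitForClusterReadySketch(ctx context.Context, r *ClusterResource, clusterId string) (*platform.Cluster, error) {
	stateConf := &retry.StateChangeConf{
		Pending:    []string{string(platform.ClusterStatusCREATING), string(platform.ClusterStatusUPDATING)},
		Target:     []string{string(platform.ClusterStatusCREATED)},
		Refresh:    r.resourceRefreshFunc(ctx, clusterId),
		Timeout:    3 * time.Hour,
		MinTimeout: 1 * time.Minute,
		// With no Delay set, WaitForStateContext begins polling Refresh
		// immediately rather than after a fixed initial wait.
	}
	readyCluster, err := stateConf.WaitForStateContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("cluster never reached a ready state: %w", err)
	}
	return readyCluster.(*platform.Cluster), nil
}
```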
1 change: 1 addition & 0 deletions internal/provider/resources/resource_cluster_test.go
@@ -26,6 +26,7 @@ const SKIP_CLUSTER_RESOURCE_TESTS = "SKIP_CLUSTER_RESOURCE_TESTS"
const SKIP_CLUSTER_RESOURCE_TESTS_REASON = "Skipping dedicated cluster (and dedicated deployment) resource tests. To run these tests, unset the SKIP_CLUSTER_RESOURCE_TESTS environment variable."

func TestAcc_ResourceClusterAwsWithDedicatedDeployments(t *testing.T) {
t.Skip("AWS cluster creation is currently not working on dev")
if os.Getenv(SKIP_CLUSTER_RESOURCE_TESTS) == "True" {
t.Skip(SKIP_CLUSTER_RESOURCE_TESTS_REASON)
}