Refactor DFKE & fix cluster creation
ducvm29 committed Sep 5, 2024
1 parent 2123eb7 commit 81b8405
Showing 4 changed files with 224 additions and 16 deletions.
10 changes: 7 additions & 3 deletions commons/api_path.go
@@ -32,9 +32,10 @@ var ApiPath = struct {
ListIpAddress func(vpcId string) string
ListExistingIpOfFloatingIp func(vpcId string) string
ListExistingInstanceOfFloatingIp func(vpcId string) string
Subnet func(vpcId string) string
DedicatedFKEUpgradeVersion func(vpcId string, clusterId string) string
DedicatedFKEManagement func(vpcId string, clusterId string) string
Subnet func(vpcId string) string
DedicatedFKEGet func(vpcId string, clusterId string) string
DedicatedFKEUpgradeVersion func(vpcId string, clusterId string) string
DedicatedFKEManagement func(vpcId string, clusterId string) string

ManagedFKEList func(vpcId string, page int, pageSize int) string
ManagedFKEGet func(vpcId string, platform string, clusterId string) string
@@ -126,6 +127,9 @@ var ApiPath = struct {
},
Subnet: func(vpcId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/network/subnets", vpcId) },

DedicatedFKEGet: func(vpcId string, clusterId string) string {
return fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s?page=1&page_size=25", vpcId, clusterId)
},
DedicatedFKEUpgradeVersion: func(vpcId string, clusterId string) string {
return fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s/upgrade-version", vpcId, clusterId)
},
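The new DedicatedFKEGet entry centralizes the cluster-read endpoint that resource_dfke.go previously built inline. A minimal sketch of what the helper returns, with the surrounding ApiPath struct simplified for illustration:

```go
package main

import "fmt"

// Sketch of the DedicatedFKEGet path helper added in commons/api_path.go.
// Signature and URL format are taken from the diff above; the full ApiPath
// struct is omitted here for brevity.
var dedicatedFKEGet = func(vpcId string, clusterId string) string {
	return fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s?page=1&page_size=25", vpcId, clusterId)
}

func main() {
	// Produces the same URL that resource_dfke.go previously hard-coded in internalRead.
	fmt.Println(dedicatedFKEGet("my-vpc-id", "my-cluster-id"))
	// Output: /v1/xplat/fke/vpc/my-vpc-id/cluster/my-cluster-id?page=1&page_size=25
}
```

Keeping the query string inside the helper means the cluster resource and the new state resource below hit exactly the same URL.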
44 changes: 31 additions & 13 deletions fptcloud/dfke/resource_dfke.go
@@ -77,6 +77,12 @@ func (r *resourceDedicatedKubernetesEngine) Create(ctx context.Context, request

tflog.Info(ctx, "Created cluster with id "+createResponse.Cluster.ID)

state.Id = types.StringValue(createResponse.Cluster.ID)

if err = r.waitForSucceeded(ctx, &state, 30*time.Minute, true); err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error waiting cluster up", err.Error()))
return
}
if _, err = r.internalRead(ctx, createResponse.Cluster.ID, &state); err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
return
@@ -174,10 +180,22 @@ func (r *resourceDedicatedKubernetesEngine) ImportState(ctx context.Context, req
tflog.Info(ctx, "Importing DFKE cluster ID "+request.ID)

var state dedicatedKubernetesEngine
state.VpcId = types.StringValue("188af427-269b-418a-90bb-0cb27afc6c1e")

state.Id = types.StringValue(request.ID)
_, err := r.internalRead(ctx, request.ID, &state)
// format: vpcId/clusterId
id := request.ID
pieces := strings.Split(id, "/")
if len(pieces) != 2 {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Invalid format", "must be in format vpcId/clusterId"))
return
}

vpcId := pieces[0]
clusterId := pieces[1]

state.VpcId = types.StringValue(vpcId)

state.Id = types.StringValue(clusterId)
_, err := r.internalRead(ctx, clusterId, &state)
if err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
return
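ImportState now derives both identifiers from the import string instead of hard-coding a VPC ID, so an import presumably takes the form `terraform import <resource_address> <vpcId>/<clusterId>` (the resource address is not shown in this commit). A self-contained sketch of the parsing rule, with illustrative names:

```go
package main

import (
	"fmt"
	"strings"
)

// splitImportID mirrors the new import-ID handling shown above: the ID is
// expected as "vpcId/clusterId". The function name is illustrative, not the
// provider's actual helper.
func splitImportID(id string) (vpcID, clusterID string, err error) {
	pieces := strings.Split(id, "/")
	if len(pieces) != 2 {
		return "", "", fmt.Errorf("invalid format %q: must be vpcId/clusterId", id)
	}
	return pieces[0], pieces[1], nil
}

func main() {
	vpcID, clusterID, err := splitImportID("188af427-269b-418a-90bb-0cb27afc6c1e/my-cluster")
	if err != nil {
		panic(err)
	}
	fmt.Println("vpc:", vpcID, "cluster:", clusterID)
}
```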
@@ -349,18 +367,18 @@ func (r *resourceDedicatedKubernetesEngine) internalRead(ctx context.Context, cl
vpcId := state.VpcId.ValueString()
tflog.Info(ctx, "Reading state of cluster ID "+clusterId+", VPC ID "+vpcId)

a, err := r.client.SendGetRequest(fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s?page=1&page_size=25", vpcId, clusterId))
a, err := r.client.SendGetRequest(commons.ApiPath.DedicatedFKEGet(vpcId, clusterId))

if err != nil {
return nil, err
}

var d dedicatedKubernetesEngineReadResponse
err = json.Unmarshal(a, &d)
data := d.Cluster
if err != nil {
return nil, err
}
data := d.Cluster

var awx dedicatedKubernetesEngineParams
err = json.Unmarshal([]byte(d.Cluster.AwxParams), &awx)
@@ -496,7 +514,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi
}
tflog.Info(ctx, fmt.Sprintf("Resized master from %d to %d", master, master2))

err := r.waitForSucceeded(ctx, from, 5*time.Minute)
err := r.waitForSucceeded(ctx, from, 5*time.Minute, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after resizing master disk to return to SUCCEEDED state", err.Error())
return &d
@@ -527,7 +545,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi
}
tflog.Info(ctx, fmt.Sprintf("Resized worker from %d to %d", worker, worker2))

err := r.waitForSucceeded(ctx, from, 5*time.Minute)
err := r.waitForSucceeded(ctx, from, 5*time.Minute, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after resizing worker disk to return to SUCCEEDED state", err.Error())
return &d
@@ -553,7 +571,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi
}
tflog.Info(ctx, fmt.Sprintf("Changed master from %s to %s", masterType, master2Type))

err := r.waitForSucceeded(ctx, from, 20*time.Minute)
err := r.waitForSucceeded(ctx, from, 20*time.Minute, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after changing master type to return to SUCCEEDED state", err.Error())
return &d
@@ -580,7 +598,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi

tflog.Info(ctx, fmt.Sprintf("Changed worker from %s to %s", workerType, worker2Type))

err := r.waitForSucceeded(ctx, from, 20*time.Minute)
err := r.waitForSucceeded(ctx, from, 20*time.Minute, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after changing worker type to return to SUCCEEDED state", err.Error())
return &d
@@ -609,7 +627,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi
to.ScaleMin.ValueInt64(), to.ScaleMax.ValueInt64(),
))

err := r.waitForSucceeded(ctx, from, 5*time.Minute)
err := r.waitForSucceeded(ctx, from, 5*time.Minute, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after updating autoscale to return to SUCCEEDED state", err.Error())
return &d
@@ -641,7 +659,7 @@ func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedi
return diagErr2
}

err := r.waitForSucceeded(ctx, from, 1*time.Hour)
err := r.waitForSucceeded(ctx, from, 1*time.Hour, false)
if err != nil {
d := diag2.NewErrorDiagnostic("Error waiting for cluster after upgrading to return to SUCCEEDED state", err.Error())
return &d
@@ -667,7 +685,7 @@ func (r *resourceDedicatedKubernetesEngine) manage(state *dedicatedKubernetesEng
return nil
}

func (r *resourceDedicatedKubernetesEngine) waitForSucceeded(ctx context.Context, state *dedicatedKubernetesEngine, timeout time.Duration) error {
func (r *resourceDedicatedKubernetesEngine) waitForSucceeded(ctx context.Context, state *dedicatedKubernetesEngine, timeout time.Duration, ignoreError bool) error {
clusterId := state.clusterUUID()
durationText := fmt.Sprintf("%v", timeout)
tflog.Info(ctx, "Waiting for cluster "+clusterId+" to succeed, duration "+durationText)
@@ -717,7 +735,7 @@ func (r *resourceDedicatedKubernetesEngine) waitForSucceeded(ctx context.Context
return errors.New("cluster is stopped")
}
}
if e != nil {
if e != nil && !ignoreError {
return e
}
}
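waitForSucceeded gains an ignoreError flag: Create passes true so that transient read failures while the cluster is still provisioning do not abort the 30-minute wait, while the in-place diff paths keep passing false and fail fast. An illustrative, self-contained version of that polling behaviour (getStatus and the poll interval are stand-ins, not the provider's actual code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Sketch of the extended wait loop: it polls until the cluster reports
// SUCCEEDED, fails on timeout, and only propagates read errors when
// ignoreError is false. getStatus stands in for the provider's internalRead.
func waitForSucceeded(ctx context.Context, timeout, interval time.Duration, ignoreError bool, getStatus func() (string, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}

		status, err := getStatus()
		if err != nil {
			// During creation the cluster may not be readable yet, so the
			// caller passes ignoreError=true and keeps polling instead of
			// failing fast.
			if ignoreError {
				continue
			}
			return err
		}
		if status == "SUCCEEDED" {
			return nil
		}
	}
	return errors.New("timed out waiting for cluster to reach SUCCEEDED")
}

func main() {
	calls := 0
	err := waitForSucceeded(context.Background(), time.Second, 50*time.Millisecond, true,
		func() (string, error) {
			calls++
			if calls < 3 {
				return "", errors.New("transient read error")
			}
			return "SUCCEEDED", nil
		})
	fmt.Println("result:", err) // result: <nil>
}
```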
185 changes: 185 additions & 0 deletions fptcloud/dfke/resource_dfke_state.go
@@ -0,0 +1,185 @@
package fptcloud_dfke

import (
"context"
"encoding/json"
"errors"
"fmt"
diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"terraform-provider-fptcloud/commons"
)

var (
_ resource.Resource = &resourceDedicatedKubernetesEngineState{}
_ resource.ResourceWithConfigure = &resourceDedicatedKubernetesEngineState{}
_ resource.ResourceWithImportState = &resourceDedicatedKubernetesEngineState{}
)

type resourceDedicatedKubernetesEngineState struct {
client *commons.Client
}

func NewResourceDedicatedKubernetesEngineState() resource.Resource {
return &resourceDedicatedKubernetesEngineState{}
}

func (r *resourceDedicatedKubernetesEngineState) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
tflog.Info(ctx, "Importing state for DFKE cluster ID "+request.ID)

var state dedicatedKubernetesEngineState
state.Id = types.StringValue(request.ID)

// TODO fix
state.VpcId = types.StringValue("188af427-269b-418a-90bb-0cb27afc6c1e")

_, err := r.internalRead(ctx, request.ID, &state)
if err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
return
}

diags := response.State.Set(ctx, &state)
response.Diagnostics.Append(diags...)
if response.Diagnostics.HasError() {
return
}

// lack of ability to import without VPC ID
//response.Diagnostics.Append(diag2.NewErrorDiagnostic("Unimplemented", "Importing DFKE clusters isn't currently supported"))
//return
}

func (r *resourceDedicatedKubernetesEngineState) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
response.TypeName = request.ProviderTypeName + "_dedicated_kubernetes_engine_v1_state"
}

func (r *resourceDedicatedKubernetesEngineState) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
response.Schema = schema.Schema{
Description: "Manage dedicated FKE cluster state",
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Required: true,
PlanModifiers: forceNewPlanModifiersString,
},
"vpc_id": schema.StringAttribute{
Required: true,
PlanModifiers: forceNewPlanModifiersString,
},
"is_running": schema.BoolAttribute{
Required: true,
PlanModifiers: []planmodifier.Bool{
boolplanmodifier.RequiresReplace(),
},
},
},
}
}

func (r *resourceDedicatedKubernetesEngineState) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
var state dedicatedKubernetesEngineState
diags := request.Plan.Get(ctx, &state)

response.Diagnostics.Append(diags...)
if response.Diagnostics.HasError() {
return
}

if _, err := r.internalRead(ctx, state.Id.ValueString(), &state); err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
return
}

diags = response.State.Set(ctx, &state)
response.Diagnostics.Append(diags...)
if response.Diagnostics.HasError() {
return
}
}

func (r *resourceDedicatedKubernetesEngineState) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
var state dedicatedKubernetesEngineState
diags := request.State.Get(ctx, &state)

response.Diagnostics.Append(diags...)
if response.Diagnostics.HasError() {
return
}

if _, err := r.internalRead(ctx, state.Id.ValueString(), &state); err != nil {
response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
return
}

diags = response.State.Set(ctx, &state)
response.Diagnostics.Append(diags...)
if response.Diagnostics.HasError() {
return
}
}

func (r *resourceDedicatedKubernetesEngineState) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
//TODO implement me
panic("implement me")
}

func (r *resourceDedicatedKubernetesEngineState) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
}

func (r *resourceDedicatedKubernetesEngineState) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
if request.ProviderData == nil {
return
}

client, ok := request.ProviderData.(*commons.Client)
if !ok {
response.Diagnostics.AddError(
"Unexpected Resource Configure Type",
fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
)

return
}

r.client = client
}

func (r *resourceDedicatedKubernetesEngineState) internalRead(ctx context.Context, clusterId string, state *dedicatedKubernetesEngineState) (*dedicatedKubernetesEngineReadResponse, error) {
vpcId := state.VpcId.ValueString()
tflog.Info(ctx, "Reading state of cluster ID "+clusterId+", VPC ID "+vpcId)

a, err := r.client.SendGetRequest(commons.ApiPath.DedicatedFKEGet(vpcId, clusterId))

if err != nil {
return nil, err
}

var d dedicatedKubernetesEngineReadResponse
err = json.Unmarshal(a, &d)
if err != nil {
return nil, err
}

data := d.Cluster
if data.Status != "STOPPED" && data.IsRunning == false {
return &d, errors.New("cluster is not running, but status is " + data.Status + " instead of STOPPED")
}

if data.Status != "SUCCEEDED" && data.IsRunning == true {
return &d, errors.New("cluster is running, but status is " + data.Status + " instead of SUCCEEDED")
}

state.IsRunning = types.BoolValue(data.IsRunning)
return &d, nil
}

type dedicatedKubernetesEngineState struct {
Id types.String `tfsdk:"id"`
VpcId types.String `tfsdk:"vpc_id"`
IsRunning types.Bool `tfsdk:"is_running"`
}
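The new state resource models only whether a cluster is running, and its internalRead refuses to populate state when the API's status field and is_running flag disagree. A small sketch of that consistency rule (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// checkClusterState mirrors the consistency check in the state resource's
// internalRead above: a stopped cluster must report STOPPED, and a running
// cluster must report SUCCEEDED.
func checkClusterState(status string, isRunning bool) error {
	if !isRunning && status != "STOPPED" {
		return errors.New("cluster is not running, but status is " + status + " instead of STOPPED")
	}
	if isRunning && status != "SUCCEEDED" {
		return errors.New("cluster is running, but status is " + status + " instead of SUCCEEDED")
	}
	return nil
}

func main() {
	fmt.Println(checkClusterState("SUCCEEDED", true)) // <nil>
	fmt.Println(checkClusterState("STOPPED", false))  // <nil>
	fmt.Println(checkClusterState("ERROR", true))     // inconsistent: error returned
}
```

Because is_running uses boolplanmodifier.RequiresReplace(), toggling the flag in configuration forces the state resource to be replaced rather than updated in place.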
1 change: 1 addition & 0 deletions fptcloud/provider_tf6.go
@@ -156,6 +156,7 @@ func (x *xplatProvider) DataSources(ctx context.Context) []func() datasource.Dat
func (x *xplatProvider) Resources(ctx context.Context) []func() resource.Resource {
return []func() resource.Resource{
fptcloud_dfke.NewResourceDedicatedKubernetesEngine,
fptcloud_dfke.NewResourceDedicatedKubernetesEngineState,
fptcloud_mfke.NewResourceManagedKubernetesEngine,
}
}
